| prompt | completion | api |
| --- | --- | --- |
| stringlengths 19–879k | stringlengths 3–53.8k | stringlengths 8–59 |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 27 11:15:03 2018
@author: XieTianwen
@assignment_author: <NAME> [On Sat. Dec. 01 09:46 8102]
"""
import numpy as np
import matplotlib.pyplot as plt
def power_iteration(M,eta=1e-7,max_iter_N=100):
"""
    M: the matrix to decompose [np.array]
    eta: convergence tolerance
    max_iter_N: maximum number of iterations
    Uses power iteration to get the dominant eigenvalue and eigenvector of M.
"""
# initialize x
x = np.random.randn(M.shape[0])
lam = 0
for k in range(max_iter_N):
y = x / x.max()
x = M.dot(y)
beta = x.max()
err = abs(beta - lam)
print("{}/{} err:{}".format(k+1,max_iter_N,err))
y = x / beta
if err <= eta:
print("Power Iteration Stop! [Success]")
break
else:
lam = beta
if k >= (max_iter_N - 1):
print("Exceed Max Iteration! [Failure]")
    return beta, y  # eigenvalue, and the eigenvector scaled so its largest entry is 1
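# A minimal usage sketch (not part of the original assignment): check
# power_iteration against numpy.linalg.eig on a small symmetric matrix.
# `A_demo` and the helper name `_demo_power_iteration` are illustrative
# assumptions, not part of the original code.
def _demo_power_iteration():
    A_demo = np.array([[4.0, 1.0],
                       [1.0, 3.0]])
    beta, y = power_iteration(A_demo)
    w, _ = np.linalg.eig(A_demo)
    print("power iteration eigenvalue:", beta)
    print("numpy dominant eigenvalue :", w.max())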
def orthogonal_vec(a):
"""
get an orthogonol vector of vector a
"""
b = np.ones_like(a)
a_m = a.T.dot(a)
oc = b - a.T.dot(a.T.dot(b)/a_m)
return oc
def second_sv_fn(B,s_py,max_iter_N=10):
"""
    B: matrix
    s_py: vector of sqrt_P of y
    Get the 2nd largest singular vectors (left and right) of B.
"""
norm = lambda x:x/np.sqrt(x.dot(x.T)) # normalizer
psi_1 = orthogonal_vec(s_py)
for i in range(max_iter_N):
psi_0 = psi_1
phi_1 = B.dot(psi_1)
phi_1 = phi_1 / phi_1.max()
psi_1 = phi_1.T.dot(B)
        psi_1 = psi_1 / psi_1.max()
        err = np.mean(abs(psi_0 - psi_1))
        print("{}/{} err psi: {}".format(i + 1, max_iter_N, err))
phi_1 = norm(phi_1)
psi_1 = norm(psi_1)
# U,_,V = svd(B)
    # psi_1 == V[1,:]
# phi_1 == U[:,1]
return phi_1,psi_1
def main():
plt.style.use('ggplot')
num = np.load('num.npy')
'''
your codes
'''
# acquire matrix B
Pxy = num / np.sum(num)
    Px = np.sum(num,1) / np.sum(num)
# Allen Institute Software License - This software license is the 2-clause BSD
# license plus a third clause that prohibits redistribution for commercial
# purposes without further permission.
#
# Copyright 2015-2016. Allen Institute. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Redistributions for commercial purposes are not permitted without the
# Allen Institute's written permission.
# For purposes of this license, commercial purposes is the incorporation of the
# Allen Institute's software into anything for which you will charge fees or
# other compensation. Contact <EMAIL> for commercial licensing
# opportunities.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import sys
import math
import numpy as np
import scipy.signal as signal
import logging
# Design notes:
# to generate an average feature file, all sweeps must have all features
# to generate a fitness score of a sweep against a feature file, the sweep
# must have all features in the file. If one is absent, a penalty
# of TODO ??? will be assessed
# set of features
class EphysFeatures( object ):
def __init__(self, name):
# feature mean and standard deviations
self.mean = {}
self.stdev = {}
# human-readable names for features
self.glossary = {}
# table indicating how to score feature
# 'hit' feature exists:
# 'ignore' do nothing
# 'stdev' score is # stdevs from target mean
# 'miss' feature absent:
# 'constant' score = scoring['constant']
# 'mean_mult' score = mean * scoring['mean_mult']
#
self.scoring = {}
self.name = name
################################################################
# ignore scores
ignore_score = { "hit": "ignore" }
self.glossary["n_spikes"] = "Number of spikes"
self.scoring["n_spikes"] = ignore_score
################################################################
# ignore misses
ignore_miss = { "hit":"stdev", "miss":"const", "const":0 }
self.glossary["adapt"] = "Adaptation index"
self.scoring["adapt"] = ignore_miss
self.glossary["latency"] = "Time to first spike (ms)"
self.scoring["latency"] = ignore_miss
################################################################
# base miss off mean
mean_score = { "hit":"stdev", "miss":"mean_mult", "mean_mult":2 }
self.glossary["ISICV"] = "ISI-CV"
self.scoring["ISICV"] = mean_score
################################################################
# normal scoring
normal_score = { "hit":"stdev", "miss":"const", "const":20 }
self.glossary["isi_avg"] = "Average ISI (ms)"
self.scoring["isi_avg"] = ignore_score
self.glossary["doublet"] = "Doublet ISI (ms)"
self.scoring["doublet"] = normal_score
self.glossary["f_fast_ahp"] = "Fast AHP (mV)"
self.scoring["f_fast_ahp"] = normal_score
self.glossary["f_slow_ahp"] = "Slow AHP (mV)"
self.scoring["f_slow_ahp"] = normal_score
self.glossary["f_slow_ahp_time"] = "Slow AHP time"
self.scoring["f_slow_ahp_time"] = normal_score
self.glossary["base_v"] = "Baseline voltage (mV)"
self.scoring["base_v"] = normal_score
#self.glossary["base_v2"] = "Baseline voltage 2 (mV)"
#self.scoring["base_v2"] = normal_score
#self.glossary["base_v3"] = "Baseline voltage 3 (mV)"
#self.scoring["base_v3"] = normal_score
################################################################
# per spike scoring
perspike_score = { "hit":"perspike", "miss":"const", "const":20, "skip_last_n":0 }
self.glossary["f_peak"] = "Spike height (mV)"
self.scoring["f_peak"] = perspike_score.copy()
self.glossary["f_trough"] = "Spike depth (mV)"
self.scoring["f_trough"] = perspike_score.copy()
self.scoring["f_trough"]["skip_last_n"] = 1
# self.glossary["f_w"] = "Spike width at -30 mV (ms)"
# self.scoring["f_w"] = perspike_score.copy()
self.glossary["upstroke"] = "Peak upstroke (mV/ms)"
self.scoring["upstroke"] = perspike_score.copy()
self.glossary["upstroke_v"] = "Vm of peak upstroke (mV)"
self.scoring["upstroke_v"] = perspike_score.copy()
self.glossary["downstroke"] = "Peak downstroke (mV/ms)"
self.scoring["downstroke"] = perspike_score.copy()
self.glossary["downstroke_v"] = "Vm of peak downstroke (mV)"
self.scoring["downstroke_v"] = perspike_score.copy()
self.glossary["threshold"] = "Threshold voltage (mV)"
self.scoring["threshold"] = perspike_score.copy()
self.glossary["width"] = "Spike width at half-max (ms)"
self.scoring["width"] = perspike_score.copy()
self.scoring["width"]["skip_last_n"] = 1
self.glossary["thresh_ramp"] = "Change in dv/dt over first 5 mV past threshold (mV/ms)"
self.scoring["thresh_ramp"] = perspike_score.copy()
################################################################
# heavily penalize when there are no spikes
spike_score = { "hit":"stdev", "miss":"const", "const":250 }
self.glossary["rate"] = "Firing rate (Hz)"
self.scoring["rate"] = spike_score
def print_out(self):
print(("Features from " + self.name))
for k in list(self.mean.keys()):
if k in self.glossary:
st = "%30s = " % self.glossary[k]
if self.mean[k] is not None:
st += "%g" % self.mean[k]
else:
st += "--------"
if k in self.stdev and self.stdev[k] is not None:
st += " +/- %g" % self.stdev[k]
print(st)
# initialize summary feature set from file
def clone(self, param_dict):
for k in list(param_dict.keys()):
self.mean[k] = param_dict[k]["mean"]
self.stdev[k] = param_dict[k]["stdev"]
class EphysFeatureExtractor( object ):
def __init__(self):
# list of feature set instances
self.feature_list = []
# names of each element in feature list
self.feature_source = []
# feature set object representing combination of all instances
self.summary = None
# adds new feature set instance to feature_list
def process_instance(self, name, v, curr, t, onset, dur, stim_name):
feature = EphysFeatures(name)
################################################################
# set stop time -- run until end of stimulus or end of sweep
        # comment out one of the two approaches
# detect spikes only during stimulus
start = onset
stop = onset + dur
# detect spikes for all of sweep
#start = 0
#stop = t[-1]
################################################################
# pull out spike times
# calculate the derivative only within target window
# otherwise get spurious detection at ends of stimuli
        # filter with 10 kHz cutoff if constant 200 kHz sample rate (i.e. an experimental trace)
start_idx = np.where(t >= start)[0][0]
stop_idx = np.where(t >= stop)[0][0]
v_target = v[start_idx:stop_idx]
if np.abs(t[1] - t[0] - 5e-6) < 1e-7 and np.var(np.diff(t)) < 1e-6:
b, a = signal.bessel(4, 0.1, "low")
smooth_v = signal.filtfilt(b, a, v_target, axis=0)
dv = np.diff(smooth_v)
else:
dv = np.diff(v_target)
dvdt = dv / (np.diff(t[start_idx:stop_idx]) * 1e3) # in mV/ms
dv_cutoff = 20
thresh_pct = 0.05
spikes = []
        temp_spk_idxs = np.where(np.diff(np.greater_equal(dvdt, dv_cutoff).astype(int)) == 1)[0]  # find positive-going crossings of dv_cutoff (20 mV/ms)
spk_idxs = []
for i, temp in enumerate(temp_spk_idxs):
if i == 0:
spk_idxs.append(temp)
elif np.any(dvdt[temp_spk_idxs[i - 1]:temp] < 0):
# check if the dvdt has gone back down below zero between presumed spike times
# sometimes the dvdt bobbles around detection threshold and produces spurious guesses at spike times
spk_idxs.append(temp)
        spk_idxs = np.array(spk_idxs) + start_idx  # set back to the "index space" of the original trace
# recalculate full dv/dt for feature analysis (vs spike detection)
if np.abs(t[1] - t[0] - 5e-6) < 1e-7 and np.var(np.diff(t)) < 1e-6:
b, a = signal.bessel(4, 0.1, "low")
smooth_v = signal.filtfilt(b, a, v, axis=0)
dv = np.diff(smooth_v)
else:
dv = np.diff(v)
dvdt = dv / (np.diff(t) * 1e3) # in mV/ms
# First time through, accumulate upstrokes to calculate average threshold target
for spk_n, spk_idx in enumerate(spk_idxs):
# Etay defines spike as time of threshold crossing
spk = {}
if spk_n < len(spk_idxs) - 1:
next_idx = spk_idxs[spk_n + 1]
else:
next_idx = stop_idx
if spk_n > 0:
prev_idx = spk_idxs[spk_n - 1]
else:
prev_idx = start_idx
# Find the peak
peak_idx = np.argmax(v[spk_idx:next_idx]) + spk_idx
spk["peak_idx"] = peak_idx
spk["f_peak"] = v[peak_idx]
spk["f_peak_i"] = curr[peak_idx]
spk["f_peak_t"] = t[peak_idx]
# Check if end of stimulus interval cuts off spike - if so, don't process spike
if spk_n == len(spk_idxs) - 1 and peak_idx == next_idx-1:
continue
if spk_idx == peak_idx:
continue # this was bugfix, but why? ramp?
# Determine maximum upstroke of spike
upstroke_idx = np.argmax(dvdt[spk_idx:peak_idx]) + spk_idx
spk["upstroke"] = dvdt[upstroke_idx]
if np.isnan(spk["upstroke"]): # sometimes dvdt will be NaN because of multiple cvode points at same time step
close_idx = upstroke_idx + 1
while (np.isnan(dvdt[close_idx])):
close_idx += 1
spk["upstroke_idx"] = close_idx
spk["upstroke"] = dvdt[close_idx]
spk["upstroke_v"] = v[close_idx]
spk["upstroke_i"] = curr[close_idx]
spk["upstroke_t"] = t[close_idx]
else:
spk["upstroke_idx"] = upstroke_idx
spk["upstroke_v"] = v[upstroke_idx]
spk["upstroke_i"] = curr[upstroke_idx]
spk["upstroke_t"] = t[upstroke_idx]
# Preliminarily define threshold where dvdt = 5% * max upstroke
thresh_pct = 0.05
find_thresh_idxs = np.where(dvdt[prev_idx:upstroke_idx] <= thresh_pct * spk["upstroke"])[0]
if len(find_thresh_idxs) < 1: # Can't find a good threshold value - probably a bad simulation case
# Fall back to the upstroke value
threshold_idx = upstroke_idx
else:
threshold_idx = find_thresh_idxs[-1] + prev_idx
spk["threshold_idx"] = threshold_idx
spk["threshold"] = v[threshold_idx]
spk["threshold_v"] = v[threshold_idx]
spk["threshold_i"] = curr[threshold_idx]
spk["threshold_t"] = t[threshold_idx]
spk["rise_time"] = spk["f_peak_t"] - spk["threshold_t"]
PERIOD = t[1] - t[0]
width_volts = (v[peak_idx] + v[threshold_idx]) / 2
recording_width = False
for i in range(threshold_idx, min(len(v), threshold_idx + int(0.001 / PERIOD))):
if not recording_width and v[i] >= width_volts:
recording_width = True
idx0 = i
elif recording_width and v[i] < width_volts:
spk["half_height_width"] = t[i] - t[idx0]
break
# </KEITH>
# Check for things that are probably not spikes:
# if there is more than 2 ms between the detection event and the peak, don't count it
if t[peak_idx] - t[threshold_idx] > 0.002:
continue
# if the "spike" is less than 2 mV, don't count it
if v[peak_idx] - v[threshold_idx] < 2.0:
continue
            # if the peak voltage does not reach at least -30 mV, don't count it
if v[peak_idx] < -30.0:
continue
spikes.append(spk)
# Refine threshold target based on average of all spikes
if len(spikes) > 0:
threshold_target = np.array([spk["upstroke"] for spk in spikes]).mean() * thresh_pct
for spk_n, spk in enumerate(spikes):
if spk_n < len(spikes) - 1:
next_idx = spikes[spk_n + 1]["threshold_idx"]
else:
next_idx = stop_idx
if spk_n > 0:
prev_idx = spikes[spk_n - 1]["peak_idx"]
else:
prev_idx = start_idx
# Restore variables from before
# peak_idx = spk['peak_idx']
                peak_idx = np.argmax(v[spk['threshold_idx']:next_idx])
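# Standalone sketch (illustrative, not Allen Institute code) of the spike
# detection idea used above: find positive-going crossings of a fixed dv/dt
# cutoff (20 mV/ms). The synthetic trace below is an assumption for the demo.
def _demo_dvdt_spike_detection():
    t = np.arange(0, 0.1, 5e-6)                          # 100 ms sampled at 200 kHz
    v = -65 + 80 * np.exp(-((t - 0.05) / 5e-4) ** 2)     # one spike-like bump
    dvdt = np.diff(v) / (np.diff(t) * 1e3)               # mV/ms
    onsets = np.where(np.diff(np.greater_equal(dvdt, 20).astype(int)) == 1)[0]
    return onsets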
"""Defines the GUI IO file for Usm3d."""
import os
from collections import defaultdict, OrderedDict
import numpy as np
from pyNastran.utils import object_attributes
from pyNastran.utils.numpy_utils import integer_float_types
from pyNastran.converters.usm3d.usm3d_reader import Usm3d
from pyNastran.converters.usm3d.time_accurate_results import get_n_list
from pyNastran.gui.gui_objects.gui_result import GuiResult
from pyNastran.gui.utils.vtk.vtk_utils import (
create_vtk_cells_of_constant_element_type, numpy_to_vtk_points)
class Usm3dIO:
def __repr__(self):
return '<Usm3dIO class>'
def __init__(self, gui):
self.gui = gui
assert gui is not None
def get_usm3d_wildcard_geometry_results_functions(self):
data = ('Usm3D',
'USM3D (*.cogsg; *.front)', self.load_usm3d_geometry,
'Usm3d (*.flo)', self.load_usm3d_results)
return data
def on_reload_usm3d(self):
"""
For USM3D, we dynamically load the latest CFD results time step,
        which is really handy when you're running a job.
"""
# minimum is 1
nstep = 100
if self.gui.out_filename is None:
msg = 'usm3d_filename=%r must not be None\n' % self.gui.out_filename
dir_gui = []
for key in object_attributes(self.gui):
try:
value = getattr(self.gui, key)
except KeyError:
# self.edge_actor is a
if key not in ['edge_actor']:
self.gui.log.warning('key=%s is undefined...' % key)
if isinstance(value, (integer_float_types, str)):
dir_gui.append(key)
dir_gui.sort()
msg += 'dir(gui) = [%s]' % ', '.join(dir_gui)
raise RuntimeError(msg)
flo_filename = self.gui.out_filename
dirname = os.path.dirname(flo_filename)
if dirname == '':
dirname = os.getcwd()
basename = os.path.basename(flo_filename)
base = os.path.splitext(basename)[0]
# box.flo -> box_100.flo
if '_' in base:
model_name, n = base.rsplit('_', 1)
#print("model_name=%r n=%r" % (model_name, n))
n = int(n)
n_list = get_n_list(dirname, model_name)
inn = n_list.index(n)
if inn+nstep < len(n_list):
nnew = n_list[inn+nstep]
else:
nnew = max(n_list)
if nnew == n:
raise RuntimeError('%r is the last file' % self.gui.out_filename)
#print("inn=%r nnew=%r" % (inn, nnew))
flo_filename = model_name + '_%s.flo' % nnew
else:
flo_filename = self.gui.out_filename
#msg = (
#'The current file is must have the format of '
#'xxx_%%i.flo, not %r' % self.out_filename)
#raise RuntimeError(msg)
#print("loading %r" % flo_filename)
self.load_usm3d_results(flo_filename)
self.gui.out_filename = os.path.join(dirname, flo_filename)
#print("done stepping...")
#def _get_next_n(self, base):
#n = int(n)
## get the max N value
#nmax = -1
#for flo_filename in flo_filenames:
#base, ext = os.path.splitext(flo_filename)
#if ext == '.flo':
#n = base.split('_')[-1]
#try: # get the incrementation index
#n = int(n)
#if n > nold:
#return n
#except:
#raise NotImplementedError()
#return None
def load_usm3d_results(self, flo_filename):
model = Usm3d(log=self.gui.log, debug=False)
npoints = self.gui.nnodes
unused_node_ids_volume, loads = model.read_flo(flo_filename, n=npoints)
cases = self.gui.result_cases
form = self.gui.get_form()
bcs = None
mapbc = None
bcmap_to_bc_name = None
self._fill_usm3d_results(cases, form,
bcs, mapbc, bcmap_to_bc_name, loads,
is_geometry=False)
def load_usm3d_geometry(self, cogsg_filename, name='main', plot=True):
model_name = name
skip_reading = self.gui._remove_old_geometry(cogsg_filename)
if skip_reading:
return
self.gui.eid_maps[name] = {}
self.gui.nid_maps[name] = {}
model = Usm3d(log=self.gui.log, debug=False)
base_filename, ext = os.path.splitext(cogsg_filename)
#node_filename = base_filename + '.node'
#ele_filename = base_filename + '.ele'
if ext == '.cogsg':
dimension_flag = 3
#elif ext == '.ele':
#dimension_flag = 3
else:
raise RuntimeError('unsupported extension. Use "cogsg" or "front".')
read_loads = True
nodes, tris_tets, tris, bcs, mapbc, loads, flo_filename = model.read_usm3d(
base_filename, dimension_flag, read_loads=read_loads)
del tris_tets
nodes = model.nodes
tris = model.tris
tets = model.tets
bcs = model.bcs
mapbc = model.mapbc
loads = model.loads
self.gui.out_filename = None
if flo_filename is not None:
self.gui.out_filename = flo_filename
bcmap_to_bc_name = model.bcmap_to_bc_name
self.gui.nnodes = nodes.shape[0]
ntris = 0
ntets = 0
if tris is not None:
ntris = tris.shape[0]
if dimension_flag == 2:
pass
elif dimension_flag == 3:
ntets = tets.shape[0]
ntets = 0
else:
raise RuntimeError()
self.gui.nelements = ntris + ntets
self.gui.log.debug("nnodes = %i" % self.gui.nnodes)
self.gui.log.debug("nelements = %i" % self.gui.nelements)
grid = self.gui.grid
grid.Allocate(self.gui.nelements, 1000)
self.gui.nid_map = {}
self.gui.eid_map = {}
assert nodes is not None
nnodes = nodes.shape[0]
node_ids = np.arange(1, nnodes + 1, dtype='int32')
points = numpy_to_vtk_points(nodes)
if ntris:
element_ids = np.arange(1, ntris + 1, dtype='int32')
etype = 5 # vtkTriangle().GetCellType()
create_vtk_cells_of_constant_element_type(grid, tris, etype)
else:
ntets = tets.shape[0]
element_ids = np.arange(1, ntets + 1, dtype='int32')
if dimension_flag == 2:
pass
elif dimension_flag == 3:
if ntets:
etype = 10 # vtkTetra().GetCellType()
assert tets.max() > 0, tets.min()
create_vtk_cells_of_constant_element_type(grid, tets, etype)
else:
raise RuntimeError('dimension_flag=%r' % dimension_flag)
grid.SetPoints(points)
grid.Modified()
self.gui.node_ids = node_ids
self.gui.element_ids = element_ids
# regions/loads
self.gui.scalar_bar_actor.Modified()
cases = OrderedDict()
form = []
form, cases = self._fill_usm3d_results(cases, form,
bcs, mapbc, bcmap_to_bc_name, loads,
is_geometry=True)
self.gui._finish_results_io2(model_name, form, cases)
def clear_usm3d(self):
"""dummy function"""
pass
def _fill_usm3d_results(self, cases, form,
bcs, mapbc, bcmap_to_bc_name, loads,
is_geometry=True):
"""sets up usm3d results"""
if 'Mach' in loads:
avg_mach = loads['Mach'].mean()
note = ': avg(Mach)=%g' % avg_mach
else:
note = ''
self.gui.isubcase_name_map = {
1: ['Usm3d%s' % note, ''],
2: ['Usm3d%s' % note, ''],
}
form, cases = self._fill_usm3d_case(
cases, form,
bcs, mapbc, bcmap_to_bc_name, loads,
is_geometry=is_geometry)
return form, cases
def _fill_usm3d_case(self, cases, form,
bcs, mapbc, bcmap_to_bc_name, loads, is_geometry=True):
"""actually fills the sidebar"""
self.gui.scalar_bar_actor.VisibilityOff()
colormap = self.gui.settings.colormap
subcasemap_id = 1
icase = len(cases)
itime = 0
if is_geometry:
assert self.gui.element_ids is not None, self.gui.element_ids
assert len(self.gui.element_ids) > 0, self.gui.element_ids
eid_res = GuiResult(
subcasemap_id, 'ElementID', 'ElementID', 'centroid', self.gui.element_ids,
nlabels=None, labelsize=None, ncolors=None, colormap=colormap,
data_format='%i', uname='GuiResult')
cases[icase] = (eid_res, (itime, 'ElementID'))
form.append(('ElementID', icase, []))
icase += 1
if bcs is not None:
region_res = GuiResult(
subcasemap_id, 'Patch', 'Patch', 'centroid', bcs, # patch_id
nlabels=None, labelsize=None, ncolors=None, colormap=colormap,
data_format='%i', uname='GuiResult')
cases[icase] = (region_res, (itime, 'Patch'))
form.append(('Patch', icase, []))
icase += 1
if bcs is not None:
patch_id = bcs
form += [
('BC', icase, []),
('Family', icase + 1, []),
]
bc_value = np.zeros(bcs.shape, dtype='int32')
family = np.zeros(bcs.shape, dtype='int32')
mapbc_print = defaultdict(list)
for region, mapi in sorted(mapbc.items()):
bcnum = mapi[0]
familyi = mapi[1]
mapbc_print[bcnum].append(region)
try:
name = bcmap_to_bc_name[bcnum]
except KeyError:
name = '???'
#self.log.info('Region=%i BC=%s name=%r' % (region, bcnum, name))
                ipatch = np.where(patch_id == region)
'''
=====================================
Author : <NAME>
Date : Feb. 16, 2020
Location: UC San Diego, La Jolla, CA
=====================================
'''
import numpy as np
import cvxpy as cp
class OptionsClass:
"""
Options Class
"""
def __init__(self):
self.options = None
self.solverName = 'None'
def set_option(self, key, value):
try:
if type(value) is self.options[key][2]:
self.options[key][0] = value
else:
print(f"The type of value for the keyword '{key}' should be '{self.options[key][2]}'.")
except:
raise ValueError('Incorrect option keyword or type: ' + key)
def get_option(self, key):
try:
value = self.options[key][0]
return value
except:
raise ValueError('Incorrect option keyword: ' + key)
def reset_options(self, key):
try:
self.options[key] = self.options[key][1]
except:
raise ValueError('Incorrect option keyword: ' + key)
class CbfClfQpOptions(OptionsClass):
def __init__(self):
OptionsClass.__init__(self)
self.setup()
self.solver_name = 'CBF-CLF'
def setup(self):
self.options = {
# [Current value, default value, type]
'u_max': [None, None, np.ndarray],
'u_min': [None, None, np.ndarray],
'clf_lambda': [None, 5, float],
'cbf_gamma': [None, 5, float],
'weight_input': [None, None, np.ndarray],
'weight_slack': [None, 2e-2, float],
}
# def define_slack(self):
# TODO
class CbfClfQp:
"""
This is the implementation of the vanilla CBF-CLF-QP method. The optimization problem is:
min (u-u_ref).T * H * (u-u_ref) + p * delta**2
s.t. L_f V(x) + L_g V(x) * u + lambda * V(x) <= delta ---> CLF constraint
L_f B(x) + L_g B(x) * u + gamma * B(x) >= 0 ---> CBF constraint
Input:
:param system : The dynamic system of interest, containing CBF, CLF, and their Lie derivatives
:param x : The current state x
:param u_ref : The reference control input
:param slack : The slack activated or not, 1 -> activate while 0 -> not activate
:param verbose : Show the optimization log or not
"""
def __init__(self, system, option_class):
if hasattr(system, 'udim'):
self.udim = system.udim
else:
raise KeyError('udim is not given in the system dynamic!')
self.cbf = system.cbf
# todo check lf.lg/cbf clfs symbolic expression and their size!
self.lf_cbf = system.lf_cbf
self.lg_cbf = system.lg_cbf
self.clf = system.clf
self.lf_clf = system.lf_clf
self.lg_clf = system.lg_clf
# todo take input from the option class
self.weight_input = np.atleast_2d(option_class.get_option('weight_input'))
self.weight_slack = np.atleast_2d(option_class.get_option('weight_slack'))
self.H = None
self.slack_H = None
# todo
self.A = None
self.b = None
# Hyperparameters: CLF <- Lambda & CBF <- Gamma
self.clf_lambda = option_class.get_option('clf_lambda')
self.cbf_gamma = option_class.get_option('cbf_gamma')
self.u_max = option_class.get_option('u_max')
if self.u_max.shape != (self.udim,):
            raise ValueError('The size of u_max should be (udim,), a one-dimensional vector.')
self.u_min = option_class.get_option('u_min')
if self.u_min.shape != (self.udim,):
            raise ValueError('The size of u_min should be (udim,), a one-dimensional vector.')
self.with_slack = None
def cbf_clf_qp(self, x, u_ref=None, with_slack=1, verbose=0):
"""
:param x : The current state
:param u_ref : A real number of 1D vector with shape (udim,)
:param with_slack: Indicator if there is slack variable
:param verbose : Indicator if QP info is displayed
:return:
"""
inf = np.inf
self.with_slack = with_slack
slack = None
if u_ref is None:
u_ref = np.zeros(self.udim)
else:
if u_ref.shape != (self.udim,):
raise ValueError(f'u_ref should have the shape size (u_dim,), now it is {u_ref.shape}')
# Read the weight input and build up the matrix H in the cost function
if self.weight_input.shape == (1, 1):
# Weight input is a scalar
self.H = self.weight_input * np.eye(self.udim)
elif self.weight_input.shape == (self.udim, 1):
# Weight_input is a vector, use it to form the diagonal of the H matrix
            self.H = np.diag(self.weight_input.flatten())
elif self.weight_input.shape == (self.udim, self.udim):
# Weight_input is a udim * udim matrix
self.H = np.copy(self.weight_input)
else:
self.H = np.eye(self.udim)
V = self.clf(x)
lf_V = self.lf_clf(x)
lg_V = self.lg_clf(x)
B = self.cbf(x)
lf_B = self.lf_cbf(x)
lg_B = self.lg_cbf(x)
if self.with_slack:
# slack variable is activated
# Constraints: A [u; slack] <= b
# LfV + LgV * u + lambda * V <= slack
# LfB + LgB * u + gamma * B >= 0
            lg_V = np.hstack((lg_V, -np.ones((1, 1))))
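# A minimal sketch (an assumption, not the CbfClfQp implementation above) of how
# the QP from the class docstring could be assembled and solved with cvxpy.
# Every argument name here is illustrative; H must be symmetric positive semidefinite.
def _demo_cbf_clf_qp(H, p, u_ref, V, lf_V, lg_V, B, lf_B, lg_B,
                     clf_lambda, cbf_gamma, u_min, u_max):
    u = cp.Variable(u_ref.shape[0])
    delta = cp.Variable()
    objective = cp.Minimize(cp.quad_form(u - u_ref, H) + p * cp.square(delta))
    constraints = [
        lf_V + lg_V @ u + clf_lambda * V <= delta,  # relaxed CLF constraint
        lf_B + lg_B @ u + cbf_gamma * B >= 0,       # CBF constraint
        u >= u_min,
        u <= u_max,
    ]
    cp.Problem(objective, constraints).solve()
    return u.value, delta.value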
import numpy as np
import warnings
warnings.filterwarnings("ignore")
def knee_pt(y, x=None):
x_was_none = False
use_absolute_dev_p = True
res_x = np.nan
idx_of_result = np.nan
if type(y) is not np.ndarray:
print('knee_pt: y must be a numpy 1D vector')
return res_x, idx_of_result
else:
if y.ndim >= 2:
print('knee_pt: y must be 1 dimensional')
return res_x, idx_of_result
if np.size(y) == 0:
print('knee_pt: y can not be an empty vector')
return res_x, idx_of_result
else:
if x is None:
x_was_none = True
                x = np.arange(1, np.amax(y.shape) + 1, dtype=int)
if x.shape != y.shape:
print('knee_pt: y and x must have the same dimensions')
return res_x, idx_of_result
if y.size < 3:
res_x, idx_of_result = np.min(y), np.argmin(y)
return res_x, idx_of_result
if np.all(np.diff(x) >= 0) and (not x_was_none):
                idx = np.argsort(x)
                y = y[idx]
                x = x[idx]
else:
idx = np.arange(0, np.amax(x.shape))
sigma_xy = np.cumsum(np.multiply(x, y), axis=0)
sigma_x = np.cumsum(x, axis=0)
            sigma_y = np.cumsum(y, axis=0)
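# Usage sketch: assumes the truncated knee_pt body above is completed as in the
# original MATLAB-style knee_pt. `y_demo` is synthetic assumption data.
if __name__ == "__main__":
    y_demo = np.array([10.0, 5.0, 3.0, 2.0, 1.8, 1.7, 1.65, 1.6, 1.58, 1.57])
    res_x, idx_of_result = knee_pt(y_demo)
    print("knee value:", res_x, "at index:", idx_of_result)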
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os.path
from os import stat
import cv2.cv2
import torch
from typing import Any, List, Optional, Union
import numpy as np
from gym import spaces
import habitat_sim
import numpy as np
import scipy.ndimage as nd
from matplotlib.transforms import Affine2D
from mapper.map import convert_midlevel_to_map
from mapper.mid_level.decoder import UpResNet
from mapper.mid_level.encoder import mid_level_representations
from mapper.mid_level.fc import FC
from mapper.transform import egomotion_transform
from mapper.update import update_map
try:
import cupy
import cupyx.scipy.ndimage as ndc
CUPYAVAILABLE = True
print('Using cupyx')
except ImportError:
print("cuda not enabled for affine transforms")
CUPYAVAILABLE = False
import habitat
from config.config import MAP_DIMENSIONS, MAP_SIZE, MAP_DOWNSAMPLE, DATASET_SAVE_PERIOD, DATASET_SAVE_FOLDER, \
START_IMAGE_NUMBER, MID_LEVEL_DIMENSIONS, DEBUG, REPRESENTATION_NAMES, device, RESIDUAL_LAYERS_PER_BLOCK, \
RESIDUAL_NEURON_CHANNEL, RESIDUAL_SIZE, STRIDES, BATCHSIZE
from habitat.core.dataset import Episode
from habitat.core.logging import logger
from habitat.core.registry import registry
from habitat.core.simulator import (
AgentState,
Config,
DepthSensor,
Observations,
RGBSensor,
SemanticSensor,
Sensor,
SensorSuite,
ShortestPathPoint,
Simulator,
SensorTypes,
)
from habitat.core.spaces import Space
from habitat.utils import profiling_utils
from habitat.utils.visualizations import fog_of_war, maps
import matplotlib.pyplot as plt
from habitat.utils.visualizations.maps import quat_to_angle_axis
RGBSENSOR_DIMENSION = 3
def overwrite_config(config_from: Config, config_to: Any) -> None:
r"""Takes Habitat-API config and Habitat-Sim config structures. Overwrites
Habitat-Sim config with Habitat-API values, where a field name is present
in lowercase. Mostly used to avoid :ref:`sim_cfg.field = hapi_cfg.FIELD`
code.
Args:
config_from: Habitat-API config node.
config_to: Habitat-Sim config structure.
"""
def if_config_to_lower(config):
if isinstance(config, Config):
return {key.lower(): val for key, val in config.items()}
else:
return config
for attr, value in config_from.items():
if hasattr(config_to, attr.lower()):
setattr(config_to, attr.lower(), if_config_to_lower(value))
def check_sim_obs(obs, sensor):
assert obs is not None, (
"Observation corresponding to {} not present in "
"simulator's observations".format(sensor.uuid)
)
@registry.register_sensor
class HabitatSimRGBSensor(RGBSensor):
sim_sensor_type: habitat_sim.SensorType
def __init__(self, sim, config):
self._sim = sim
self.sim_sensor_type = habitat_sim.SensorType.COLOR
super().__init__(config=config)
self.image_number = 0
self.prev_pose = None
def _get_observation_space(self, *args: Any, **kwargs: Any):
return spaces.Box(
low=0,
high=255,
shape=(self.config.HEIGHT, self.config.WIDTH, RGBSENSOR_DIMENSION),
dtype=np.uint8,
)
def get_observation(self, sim_obs):
obs = sim_obs.get(self.uuid, None)
check_sim_obs(obs, self)
# remove alpha channel
obs = obs[:, :, :RGBSENSOR_DIMENSION]
# if self.image_number % DATASET_SAVE_PERIOD == 0:
# print('Saving RGB image: ', self.image_number)
# plt.imsave(os.path.join(DATASET_SAVE_FOLDER, 'images', f'rgb_{self.current_scene_name}_{str((self.image_number // DATASET_SAVE_PERIOD) + START_IMAGE_NUMBER)}.jpeg'), obs)
self.image_number = self.image_number + 1
return obs
@registry.register_sensor(name='MIDLEVEL')
class HabitatSimMidLevelSensor(Sensor):
""" Holds mid level encodings """
sim_sensor_type: habitat_sim.SensorType
def __init__(self, sim, config):
self._sim = sim
self.sim_sensor_type = habitat_sim.SensorType.NONE
super().__init__(config=config)
def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
return 'midlevel'
def _get_sensor_type(self, *args: Any, **kwargs: Any) -> SensorTypes:
return self.sim_sensor_type
def _get_observation_space(self, *args: Any, **kwargs: Any):
return spaces.Box(
low=0,
high=255,
shape=MID_LEVEL_DIMENSIONS,
dtype=np.uint8,
)
def get_observation(self, sim_obs):
obs = sim_obs.get('rgb', None)
check_sim_obs(obs, self)
# remove alpha channel
obs = obs[:, :, :RGBSENSOR_DIMENSION]
obs = torch.Tensor(obs)
obs = obs.to(device)
obs = torch.transpose(obs, 0, 2)
obs = obs.unsqueeze(0)
if DEBUG:
print(f"Encoding image of shape {obs.shape} with mid level encoders.")
obs = mid_level_representations(obs, REPRESENTATION_NAMES)
if DEBUG:
print(f'Returning encoded representation of shape {obs.shape}.')
sim_obs['midlevel'] = obs
obs = obs[0, :, :, :]
return obs
@registry.register_sensor(name="EGOMOTION")
class AgentPositionSensor(Sensor):
def __init__(self, sim, config):
self.sim_sensor_type = habitat_sim.SensorType.NONE
super().__init__(config=config)
self._sim = sim
self.prev_pose = None
# Defines the name of the sensor in the sensor suite dictionary
def _get_uuid(self, *args, **kwargs):
return "egomotion"
# Defines the type of the sensor
def _get_sensor_type(self, *args, **kwargs):
return self.sim_sensor_type
# Defines the size and range of the observations of the sensor
def _get_observation_space(self, *args, **kwargs):
return spaces.Box(
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
shape=(1,1,3),
dtype=np.float32,
)
# This is called whenver reset is called or an action is taken
def get_observation(self, sim_obs) -> Any:
pos = (self._sim.get_agent_state().position[0],self._sim.get_agent_state().position[2])
sim_quat = self._sim.get_agent_state().rotation
alpha = -quat_to_angle_axis(sim_quat)[0] + np.pi/2
state = np.array([pos[0],pos[1],alpha])
if self.prev_pose is None:
self.prev_pose = state
initial_displacement = torch.Tensor(np.zeros((1, 1, 3)))
initial_displacement = initial_displacement.to(device)
sim_obs['egomotion'] = initial_displacement
return initial_displacement
world_displacement = state - self.prev_pose # displacement in the world frame
world_to_robot_transformation_matrix = Affine2D().rotate_around(0, 0, np.pi/2-self.prev_pose[2]).get_matrix() # negative rotation to compensate for positive rotation
robot_displacement = torch.Tensor(world_to_robot_transformation_matrix @ world_displacement)
robot_displacement = robot_displacement.to(device)
robot_displacement = torch.unsqueeze(robot_displacement, 0)
robot_displacement = torch.unsqueeze(robot_displacement, 0)
self.prev_pose = state
sim_obs['egomotion'] = robot_displacement
return robot_displacement
@registry.register_sensor(name='MIDLEVEL_MAP_SENSOR')
class HabitatSimMidLevelMapSensor(Sensor):
""" Holds the map generated from mid level representations. """
sim_sensor_type: habitat_sim.SensorType
def __init__(self, sim, config):
self._sim = sim
self.sim_sensor_type = habitat_sim.SensorType.NONE
super().__init__(config=config)
# zero confidence, so this is not taken into account in first map update.
self.previous_map = torch.zeros((BATCHSIZE, *MAP_DIMENSIONS))
self.previous_map = self.previous_map.to(device)
# self.previous_map.requires_grad_(True)
self.fc = FC()
self.fc.to(device)
self.upresnet = UpResNet(
layers=RESIDUAL_LAYERS_PER_BLOCK,
channels=RESIDUAL_NEURON_CHANNEL,
sizes=RESIDUAL_SIZE,
strides=STRIDES
)
self.upresnet.to(device)
def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
return 'midlevel_map'
def _get_sensor_type(self, *args: Any, **kwargs: Any) -> SensorTypes:
return self.sim_sensor_type
def _get_observation_space(self, *args: Any, **kwargs: Any):
return spaces.Box(
low=0,
high=1,
shape=MAP_DIMENSIONS,
dtype=np.float32,
)
def get_observation(self, sim_obs):
# return previous map for policy, but ensure to calculate the new map for the next update
return_value = self.previous_map.clone()
midlevel_obs = sim_obs["midlevel"]
egomotion_obs = sim_obs["egomotion"]
decoded_map = convert_midlevel_to_map(midlevel_obs, self.fc, self.upresnet)
dx = egomotion_obs
previous_map = egomotion_transform(self.previous_map, dx)
with torch.no_grad():
new_map = update_map(decoded_map, previous_map)
self.previous_map = new_map
return return_value[0, :, :, :]
@registry.register_sensor(name='MAP_SENSOR')
class HabitatSimMapSensor(Sensor):
sim_sensor_type: habitat_sim.SensorType
"""
Custom class to create a map sensor.
"""
def __init__(self, sim, config):
# self.sim_sensor_type = habitat_sim.SensorType.TENSOR ----> TENSOR DOESN'T EXIST IN 2019 TENSORFLOW :(
self.sim_sensor_type = habitat_sim.SensorType.COLOR
super().__init__(config=config)
self._sim = sim
self.image_number = 0
self.cone = self.vis_cone((MAP_DIMENSIONS[1], MAP_DIMENSIONS[2]), np.pi/1.1)
self.map_scale_factor = 4
self.map_upsample_factor = 2
self.global_map = None
self.origin = None
self.displacements = []
# Defines the name of the sensor in the sensor suite dictionary
def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
return 'map'
# Defines the type of the sensor
def _get_sensor_type(self, *args: Any, **kwargs: Any) -> SensorTypes:
return self.sim_sensor_type
# Defines the size and range of the observations of the sensor
def _get_observation_space(self, *args: Any, **kwargs: Any) -> Space:
return spaces.Box(
low=0,
high=1,
shape=(MAP_DIMENSIONS[1], MAP_DIMENSIONS[2], MAP_DIMENSIONS[0]),
dtype=np.uint8,
)
def vis_cone(self, map_size, fov):
cone = np.zeros(map_size)
ci = np.floor(map_size[0]/2)
cj = np.floor(map_size[1]/2)
for ii in range(map_size[0]):
for jj in range(map_size[1]):
di = ii - ci
dj = jj - cj
                angle = np.arctan2(dj, -di)
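# A vectorized sketch of the same visibility cone (assumption: the truncated
# loop above marks map cells whose bearing from the centre lies within +/- fov/2).
# The function name `vis_cone_vectorized` is illustrative, not part of the class.
def vis_cone_vectorized(map_size, fov):
    ci, cj = np.floor(map_size[0] / 2), np.floor(map_size[1] / 2)
    ii, jj = np.meshgrid(np.arange(map_size[0]), np.arange(map_size[1]), indexing="ij")
    angle = np.arctan2(jj - cj, -(ii - ci))
    return (np.abs(angle) <= fov / 2).astype(np.float32)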
import numpy as np
import matplotlib.pyplot as plt
from skimage.measure import label, regionprops
import skimage.io as io
import skimage.filters as filt
import skimage.morphology as morph
from skimage.draw import disk
import amglib.imageutils as amg
import pandas as pd
from skimage.feature import match_template
def get_dot_template(img, roi,se=morph.disk(5)) :
"""
Extracts sub image from the region given by a ROI
Parameters
----------
img : ndarray (2D)
The source image
roi : array
Region of interest to extract. Organized as (x0,y0,x1,y1)
se : ndarray (2D)
Structure element for the median filter used to remove outliers from the template.
Returns
-------
ndarray (2D)
The template image
"""
template=filt.median(img[roi[0]:roi[2],roi[1]:roi[3]],se)
return template
def get_black_bodies(img, greythres,areas = [2000,4000],R=2) :
bb=img<greythres
lbb=label(bb)
plt.imshow(bb)
    mask = np.zeros(lbb.shape)
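# A usage sketch (an assumption, not part of the original module): locate dots by
# normalized cross-correlation with the template from get_dot_template.
# The helper name `find_dots` and the threshold value are illustrative.
def find_dots(img, template, threshold=0.8):
    # match_template returns a correlation map the same size as img when
    # pad_input=True; peaks above `threshold` mark candidate dot centres.
    score = match_template(img, template, pad_input=True)
    return np.column_stack(np.where(score > threshold))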
"""
File: waveforms_flux_dev.py
Author: <NAME>
Purpose: generate flux CZ gate waveforms
Prerequisites:
Usage:
Bugs:
"""
import numpy as np
import logging
log = logging.getLogger(__name__)
def victor_waveform(
fluxlutman,
which_gate: str,
sim_ctrl_cz=None,
return_dict=False,
force_start_end_swtspt=True,
):
    # NB: the ramps are extra time, they are NOT subtracted from sq_length!
amp_at_sweetspot = 0.0
amp_at_int_11_02 = fluxlutman.calc_eps_to_amp(
0, state_A="11", state_B="02", which_gate=which_gate
) / ( fluxlutman.cfg_awg_channel_range() / 2 * fluxlutman.cfg_awg_channel_amplitude() )
if fluxlutman.get("czv_fixed_amp_{}".format(which_gate)):
amp_at_int_11_02 = 0.5
sampling_rate = fluxlutman.sampling_rate()
# New parameters specific to this parameterization
time_ramp_middle = fluxlutman.get("czv_time_ramp_middle_{}".format(which_gate))
time_ramp_outside = fluxlutman.get("czv_time_ramp_outside_{}".format(which_gate))
speed_limit = fluxlutman.get("czv_speed_limit_{}".format(which_gate))
total_time = fluxlutman.get("czv_total_time_{}".format(which_gate))
invert_polarity = fluxlutman.get("czv_invert_polarity_{}".format(which_gate))
norm_sq_amp_par = fluxlutman.get("czv_sq_amp_{}".format(which_gate))
time_q_ph_corr = fluxlutman.get("czv_time_q_ph_corr_{}".format(which_gate))
amp_q_ph_corr = fluxlutman.get("czv_amp_q_ph_corr_{}".format(which_gate))
dt = 1 / sampling_rate
half_time_ramp_middle = time_ramp_middle / 2.0
half_time_sq = speed_limit / 2.0
half_time_q_ph_corr = time_q_ph_corr / 2.0
half_time_at_swtspt = (
total_time - time_ramp_middle - 2 * time_ramp_outside - speed_limit
) / 2.0
if half_time_at_swtspt < 0:
raise ValueError(
"Total time is not enough to accomodate for speed "
"limit and pulse ramps!"
)
half_total_time = (
half_time_at_swtspt + half_time_ramp_middle + half_time_sq + time_ramp_outside
)
time = np.arange(0.0, half_total_time, dt)
t1 = half_time_at_swtspt
t2 = t1 + half_time_ramp_middle
t3 = t2 + half_time_sq
conditions = [time <= t1, time > t1, time >= t2, time > t3]
funcs = [
lambda x: amp_at_sweetspot,
lambda x: (x - half_time_at_swtspt) * norm_sq_amp_par / half_time_ramp_middle,
lambda x: norm_sq_amp_par,
lambda x: -(x - t3) * norm_sq_amp_par / time_ramp_outside + norm_sq_amp_par,
]
half_NZ_amps = np.piecewise(time, conditions, funcs)
if fluxlutman.get("czv_correct_q_phase_{}".format(which_gate)):
# Insert extra square part to correct single qubit phase
insert_idx = np.where(half_NZ_amps >= amp_q_ph_corr)[0][-1] + 1
amps_q_phase_correction = np.full(int(half_time_q_ph_corr / dt), amp_q_ph_corr)
half_NZ_amps = np.insert(half_NZ_amps, insert_idx, amps_q_phase_correction)
amp = np.concatenate((np.flip(half_NZ_amps, 0), -half_NZ_amps[1:]))
# Extra points for starting and finishing at the sweetspot
if force_start_end_swtspt and amp[0] != 0.0:
amp = np.concatenate(([amp_at_sweetspot], amp, [amp_at_sweetspot]))
if invert_polarity:
amp = -amp
amp = amp_at_int_11_02 * amp
tlist = np.cumsum(np.full(len(amp) - 1, dt))
tlist = np.concatenate(([0.0], tlist)) # Set first point to have t=0
# Extra processing in case we are generating waveform for simulations
if sim_ctrl_cz is not None:
dt_num = np.size(tlist) - 1
dt_num_interp = dt_num * sim_ctrl_cz.simstep_div() + 1
        time_interp = np.linspace(tlist[0], tlist[-1], dt_num_interp)
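# A small illustrative check (an assumption, not part of the lutman code): the
# mirrored construction above makes the pulse "net zero", i.e. the positive and
# negative halves cancel. Demonstrated on a toy half-waveform.
def _demo_net_zero():
    half = np.array([0.0, 0.2, 0.5, 0.5, 0.3])
    full = np.concatenate((np.flip(half, 0), -half[1:]))
    return full.sum()  # 0.0 by construction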
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2021 Scipp contributors (https://github.com/scipp)
# @file
# @author <NAME>
import numpy as np
import pytest
import scipp as sc
from .common import assert_export
def make_variables():
data = np.arange(1, 4, dtype=float)
a = sc.Variable(dims=['x'], values=data)
b = sc.Variable(dims=['x'], values=data)
a_slice = a['x', :]
b_slice = b['x', :]
return a, b, a_slice, b_slice, data
def test_astype():
var = sc.Variable(dims=['x'],
values=np.array([1, 2, 3, 4], dtype=np.int64),
unit='s')
assert var.dtype == sc.dtype.int64
assert var.unit == sc.units.s
for target_dtype in (sc.dtype.float64, float, 'float64'):
var_as_float = var.astype(target_dtype)
assert var_as_float.dtype == sc.dtype.float64
assert var_as_float.unit == sc.units.s
def test_astype_bad_conversion():
var = sc.Variable(dims=['x'], values=np.array([1, 2, 3, 4], dtype=np.int64))
assert var.dtype == sc.dtype.int64
for target_dtype in (sc.dtype.string, str, 'str'):
with pytest.raises(sc.DTypeError):
var.astype(target_dtype)
def test_astype_datetime():
var = sc.arange('x', np.datetime64(1, 's'), np.datetime64(5, 's'))
assert var.dtype == sc.dtype.datetime64
assert var.unit == sc.units.s
for target_dtype in (sc.dtype.datetime64, np.datetime64, 'datetime64',
'datetime64[s]'):
same = var.astype(target_dtype)
assert same.dtype == sc.dtype.datetime64
assert same.unit == sc.units.s
def test_astype_datetime_different_unit():
var = sc.arange('x', np.datetime64(1, 's'), np.datetime64(5, 's'))
assert var.unit == sc.units.s
with pytest.raises(sc.UnitError):
var.astype('datetime64[ms]')
def test_operation_with_scalar_quantity():
reference = sc.Variable(dims=['x'], values=np.arange(4.0) * 1.5)
reference.unit = sc.units.kg
var = sc.Variable(dims=['x'], values=np.arange(4.0))
var *= sc.scalar(1.5, unit=sc.units.kg)
assert sc.identical(reference, var)
def test_single_dim_access():
var = sc.Variable(dims=['x'], values=[0.0])
assert var.dim == 'x'
def test_0D_scalar_access():
var = sc.Variable(dims=(), values=0.0)
assert var.value == 0.0
var.value = 1.2
assert var.value == 1.2
assert var.values.shape == ()
assert var.values == 1.2
def test_0D_scalar_string():
var = sc.scalar('a')
assert var.value == 'a'
var.value = 'b'
assert sc.identical(var, sc.scalar('b'))
def test_1D_scalar_access_fail():
var = sc.empty(dims=['x'], shape=(1, ))
with pytest.raises(RuntimeError):
assert var.value == 0.0
with pytest.raises(RuntimeError):
var.value = 1.2
def test_1D_access_shape_mismatch_fail():
var = sc.empty(dims=['x'], shape=(2, ))
with pytest.raises(RuntimeError):
var.values = 1.2
def test_1D_access():
var = sc.empty(dims=['x'], shape=(2, ))
assert len(var.values) == 2
assert var.values.shape == (2, )
var.values[1] = 1.2
assert var.values[1] == 1.2
def test_1D_set_from_list():
var = sc.empty(dims=['x'], shape=(2, ))
var.values = [1.0, 2.0]
assert sc.identical(var, sc.Variable(dims=['x'], values=[1.0, 2.0]))
def test_1D_string():
var = sc.Variable(dims=['x'], values=['a', 'b'])
assert len(var.values) == 2
assert var.values[0] == 'a'
assert var.values[1] == 'b'
var.values = ['c', 'd']
assert sc.identical(var, sc.Variable(dims=['x'], values=['c', 'd']))
def test_1D_converting():
var = sc.Variable(dims=['x'], values=[1, 2])
var.values = [3.3, 4.6]
# floats get truncated
assert sc.identical(var, sc.Variable(dims=['x'], values=[3, 4]))
def test_1D_dataset():
var = sc.empty(dims=['x'], shape=(2, ), dtype=sc.dtype.Dataset)
d1 = sc.Dataset(data={'a': 1.5 * sc.units.m})
d2 = sc.Dataset(data={'a': 2.5 * sc.units.m})
var.values = [d1, d2]
assert sc.identical(var.values[0], d1)
assert sc.identical(var.values[1], d2)
def test_1D_access_bad_shape_fail():
var = sc.empty(dims=['x'], shape=(2, ))
with pytest.raises(RuntimeError):
var.values = np.arange(3)
def test_2D_access():
var = sc.empty(dims=['x', 'y'], shape=(2, 3))
assert var.values.shape == (2, 3)
assert len(var.values) == 2
assert len(var.values[0]) == 3
var.values[1] = 1.2 # numpy assigns to all elements in "slice"
var.values[1][2] = 2.2
assert var.values[1][0] == 1.2
assert var.values[1][1] == 1.2
assert var.values[1][2] == 2.2
def test_2D_access_bad_shape_fail():
var = sc.empty(dims=['x', 'y'], shape=(2, 3))
with pytest.raises(RuntimeError):
var.values = np.ones(shape=(3, 2))
def test_2D_access_variances():
shape = (2, 3)
var = sc.Variable(dims=['x', 'y'],
values=np.full(shape, 29.0),
variances=np.zeros(shape))
assert var.values.shape == (2, 3)
assert var.variances.shape == (2, 3)
var.values[1] = 1.2
assert np.array_equal(var.variances, np.zeros(shape=shape))
var.variances = np.ones(shape=shape)
assert np.array_equal(var.variances, np.ones(shape=shape))
def test_getitem():
var = sc.Variable(dims=['x', 'y'], values=np.arange(0, 8).reshape(2, 4))
var_slice = var['x', 1:2]
assert sc.identical(
var_slice, sc.Variable(dims=['x', 'y'], values=np.arange(4, 8).reshape(1, 4)))
def test_setitem_broadcast():
var = sc.Variable(dims=['x'], values=[1, 2, 3, 4], dtype=sc.dtype.int64)
var['x', 1:3] = sc.scalar(5, dtype=sc.dtype.int64)
assert sc.identical(
var, sc.Variable(dims=['x'], values=[1, 5, 5, 4], dtype=sc.dtype.int64))
def test_slicing():
var = sc.Variable(dims=['x'], values=np.arange(0, 3))
    for slice_, expected in ((slice(0, 2), [0, 1]),
                             (slice(-3, -1), [0, 1]),
                             (slice(2, 1), [])):
var_slice = var[('x', slice_)]
assert len(var_slice.values) == len(expected)
assert np.array_equal(var_slice.values, np.array(expected))
def test_sizes():
a = sc.scalar(1)
assert a.sizes == {}
a = sc.empty(dims=['x'], shape=[2])
assert a.sizes == {'x': 2}
a = sc.empty(dims=['y', 'z'], shape=[3, 4])
assert a.sizes == {'y': 3, 'z': 4}
def test_iadd():
expected = sc.scalar(2.2)
a = sc.scalar(1.2)
b = a
a += 1.0
assert sc.identical(a, expected)
assert sc.identical(b, expected)
# This extra check is important: It can happen that an implementation of,
# e.g., __iadd__ does an in-place modification, updating `b`, but then the
# return value is assigned to `a`, which could break the connection unless
# the correct Python object is returned.
a += 1.0
assert sc.identical(a, b)
def test_isub():
expected = sc.scalar(2.2 - 1.0)
a = sc.scalar(2.2)
b = a
a -= 1.0
assert sc.identical(a, expected)
assert sc.identical(b, expected)
a -= 1.0
assert sc.identical(a, b)
def test_imul():
expected = sc.scalar(2.4)
a = sc.scalar(1.2)
b = a
a *= 2.0
assert sc.identical(a, expected)
assert sc.identical(b, expected)
a *= 2.0
assert sc.identical(a, b)
def test_idiv():
expected = sc.scalar(1.2)
a = sc.scalar(2.4)
b = a
a /= 2.0
assert sc.identical(a, expected)
assert sc.identical(b, expected)
a /= 2.0
assert sc.identical(a, b)
def test_iand():
expected = sc.scalar(False)
a = sc.scalar(True)
b = a
a &= sc.scalar(False)
assert sc.identical(a, expected)
assert sc.identical(b, expected)
a |= sc.scalar(True)
assert sc.identical(a, b)
def test_ior():
expected = sc.scalar(True)
a = sc.scalar(False)
b = a
a |= sc.scalar(True)
assert sc.identical(a, expected)
assert sc.identical(b, expected)
a &= sc.scalar(True)
assert sc.identical(a, b)
def test_ixor():
expected = sc.scalar(True)
a = sc.scalar(False)
b = a
a ^= sc.scalar(True)
assert sc.identical(a, expected)
assert sc.identical(b, expected)
a ^= sc.scalar(True)
assert sc.identical(a, b)
def test_binary_plus():
a, b, a_slice, b_slice, data = make_variables()
c = a + b
assert np.array_equal(c.values, data + data)
c = a + 2.0
assert np.array_equal(c.values, data + 2.0)
c = a + b_slice
assert np.array_equal(c.values, data + data)
c += b
assert np.array_equal(c.values, data + data + data)
c += b_slice
    assert np.array_equal(c.values, data + data + data + data)
import os
import numpy as np
import pytest
import xarray as xr
from xclim import atmos
from xclim.core.calendar import percentile_doy
from xclim.core.options import set_options
from xclim.core.units import convert_units_to
from xclim.testing import open_dataset
K2C = 273.15
class TestCSDI:
def test_simple(self, tasmin_series):
i = 3650
A = 10.0
tn = (
np.zeros(i)
+ A * np.sin(np.arange(i) / 365.0 * 2 * np.pi)
+ 0.1 * np.random.rand(i)
)
tn += K2C
tn[10:20] -= 2
tn = tasmin_series(tn)
tn10 = percentile_doy(tn, per=10).sel(percentiles=10)
out = atmos.cold_spell_duration_index(tn, tn10, freq="AS-JUL")
assert out[0] == 10
def test_convert_units(self, tasmin_series):
i = 3650
A = 10.0
tn = (
np.zeros(i)
+ A * np.sin(np.arange(i) / 365.0 * 2 * np.pi)
+ 0.1 * np.random.rand(i)
)
tn[10:20] -= 2
tn = tasmin_series(tn + K2C)
tn.attrs["units"] = "C"
tn10 = percentile_doy(tn, per=10).sel(percentiles=10)
out = atmos.cold_spell_duration_index(tn, tn10, freq="AS-JUL")
assert out[0] == 10
def test_nan_presence(self, tasmin_series):
i = 3650
A = 10.0
tn = (
np.zeros(i)
+ K2C
+ A * np.sin(np.arange(i) / 365.0 * 2 * np.pi)
+ 0.1 * np.random.rand(i)
)
tn[10:20] -= 2
tn[9] = np.nan
tn = tasmin_series(tn)
tn10 = percentile_doy(tn, per=10).sel(percentiles=10)
out = atmos.cold_spell_duration_index(tn, tn10, freq="AS-JUL")
assert np.isnan(out[0])
class TestDTR:
nc_tasmax = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmax_1990.nc")
nc_tasmin = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmin_1990.nc")
def test_DTR_3d_data_with_nans(self):
tasmax = open_dataset(self.nc_tasmax).tasmax
tasmax_C = open_dataset(self.nc_tasmax).tasmax
tasmax_C -= K2C
tasmax_C.attrs["units"] = "C"
tasmin = open_dataset(self.nc_tasmin).tasmin
tasmin_C = open_dataset(self.nc_tasmin).tasmin
tasmin_C -= K2C
tasmin_C.attrs["units"] = "C"
# put a nan somewhere
tasmin.values[32, 1, 0] = np.nan
tasmin_C.values[32, 1, 0] = np.nan
dtr = atmos.daily_temperature_range(tasmin, tasmax, freq="MS")
dtrC = atmos.daily_temperature_range(tasmin_C, tasmax_C, freq="MS")
min1 = tasmin.values[:, 0, 0]
max1 = tasmax.values[:, 0, 0]
dtr1 = max1 - min1
np.testing.assert_array_equal(dtr, dtrC)
assert dtr.attrs["units"] == "K"
assert np.allclose(dtr1[0:31].mean(), dtr.values[0, 0, 0])
assert np.isnan(dtr.values[1, 1, 0])
assert np.isnan(dtr.values[0, -1, -1])
dtr = atmos.max_daily_temperature_range(tasmin, tasmax, freq="MS")
dtrC = atmos.max_daily_temperature_range(tasmin_C, tasmax_C, freq="MS")
np.testing.assert_array_equal(dtr, dtrC)
assert dtr.attrs["units"] == "K"
assert np.allclose(dtr1[0:31].max(), dtr.values[0, 0, 0])
assert np.isnan(dtr.values[1, 1, 0])
assert np.isnan(dtr.values[0, -1, -1])
class TestDTRVar:
nc_tasmax = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmax_1990.nc")
nc_tasmin = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmin_1990.nc")
def test_dtr_var_3d_data_with_nans(self):
tasmax = open_dataset(self.nc_tasmax).tasmax
tasmax_C = open_dataset(self.nc_tasmax).tasmax
tasmax_C -= K2C
tasmax_C.attrs["units"] = "C"
tasmin = open_dataset(self.nc_tasmin).tasmin
tasmin_C = open_dataset(self.nc_tasmin).tasmin
tasmin_C -= K2C
tasmin_C.attrs["units"] = "C"
# put a nan somewhere
tasmin.values[32, 1, 0] = np.nan
tasmin_C.values[32, 1, 0] = np.nan
dtr = atmos.daily_temperature_range_variability(tasmin, tasmax, freq="MS")
dtrC = atmos.daily_temperature_range_variability(tasmin_C, tasmax_C, freq="MS")
min1 = tasmin.values[:, 0, 0]
max1 = tasmax.values[:, 0, 0]
assert dtr.attrs["units"] == "K"
dtr1a = max1 - min1
dtr1 = abs(np.diff(dtr1a))
np.testing.assert_array_equal(dtr, dtrC)
# first month jan use 0:30 (n==30) because of day to day diff
assert np.allclose(dtr1[0:30].mean(), dtr.values[0, 0, 0])
assert np.isnan(dtr.values[1, 1, 0])
assert np.isnan(dtr.values[0, -1, -1])
class TestETR:
nc_tasmax = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmax_1990.nc")
nc_tasmin = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmin_1990.nc")
def test_dtr_var_3d_data_with_nans(self):
tasmax = open_dataset(self.nc_tasmax).tasmax
tasmax_C = open_dataset(self.nc_tasmax).tasmax
tasmax_C -= K2C
tasmax_C.attrs["units"] = "C"
tasmin = open_dataset(self.nc_tasmin).tasmin
tasmin_C = open_dataset(self.nc_tasmin).tasmin
tasmin_C -= K2C
tasmin_C.attrs["units"] = "C"
# put a nan somewhere
tasmin.values[32, 1, 0] = np.nan
tasmin_C.values[32, 1, 0] = np.nan
etr = atmos.extreme_temperature_range(tasmin, tasmax, freq="MS")
etrC = atmos.extreme_temperature_range(tasmin_C, tasmax_C, freq="MS")
min1 = tasmin.values[:, 0, 0]
max1 = tasmax.values[:, 0, 0]
np.testing.assert_array_equal(etr, etrC)
etr1 = max1[0:31].max() - min1[0:31].min()
assert np.allclose(etr1, etr.values[0, 0, 0])
assert np.isnan(etr.values[1, 1, 0])
assert np.isnan(etr.values[0, -1, -1])
class TestTmean:
nc_files = (
os.path.join("NRCANdaily", "nrcan_canada_daily_tasmax_1990.nc"),
os.path.join("NRCANdaily", "nrcan_canada_daily_tasmin_1990.nc"),
)
def test_Tmean_3d_data(self):
ds_tmax = open_dataset(self.nc_files[0])
ds_tmin = open_dataset(self.nc_files[1])
tas = atmos.tg(ds_tmin.tasmin, ds_tmax.tasmax)
tas_C = atmos.tg(ds_tmin.tasmin, ds_tmax.tasmax)
tas_C.values -= K2C
tas_C.attrs["units"] = "C"
# put a nan somewhere
tas.values[180, 1, 0] = np.nan
tas_C.values[180, 1, 0] = np.nan
tmmean = atmos.tg_mean(tas)
tmmeanC = atmos.tg_mean(tas_C)
x1 = tas.values[:, 0, 0]
tmmean1 = x1.mean()
# TODO: Investigate the differences between the two outputs.
# The conversion to K is done after / before the mean.
np.testing.assert_array_almost_equal(tmmeanC, tmmean, 3)
# test single point vs manual
assert np.allclose(tmmean1, tmmean.values[0, 0, 0], tmmeanC.values[0, 0, 0])
# test single nan point
assert np.isnan(tmmean.values[0, 1, 0])
# test all nan point
assert np.isnan(tmmean.values[0, -1, -1])
class TestTx:
nc_file = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmax_1990.nc")
def test_TX_3d_data(self):
tasmax = open_dataset(self.nc_file).tasmax
tasmax_C = open_dataset(self.nc_file).tasmax
tasmax_C.values -= K2C
tasmax_C.attrs["units"] = "C"
# put a nan somewhere
tasmax.values[180, 1, 0] = np.nan
tasmax_C.values[180, 1, 0] = np.nan
txmean = atmos.tx_mean(tasmax)
txmax = atmos.tx_max(tasmax)
txmin = atmos.tx_min(tasmax)
txmeanC = atmos.tx_mean(tasmax_C)
txmaxC = atmos.tx_max(tasmax_C)
txminC = atmos.tx_min(tasmax_C)
no_nan = (
~np.isnan(txmean).values & ~np.isnan(txmax).values & ~np.isnan(txmin).values
)
# test maxes always greater than mean and mean always greater than min (non nan values only)
assert np.all(txmax.values[no_nan] > txmean.values[no_nan]) & np.all(
txmean.values[no_nan] > txmin.values[no_nan]
)
np.testing.assert_array_almost_equal(txmeanC, txmean, 3)
np.testing.assert_array_equal(txminC, txmin)
np.testing.assert_array_equal(txmaxC, txmax)
x1 = tasmax.values[:, 0, 0]
txmean1 = x1.mean()
txmin1 = x1.min()
txmax1 = x1.max()
# test single point vs manual
assert np.allclose(txmean1, txmean.values[0, 0, 0], txmeanC.values[0, 0, 0])
assert np.allclose(txmax1, txmax.values[0, 0, 0], txmaxC.values[0, 0, 0])
assert np.allclose(txmin1, txmin.values[0, 0, 0], txminC.values[0, 0, 0])
# test single nan point
assert np.isnan(txmean.values[0, 1, 0])
assert np.isnan(txmin.values[0, 1, 0])
assert np.isnan(txmax.values[0, 1, 0])
# test all nan point
assert np.isnan(txmean.values[0, -1, -1])
assert np.isnan(txmin.values[0, -1, -1])
assert np.isnan(txmax.values[0, -1, -1])
class TestTn:
nc_file = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmin_1990.nc")
def test_TN_3d_data(self):
tasmin = open_dataset(self.nc_file).tasmin
tasmin_C = open_dataset(self.nc_file).tasmin
tasmin_C.values -= K2C
tasmin_C.attrs["units"] = "C"
# put a nan somewhere
tasmin.values[180, 1, 0] = np.nan
tasmin_C.values[180, 1, 0] = np.nan
tnmean = atmos.tn_mean(tasmin)
tnmax = atmos.tn_max(tasmin)
tnmin = atmos.tn_min(tasmin)
tnmeanC = atmos.tn_mean(tasmin_C)
tnmaxC = atmos.tn_max(tasmin_C)
tnminC = atmos.tn_min(tasmin_C)
no_nan = (
~np.isnan(tnmean).values & ~np.isnan(tnmax).values & ~np.isnan(tnmin).values
)
        # test maxes always greater than mean and mean always greater than min (non-nan values only)
assert np.all(tnmax.values[no_nan] > tnmean.values[no_nan]) & np.all(
tnmean.values[no_nan] > tnmin.values[no_nan]
)
np.testing.assert_array_almost_equal(tnmeanC, tnmean, 3)
np.testing.assert_array_equal(tnminC, tnmin)
np.testing.assert_array_equal(tnmaxC, tnmax)
x1 = tasmin.values[:, 0, 0]
txmean1 = x1.mean()
txmin1 = x1.min()
txmax1 = x1.max()
# test single point vs manual
assert np.allclose(txmean1, tnmean.values[0, 0, 0], tnmeanC.values[0, 0, 0])
assert np.allclose(txmax1, tnmax.values[0, 0, 0], tnmaxC.values[0, 0, 0])
assert np.allclose(txmin1, tnmin.values[0, 0, 0], tnminC.values[0, 0, 0])
# test single nan point
assert np.isnan(tnmean.values[0, 1, 0])
assert np.isnan(tnmin.values[0, 1, 0])
assert np.isnan(tnmax.values[0, 1, 0])
# test all nan point
assert np.isnan(tnmean.values[0, -1, -1])
assert np.isnan(tnmin.values[0, -1, -1])
assert np.isnan(tnmax.values[0, -1, -1])
class TestConsecutiveFrostDays:
def test_one_freeze_day(self, tasmin_series):
a = np.zeros(365) + K2C + 5.0
a[2] -= 20
ts = tasmin_series(a)
out = atmos.consecutive_frost_days(ts)
np.testing.assert_array_equal(out, [1])
def test_three_freeze_day(self, tasmin_series):
a = np.zeros(365) + K2C + 5.0
a[2:5] -= 20
ts = tasmin_series(a)
out = atmos.consecutive_frost_days(ts)
np.testing.assert_array_equal(out, [3])
def test_two_equal_freeze_day(self, tasmin_series):
a = np.zeros(365) + K2C + 5.0
a[2:5] -= 20
a[6:9] -= 20
ts = tasmin_series(a)
out = atmos.consecutive_frost_days(ts)
np.testing.assert_array_equal(out, [3])
def test_two_events_freeze_day(self, tasmin_series):
a = np.zeros(365) + K2C + 5.0
a[2:5] -= 20
a[6:10] -= 20
ts = tasmin_series(a)
out = atmos.consecutive_frost_days(ts)
np.testing.assert_array_equal(out, [4])
def test_convert_units_freeze_day(self, tasmin_series):
a = np.zeros(365) + 5.0
a[2:5] -= 20
a[6:10] -= 20
ts = tasmin_series(a)
ts.attrs["units"] = "C"
out = atmos.consecutive_frost_days(ts)
np.testing.assert_array_equal(out, [4])
def test_one_nan_day(self, tasmin_series):
a = np.zeros(365) + K2C + 5.0
a[2] -= 20
a[-1] = np.nan
ts = tasmin_series(a)
out = atmos.consecutive_frost_days(ts)
np.testing.assert_array_equal(out, [np.nan])
class TestConsecutiveFrostFreeDays:
def test_real_data(self, atmosds):
tasmin = atmosds.tasmin
test = atmos.maximum_consecutive_frost_free_days(tasmin)
np.testing.assert_allclose(test[2, 0], [68], rtol=1e-1)
assert (
"Annual maximum number of consecutive days with minimum daily temperature above or equal to 0 degc."
) in test.description
class TestFrostSeasonLength:
def test_simple(self, tasmin_series):
a = np.zeros(730) + K2C + 15
a[300:400] = K2C - 5
a[404:407] = K2C - 5
tasmin = tasmin_series(a, start="2000-01-01")
# Default, window = 5, mid_date = 07-01, freq= AS-JUL
out = atmos.frost_season_length(tasmin=tasmin)
np.testing.assert_array_equal(out, [np.nan, 107, np.nan])
out = atmos.frost_season_length(tasmin=tasmin, window=3)
np.testing.assert_array_equal(out, [np.nan, 100, np.nan])
out = atmos.frost_season_length(tasmin=tasmin, mid_date="07-01", freq="YS")
np.testing.assert_array_equal(out, [np.nan, np.nan])
class TestColdSpellDays:
def test_simple(self, tas_series):
a = np.zeros(365) + K2C
a[10:20] -= 15 # 10 days
a[40:43] -= 50 # too short -> 0
a[80:100] -= 30 # at the end and beginning
ts = tas_series(a)
out = atmos.cold_spell_days(ts, thresh="-10 C", freq="MS")
np.testing.assert_array_equal(out, [10, 0, 12, 8, 0, 0, 0, 0, 0, 0, 0, 0])
out = atmos.cold_spell_frequency(ts, thresh="-10 C", freq="MS")
np.testing.assert_array_equal(out, [1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0])
def test_convert_units(self, tas_series):
a = np.zeros(365)
a[10:20] -= 15 # 10 days
a[40:43] -= 50 # too short -> 0
a[80:100] -= 30 # at the end and beginning
ts = tas_series(a)
ts.attrs["units"] = "C"
out = atmos.cold_spell_days(ts, thresh="-10 C", freq="MS")
np.testing.assert_array_equal(out, [10, 0, 12, 8, 0, 0, 0, 0, 0, 0, 0, 0])
out = atmos.cold_spell_frequency(ts, thresh="-10 C", freq="MS")
np.testing.assert_array_equal(out, [1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0])
def test_nan_presence(self, tas_series):
a = np.zeros(365) + K2C
a[10:20] -= 15 # 10 days
a[40:43] -= 50 # too short -> 0
a[80:100] -= 30 # at the end and beginning
a[-1] = np.nan
ts = tas_series(a)
out = atmos.cold_spell_days(ts, thresh="-10 C", freq="MS")
np.testing.assert_array_equal(out, [10, 0, 12, 8, 0, 0, 0, 0, 0, 0, 0, np.nan])
out = atmos.cold_spell_frequency(ts, thresh="-10 C", freq="MS")
np.testing.assert_array_equal(out, [1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, np.nan])
class TestFrostDays:
nc_file = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmin_1990.nc")
def test_3d_data_with_nans(self):
# test with 3d data
tasmin = open_dataset(self.nc_file).tasmin
tasminC = open_dataset(self.nc_file).tasmin
tasminC -= K2C
tasminC.attrs["units"] = "C"
# put a nan somewhere
tasmin.values[180, 1, 0] = np.nan
tasminC.values[180, 1, 0] = np.nan
# compute with both skipna options
thresh = 273.16
fd = atmos.frost_days(tasmin, freq="YS")
fdC = atmos.frost_days(tasminC, freq="YS")
# fds = xci.frost_days(tasmin, thresh=thresh, freq='YS', skipna=True)
x1 = tasmin.values[:, 0, 0]
fd1 = (x1[x1 < thresh]).size
np.testing.assert_array_equal(fd, fdC)
assert np.allclose(fd1, fd.values[0, 0, 0])
# assert (np.allclose(fd1, fds.values[0, 0, 0]))
assert np.isnan(fd.values[0, 1, 0])
# assert (np.allclose(fd2, fds.values[0, 1, 0]))
assert np.isnan(fd.values[0, -1, -1])
# assert (np.isnan(fds.values[0, -1, -1]))
class TestIceDays:
nc_file = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmax_1990.nc")
def test_3d_data_with_nans(self):
# test with 3d data
tas = open_dataset(self.nc_file).tasmax
tasC = open_dataset(self.nc_file).tasmax
tasC -= K2C
tasC.attrs["units"] = "C"
# put a nan somewhere
tas.values[180, 1, 0] = np.nan
tasC.values[180, 1, 0] = np.nan
# compute with both skipna options
thresh = 273.16
fd = atmos.ice_days(tas, freq="YS")
fdC = atmos.ice_days(tasC, freq="YS")
x1 = tas.values[:, 0, 0]
fd1 = (x1[x1 < thresh]).size
np.testing.assert_array_equal(fd, fdC)
assert np.allclose(fd1, fd.values[0, 0, 0])
assert np.isnan(fd.values[0, 1, 0])
assert np.isnan(fd.values[0, -1, -1])
class TestCoolingDegreeDays:
nc_file = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmax_1990.nc")
def test_3d_data_with_nans(self):
# test with 3d data
tas = open_dataset(self.nc_file).tasmax
tas.attrs["cell_methods"] = "time: mean within days"
# put a nan somewhere
tas.values[180, 1, 0] = np.nan
# compute with both skipna options
thresh = 18 + K2C
cdd = atmos.cooling_degree_days(tas, thresh="18 C", freq="YS")
x1 = tas.values[:, 0, 0]
cdd1 = (x1[x1 > thresh] - thresh).sum()
assert np.allclose(cdd1, cdd.values[0, 0, 0])
assert np.isnan(cdd.values[0, 1, 0])
assert np.isnan(cdd.values[0, -1, -1])
def test_convert_units(self):
# test with 3d data
tas = open_dataset(self.nc_file).tasmax
tas.values -= K2C
tas.attrs["units"] = "C"
tas.attrs["cell_methods"] = "time: mean within days"
# put a nan somewhere
tas.values[180, 1, 0] = np.nan
# compute with both skipna options
thresh = 18
cdd = atmos.cooling_degree_days(tas, thresh="18 C", freq="YS")
x1 = tas.values[:, 0, 0]
# x2 = tas.values[:, 1, 0]
cdd1 = (x1[x1 > thresh] - thresh).sum()
# gdd2 = (x2[x2 > thresh] - thresh).sum()
assert np.allclose(cdd1, cdd.values[0, 0, 0])
# assert (np.allclose(gdd1, gdds.values[0, 0, 0]))
assert np.isnan(cdd.values[0, 1, 0])
# assert (np.allclose(gdd2, gdds.values[0, 1, 0]))
assert np.isnan(cdd.values[0, -1, -1])
# assert (np.isnan(gdds.values[0, -1, -1]))
class TestHeatingDegreeDays:
nc_file = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmax_1990.nc")
def test_3d_data_with_nans(self):
# test with 3d data
tas = open_dataset(self.nc_file).tasmax
# put a nan somewhere
tas.values[180, 1, 0] = np.nan
tas.attrs["cell_methods"] = "time: mean within days"
# compute with both skipna options
thresh = 17 + K2C
hdd = atmos.heating_degree_days(tas, freq="YS")
x1 = tas.values[:, 0, 0]
hdd1 = (thresh - x1).clip(min=0).sum()
assert np.allclose(hdd1, hdd.values[0, 0, 0])
assert np.isnan(hdd.values[0, 1, 0])
assert np.isnan(hdd.values[0, -1, -1])
def test_convert_units(self):
# test with 3d data
tas = open_dataset(self.nc_file).tasmax
# put a nan somewhere
tas.values[180, 1, 0] = np.nan
tas.values -= K2C
tas.attrs["units"] = "C"
tas.attrs["cell_methods"] = "time: mean within days"
# compute with both skipna options
thresh = 17
hdd = atmos.heating_degree_days(tas, freq="YS")
x1 = tas.values[:, 0, 0]
hdd1 = (thresh - x1).clip(min=0).sum()
assert np.allclose(hdd1, hdd.values[0, 0, 0])
assert np.isnan(hdd.values[0, 1, 0])
assert np.isnan(hdd.values[0, -1, -1])
class TestGrowingDegreeDays:
nc_file = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmax_1990.nc")
def test_3d_data_with_nans(self):
# test with 3d data
tas = open_dataset(self.nc_file).tasmax
tas.attrs["cell_methods"] = "time: mean within days"
# put a nan somewhere
tas.values[180, 1, 0] = np.nan
# compute with both skipna options
thresh = K2C + 4
gdd = atmos.growing_degree_days(tas, freq="YS")
# gdds = xci.growing_degree_days(tas, thresh=thresh, freq='YS', skipna=True)
x1 = tas.values[:, 0, 0]
# x2 = tas.values[:, 1, 0]
gdd1 = (x1[x1 > thresh] - thresh).sum()
# gdd2 = (x2[x2 > thresh] - thresh).sum()
assert np.allclose(gdd1, gdd.values[0, 0, 0])
assert np.isnan(gdd.values[0, 1, 0])
assert np.isnan(gdd.values[0, -1, -1])
class TestHeatWaveFrequency:
def test_1d(self, tasmax_series, tasmin_series):
tn1 = np.zeros(366)
tx1 = np.zeros(366)
tn1[:10] = np.array([20, 23, 23, 23, 23, 21, 23, 23, 23, 23])
tx1[:10] = np.array([29, 31, 31, 31, 29, 31, 31, 31, 31, 31])
tn = tasmin_series(tn1 + K2C, start="1/1/2000")
tx = tasmax_series(tx1 + K2C, start="1/1/2000")
tnC = tasmin_series(tn1, start="1/1/2000")
tnC.attrs["units"] = "C"
txC = tasmax_series(tx1, start="1/1/2000")
txC.attrs["units"] = "C"
hwf = atmos.heat_wave_frequency(
tn, tx, thresh_tasmin="22 C", thresh_tasmax="30 C", freq="YS"
)
hwfC = atmos.heat_wave_frequency(
tnC, txC, thresh_tasmin="22 C", thresh_tasmax="30 C", freq="YS"
)
np.testing.assert_array_equal(hwf, hwfC)
np.testing.assert_allclose(hwf.values[:1], 2)
hwf = atmos.heat_wave_frequency(
tn, tx, thresh_tasmin="22 C", thresh_tasmax="30 C", window=4, freq="YS"
)
np.testing.assert_allclose(hwf.values[:1], 1)
# one long hw
hwf = atmos.heat_wave_frequency(
tn, tx, thresh_tasmin="10 C", thresh_tasmax="10 C", freq="YS"
)
np.testing.assert_allclose(hwf.values[:1], 1)
# no hw
hwf = atmos.heat_wave_frequency(
tn, tx, thresh_tasmin="40 C", thresh_tasmax="40 C", freq="YS"
)
np.testing.assert_allclose(hwf.values[:1], 0)
class TestHeatWaveMaxLength:
def test_1d(self, tasmax_series, tasmin_series):
tn1 = np.zeros(366)
tx1 = np.zeros(366)
tn1[:10] = np.array([20, 23, 23, 23, 23, 21, 23, 23, 23, 23])
tx1[:10] = np.array([29, 31, 31, 31, 29, 31, 31, 31, 31, 31])
tn = tasmin_series(tn1 + K2C, start="1/1/2000")
tx = tasmax_series(tx1 + K2C, start="1/1/2000")
tnC = tasmin_series(tn1, start="1/1/2000")
tnC.attrs["units"] = "C"
txC = tasmax_series(tx1, start="1/1/2000")
txC.attrs["units"] = "C"
hwf = atmos.heat_wave_max_length(
tn, tx, thresh_tasmin="22 C", thresh_tasmax="30 C", freq="YS"
)
hwfC = atmos.heat_wave_max_length(
tnC, txC, thresh_tasmin="22 C", thresh_tasmax="30 C", freq="YS"
)
np.testing.assert_array_equal(hwf, hwfC)
np.testing.assert_allclose(hwf.values[:1], 4)
hwf = atmos.heat_wave_max_length(
tn, tx, thresh_tasmin="20 C", thresh_tasmax="30 C", window=4, freq="YS"
)
np.testing.assert_allclose(hwf.values[:1], 5)
# one long hw
hwf = atmos.heat_wave_max_length(
tn, tx, thresh_tasmin="10 C", thresh_tasmax="10 C", freq="YS"
)
np.testing.assert_allclose(hwf.values[:1], 10)
# no hw
hwf = atmos.heat_wave_max_length(
tn, tx, thresh_tasmin="40 C", thresh_tasmax="40 C", freq="YS"
)
np.testing.assert_allclose(hwf.values[:1], 0)
class TestHeatWaveTotalLength:
def test_1d(self, tasmax_series, tasmin_series):
tn1 = np.zeros(366)
tx1 = np.zeros(366)
tn1[:10] = np.array([20, 23, 23, 23, 23, 21, 23, 23, 23, 23])
tx1[:10] = np.array([29, 31, 31, 31, 29, 31, 31, 31, 31, 31])
tn = tasmin_series(tn1 + K2C, start="1/1/2000")
tx = tasmax_series(tx1 + K2C, start="1/1/2000")
tnC = tasmin_series(tn1, start="1/1/2000")
tnC.attrs["units"] = "C"
txC = tasmax_series(tx1, start="1/1/2000")
txC.attrs["units"] = "C"
hwf = atmos.heat_wave_total_length(
tn, tx, thresh_tasmin="22 C", thresh_tasmax="30 C", freq="YS"
)
hwfC = atmos.heat_wave_total_length(
tnC, txC, thresh_tasmin="22 C", thresh_tasmax="30 C", freq="YS"
)
np.testing.assert_array_equal(hwf, hwfC)
np.testing.assert_allclose(hwf.values[:1], 7)
hwf = atmos.heat_wave_total_length(
tn, tx, thresh_tasmin="20 C", thresh_tasmax="30 C", window=4, freq="YS"
)
np.testing.assert_allclose(hwf.values[:1], 5)
# one long hw
hwf = atmos.heat_wave_total_length(
tn, tx, thresh_tasmin="10 C", thresh_tasmax="10 C", freq="YS"
)
np.testing.assert_allclose(hwf.values[:1], 10)
# no hw
hwf = atmos.heat_wave_total_length(
tn, tx, thresh_tasmin="40 C", thresh_tasmax="40 C", freq="YS"
)
np.testing.assert_allclose(hwf.values[:1], 0)
class TestHeatWaveIndex:
def test_simple(self, tasmax_series):
tx = np.zeros(366)
tx[:10] = np.array([29, 31, 31, 31, 29, 31, 31, 31, 31, 31])
tx = tasmax_series(tx + K2C, start="1/1/2000")
hwi = atmos.heat_wave_index(tx, freq="YS")
np.testing.assert_array_equal(hwi, [10])
import numpy as np
import os
import matplotlib.pyplot as plt
import sklearn.datasets as skld
import sklearn.model_selection as sklms
import sklearn.ensemble as skle
from bayeso import bo
from bayeso.utils import utils_bo
from bayeso.utils import utils_common
from bayeso.utils import utils_plotting
from bayeso import constants
STR_FUN_TARGET = 'real1'
INT_BO = 20
INT_ITER = 50
INT_INIT = 3
data = skld.fetch_olivetti_faces()
images = data.images
targets = data.target
print(images.shape)
print(targets.shape)
X_train, X_test, y_train, y_test = sklms.train_test_split(images, targets, test_size=0.2, random_state=42, stratify=targets)
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1] * X_train.shape[2]))
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1] * X_test.shape[2]))
import sys
sys.path.append('../')
import rosbag
import matplotlib.pyplot as plt
import numpy as np
from scipy import interpolate
from tempfile import TemporaryFile
from analysis_lib.data_smooth import convolution_window_1d
class bag_analysis:
def __init__(self, path_file):
# uwb coordinate system
self.bag = rosbag.Bag(path_file)
self.rotation_opti = [[-0.04425313, 0.99902035], [-0.99902035, -0.04425313]]
self.transition_opti = [[4.41968278], [4.13352817]]
self.transition_odom = [[1], [1]]
self.uwb_data = []
self.opti_data = []
self.odom_data = []
def read_anchor_dis(self, topic_name):
self.bag_msg = self.bag.read_messages(topics = topic_name)
self.agent_dis = [[] for i in range(4)]
for topic, msg, t in self.bag_msg:
if topic == topic_name and len(msg.nodes) == 4:
for i in range(4):
distance = msg.nodes[i].distance
self.agent_dis[i].append(distance)
return self.agent_dis
def read_location(self, topic_name, mode = 'uwb'):
bag_msg = self.bag.read_messages(topics = topic_name)
for topic, msg, t in bag_msg:
if topic == topic_name:
if mode == 'uwb':
self.uwb_data.append([msg.position.x, msg.position.y])
if mode == 'optitrack':
self.opti_data.append([msg.x, msg.y])
if mode == 'odom':
self.odom_data.append([msg.pose.pose.position.x, msg.pose.pose.position.y])
if len(self.uwb_data) != 0:
self.uwb_data = np.array(self.uwb_data)
if len(self.opti_data) != 0:
self.opti_data = np.array(self.opti_data)
if len(self.odom_data) != 0:
self.odom_data = np.array(self.odom_data)
return self.uwb_data, self.opti_data, self.odom_data
# def trans_data():
def update_trans_opti(self, transition, rotation):
self.rotation_opti = rotation
self.transition_opti = transition
def update_trans_odom(self, transition = []):
if not transition:
self.transition_odom = self.uwb_data[0, :] - self.odom_data[0, :]
else:
self.transition_odom = transition
# def read_locations(self, topic_name):
def transform(self):
self.opti_data = np.transpose(np.dot(self.rotation_opti, np.transpose(self.opti_data)) + self.transition_opti)
self.odom_data = self.odom_data + self.transition_odom
def smooth_dis_data(self):
self.agent_dis_smooth = convolution_window_1d(self.agent_dis)
def dis_plot(self, ax, name, if_raw = True, if_smooth = True, window_len = 0):
num1 = 0
num2 = 0
if if_raw == True:
for distance in self.agent_dis:
num1 = num1 + 1
x_arrange = np.arange(len(distance))
distance = np.array(distance)
from pathlib import Path
import cv2
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from src.dataset import TianchiOCRDataset, TianchiOCRDataLoader
def norm(v):
return np.sqrt((v * v).sum())
def calc_quadrilateral_w_h_ratios(q):
w, h = np.abs(q[:, 4] - q[:, 0]), np.abs(q[:, 5] - q[:, 1])
import os
import tempfile
from unittest import TestCase
import numpy as np
from keras.layers import Dense
from keras.constraints import max_norm
from keras.models import Sequential, load_model
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from keras_lamb import Lamb
class TestOptimizer(TestCase):
def test_fit(self):
model = Sequential()
model.add(Dense(input_shape=(5,), units=3, bias_constraint=max_norm(10.0)))
model.compile(optimizer=Lamb(decay=1e-6), loss='mse')
model_path = os.path.join(tempfile.gettempdir(), 'keras_lamb_%.4f.h5' % np.random.random())
model.save(model_path)
model = load_model(model_path, custom_objects={'Lamb': Lamb})
model.summary()
target_w = np.random.standard_normal((5, 3))
target_b = np.random.standard_normal(3)
# coding=utf-8
import sys
sys.path.insert(0, '/home/chujie/PycharmProjects/amsoftmax_face_recognition_caffe/AM-Softmax-caffe/python')
import caffe
import cv2
import os
import numpy as np
import math
import copy
from numpy.linalg import inv, norm, lstsq
from numpy.linalg import matrix_rank as rank
of = 0  # extra pixels to crop during face alignment
std_mean = 127.5
std_scale = 0.0078125
batchsize = 128
factor = 0.709
minisize = 30  # minimum face size (in pixels) to detect
# pnet
pnet_stride = 2
pnet_cell_size = 12
pnet_thread = 0.95
# rnet
rnet_thread = 0.95
# onet
onet_thread = 0.95
def Align_sphereface(input_image, points, output_size=(96, 112)):
image = copy.deepcopy(input_image)
src = np.matrix([[points[0], points[2], points[4], points[6], points[8]],
[points[1], points[3], points[5], points[7], points[9]], [1, 1, 1, 1, 1]])
dst = np.matrix([[30.2946, 65.5318, 48.0252, 33.5493, 62.7299],
[51.6963, 51.5014, 71.7366, 92.3655, 92.2041]])
T = (src * src.T).I * src * dst.T
img_affine = cv2.warpAffine(image, T.T, output_size)
return img_affine
def Align_seqface(input_image, points, output_size=(128, 128)):
image = copy.deepcopy(input_image)
eye_center_x = (points[0] + points[2]) * 0.5
eye_center_y = (points[1] + points[3]) * 0.5
mouse_center_x = (points[6] + points[8]) * 0.5
mouse_center_y = (points[7] + points[9]) * 0.5
rad_tan = 1.0 * (points[3] - points[1]) / (points[2] - points[0])
rad = math.atan(rad_tan)
deg = np.rad2deg(rad)
width = int(math.fabs(math.sin(rad)) * image.shape[0] + math.fabs(math.cos(rad)) * image.shape[1])
height = int(math.fabs(math.cos(rad)) * image.shape[0] + math.fabs(math.sin(rad)) * image.shape[1])
transformMat = cv2.getRotationMatrix2D((eye_center_x, eye_center_y), deg, 1.0)
dst = cv2.warpAffine(image, transformMat, (width, height))
diff_x = mouse_center_x - eye_center_x
diff_y = mouse_center_y - eye_center_y
r_mouse_center_y = diff_y * float(math.cos(rad)) - diff_x * float(math.sin(rad)) + eye_center_y
d = r_mouse_center_y - eye_center_y + 1
dx = int(d * 3 / 2.0)
dy = int(d * 3 / 3.0)
x0 = int(eye_center_x) - dx
x0 = max(x0, 0)
x1 = int(eye_center_x + (3 * d - dx)) - 1
x1 = min(x1, width - 1)
y0 = int(eye_center_y) - dy
y0 = max(y0, 0)
y1 = int(eye_center_y + (3 * d - dy)) - 1
y1 = min(y1, height - 1)
alignface = dst[y0:y1, x0:x1, :]
alignface = cv2.resize(alignface, (128, 128))
return alignface
def CalScale(width, height):
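    # Build the image-pyramid scales for P-Net: start at 12/minisize so the smallest
    # face of interest maps onto the 12 px network input, then shrink geometrically
    # by `factor` until the scaled min(width, height) falls below 12 px.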
scales = []
scale = 12.0 / minisize # 12.0/30
minWH = min(height, width) * scale
while minWH >= 12.0:
scales.append(scale)
minWH *= factor
scale *= factor
return scales
def BBoxRegression(results):
for result in results:
box = result['faceBox']
bbox_reg = result['bbox_reg']
w = box[2] - box[0] + 1
h = box[3] - box[1] + 1
box[0] += bbox_reg[0] * w
box[1] += bbox_reg[1] * h
box[2] += bbox_reg[2] * w
box[3] += bbox_reg[3] * h
return results
def BBoxPad(results, width, height):
for result in results:
box = result['faceBox']
box[0] = round(max(box[0], 0.0))
box[1] = round(max(box[1], 0.0))
box[2] = round(min(box[2], width - 1.0))
box[3] = round(min(box[3], height - 1.0))
return results
def BBoxPadSquare(results, width, height):
for result in results:
box = result['faceBox']
w = box[2] - box[0] + 1;
h = box[3] - box[1] + 1;
side = max(w, h)
box[0] = round(max(box[0] + (w - side) * 0.5, 0))
box[1] = round(max(box[1] + (h - side) * 0.5, 0.))
box[2] = round(min(box[0] + side - 1.0, width - 1.0))
box[3] = round(min(box[1] + side - 1.0, height - 1.0))
return results
def NMS(results, thresh, methodType):
bboxes_nms = []
if len(results) == 0:
return bboxes_nms
else:
results = sorted(results, key=lambda result: result['bbox_score'], reverse=True)
flag = np.zeros_like(results)
import numpy as np
import pp
from scipy.constants import c as SPEED_OF_LIGHT
from simphony.elements import Model
from simphony.tools import freq2wl, interpolate, wl2freq
def load(component, **kwargs):
""" load Sparameters for a component
Args:
component: component factory or instance
**kwargs
"""
component = pp.call_if_func(component, **kwargs)
pins, f, s = pp.sp.load(component)
def interpolate_sp(freq):
return interpolate(freq, f, s)
m = Model()
m.pins = pins
m.s_params = (f, s)
m.s_parameters = interpolate_sp
m.freq_range = (
m.s_params[0][0],
m.s_params[0][-1],
) #: The valid frequency range for this model.
return m
if __name__ == "__main__":
import matplotlib.pyplot as plt
c = load(pp.c.mmi1x2())
wav = np.linspace(1520, 1570, 1024)
#!/usr/bin/env python
#************************************************************************
#
# Plot figures and output numbers for Plate 1.1 .
# For BAMS SotC 2016
#
#************************************************************************
# SVN Info
# $Rev:: 29 $: Revision of last commit
# $Author:: rdunn $: Author of last commit
# $Date:: 2020-08-05 12:12:39 +0100 (Wed, 05 Aug #$: Date of last commit
#************************************************************************
# START
#************************************************************************
from __future__ import absolute_import
from __future__ import print_function
import matplotlib.pyplot as plt
import datetime as dt
import numpy as np
import iris
import utils # RJHD utilities
import settings
DATALOC = "{}/{}/data/".format(settings.ROOTLOC, settings.YEAR)
image_loc = "{}/{}/images/".format(settings.ROOTLOC, settings.YEAR)
LW = 2
print("Standardise individual plotting scripts \nThen use main() and import statements so \ncan call the read routines only once")
insitu = "k"
satellite = "r"
reanalyses = "b"
#************************************************************************
def make_plot(ax, ts_list, color, plot_label="", ylabel="", ylim=[], ls="-", scatter=False):
"""
Plot the lines and other details on the axis.
"""
assert isinstance(ts_list, list)
zorder = 1
if color == "k": zorder = 10
if color == "r": zorder = 5
for ts in ts_list:
if scatter:
ax.plot(ts.times, ts.data, color=color, ls="", marker=".", zorder=zorder)
else:
ax.plot(ts.times, ts.data, color=color, ls=ls, lw=LW, zorder=zorder)
if ylabel != "":
ax.set_ylabel(ylabel)
if ylim != []:
ax.set_ylim(ylim)
if plot_label != "":
ax.text(0.02, 0.75, plot_label, transform=ax.transAxes)
# autogenerate text for caption
if color == insitu:
print("In Situ: {}".format(len(ts_list)))
elif color == satellite:
print("Satellite: {}".format(len(ts_list)))
elif color == reanalyses:
print("Reanalyses: {}".format(len(ts_list)))
return # make_plot
#************************************************************************
# have separate read script for each section.
'''
black - in situ
blue - reanalyses
red - satellite
'''
#************************************************************************
def read_sie(filename):
indata = np.genfromtxt(filename, dtype=(float), skip_header=3)
arctic_max = utils.Timeseries("SIE", indata[:, 0], indata[:, 2])
arctic_min = utils.Timeseries("SIE", indata[:, 0], indata[:, 4])
antarctic_min = utils.Timeseries("SIE", indata[:, 0], indata[:, 6])
antarctic_max = utils.Timeseries("SIE", indata[:, 0], indata[:, 8])
return arctic_max, arctic_min, antarctic_min, antarctic_max # read_sie
#************************************************************************
def read_ohc(filename):
indata = np.genfromtxt(filename, dtype=(float), skip_header=1)
hadley = utils.Timeseries("OHC", indata[:, 0], indata[:, 1])
csiro = utils.Timeseries("OHC", indata[:, 0], indata[:, 3])
pmel = utils.Timeseries("OHC", indata[:, 0], indata[:, 5])
ncei = utils.Timeseries("OHC", indata[:, 0], indata[:, 7])
mri = utils.Timeseries("OHC", indata[:, 0], indata[:, 9])
iap = utils.Timeseries("OHC", indata[:, 0], indata[:, 9])
# reapply updated climatology
dummy, hadley = utils.calculate_climatology_and_anomalies_1d(hadley, 1993, 2016)
dummy, csiro = utils.calculate_climatology_and_anomalies_1d(csiro, 1993, 2016)
dummy, pmel = utils.calculate_climatology_and_anomalies_1d(pmel, 1993, 2016)
dummy, ncei = utils.calculate_climatology_and_anomalies_1d(ncei, 1993, 2016)
dummy, mri = utils.calculate_climatology_and_anomalies_1d(mri, 1993, 2016)
dummy, iap = utils.calculate_climatology_and_anomalies_1d(iap, 1993, 2016)
return hadley, csiro, pmel, ncei, mri, iap # read_ohc
#************************************************************************
def read_slr(filename):
indata = np.genfromtxt(filename, dtype=(float))
slr = utils.Timeseries("SLR", indata[:, 0], indata[:, 1])
return slr # read_SLR
#************************************************************************
def read_swv(filename):
indata = np.genfromtxt(filename, dtype=(float), delimiter=",")
swv = utils.Timeseries("SWV", indata[:, 0], indata[:, 1])
return swv # read_SWV
#************************************************************************
def read_arct(filename):
""" Read the Arctic Temperatures"""
indata = np.genfromtxt(filename, delimiter=",", skip_header=1)
arct = utils.Timeseries("ARCT", indata[:, 0], indata[:, 1])
return arct # read_arct
#************************************************************************
def toYearFraction(date):
import time
def sinceEpoch(date): # returns seconds since epoch
return time.mktime(date.timetuple())
s = sinceEpoch
year = date.year
startOfThisYear = dt.datetime(year=year, month=1, day=1)
startOfNextYear = dt.datetime(year=year+1, month=1, day=1)
yearElapsed = s(date) - s(startOfThisYear)
yearDuration = s(startOfNextYear) - s(startOfThisYear)
fraction = yearElapsed/yearDuration
return date.year + fraction
#************************************************************************
def annual_from_monthly(indata):
try:
times = indata.times.reshape(-1, 12)
monthly = indata.data.reshape(-1, 12)
annuals = np.ma.mean(monthly, axis=1)
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import itertools
import platform
from functools import partial
import numpy as np
import pytest
from utils import opr_test
import megengine.amp as amp
import megengine.config as config
import megengine.core.ops.builtin as builtin
import megengine.core.tensor.dtype as dtype
import megengine.functional as F
import megengine.jit as jit
from megengine import Parameter, Tensor, is_cuda_available, tensor
from megengine.core._trace_option import use_symbolic_shape
from megengine.core.autodiff.grad import Grad
from megengine.core.tensor.utils import make_shape_tuple
from megengine.device import get_device_count
from megengine.module import LayerNorm
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.bool_)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.bool_)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where, test_trace=False)
maskv2 = np.array([1, 1, 1], dtype=np.bool_)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.bool_)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where, test_trace=False)
def test_dropout():
from megengine.autodiff import GradManager
from megengine.core._imperative_rt.ops import set_global_rng_seed
def test_dropout_with_shape(shape, rate):
data = tensor(np.ones(shape, dtype=np.float32))
gm = GradManager().attach([data])
with gm:
out = F.nn.dropout(data, rate, training=True)
gm.backward(out, tensor(np.ones(shape, dtype=np.float32)))
assert not out.numpy().all()
np.testing.assert_allclose(out.numpy(), data.grad.numpy(), 1e-7, 1e-7)
def test_multiple_dropout(shape, rate):
data = tensor(np.ones(shape, dtype=np.float32))
gm = GradManager().attach([data])
with gm:
out1 = F.nn.dropout(data, rate, training=True)
out2 = F.nn.dropout(out1, rate, training=True)
out3 = F.nn.dropout(out2, rate, training=True)
gm.backward(out3, tensor(np.ones(shape, dtype=np.float32)))
np.testing.assert_allclose(out3.numpy(), data.grad.numpy(), 1e-7, 1e-7)
def test_dropout_seed(shape, rate):
data = tensor(np.random.randn(*shape), dtype="float32")
set_global_rng_seed(111)
out1 = F.nn.dropout(data, rate, training=True)
out2 = F.nn.dropout(data, rate, training=True)
assert not (out1.numpy() == out2.numpy()).all()
set_global_rng_seed(111)
out3 = F.nn.dropout(data, rate, training=True)
assert (out1.numpy() == out3.numpy()).all()
set_global_rng_seed(222)
out4 = F.nn.dropout(data, rate, training=True)
assert not (out1.numpy() == out4.numpy()).all()
test_dropout_with_shape([13, 17, 63, 21], 0.4)
test_dropout_with_shape([16, 32, 64], 0.3)
test_multiple_dropout([1024], 0.2)
test_dropout_seed([16, 32], 0.2)
def test_matinv():
shape1 = (5, 5)
shape2 = (3, 9, 9)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
# make matrix diagonally dominant for numerical stability
data1 += (np.eye(shape1[0]) * shape1[0]).astype("float32")
data2 += np.broadcast_to((np.eye(shape2[1]) * shape2[1]).astype("float32"), shape2)
cases = [
{"input": data1},
{"input": data2},
]
opr_test(
cases,
F.matinv,
compare_fn=lambda x, y: np.testing.assert_allclose(x.numpy(), y, rtol=1e-4),
ref_fn=np.linalg.inv,
)
def test_matmul():
shape1 = 3
shape2 = 3
shape3 = (3, 5)
shape4 = (5, 6)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
data4 = np.random.random(shape4).astype("float32")
cases = [
{"input": [data1, data2]},
{"input": [data2, data3]},
{"input": [data3, data4]},
]
opr_test(cases, F.matmul, ref_fn=np.matmul)
batch_size = 10
shape1 = (2,)
shape2 = (batch_size, 2, 3)
shape3 = (batch_size, 3, 4)
shape4 = (batch_size, 10, 4, 2)
shape5 = (batch_size, 10, 2, 4)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
data4 = np.random.random(shape4).astype("float32")
data5 = np.random.random(shape5).astype("float32")
cases = [
{"input": [data1, data2]},
{"input": [data2, data3]},
{"input": [data3, data4]},
{"input": [data4, data5]},
]
opr_test(cases, F.matmul, ref_fn=np.matmul)
opr_test(
[{"input": [data1, data4]}],
F.matmul,
ref_fn=lambda x, y: np.matmul(x, y.transpose(0, 1, 3, 2)),
transpose_b=True,
)
opr_test(
[{"input": [data3, data2]}],
F.matmul,
ref_fn=lambda x, y: np.matmul(x.transpose(0, 2, 1), y.transpose(0, 2, 1)),
transpose_a=True,
transpose_b=True,
)
@pytest.mark.parametrize(
"shape_a, shape_b", [((0,), (0,)), ((10, 0), (0, 10)), ((3, 10, 0), (3, 0, 10)),],
)
@pytest.mark.parametrize("is_symbolic", [None, True, False])
def test_matmul_empty_tensor(shape_a, shape_b, is_symbolic):
def func(a, b):
return F.matmul(a, b)
if is_symbolic is not None:
func = jit.trace(symbolic=is_symbolic)(func)
a = tensor(np.random.randn(*shape_a))
b = tensor(np.random.randn(*shape_b))
for _ in range(3):
out = func(a, b)
assert np.all(out.numpy() == 0)
if is_symbolic is None:
break
def test_interpolate():
def linear_interpolate():
inp = tensor(np.arange(1, 3, dtype=np.float32))
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 29 13:35:12 2017
@author: yu
"""
import os
import numpy as np
import scipy.linalg as linalg
import cv2
import operator
import matplotlib.pyplot as plt
def ComputeNorm(x):
# function r=ComputeNorm(x)
# computes vector norms of x
# x: d x m matrix, each column a vector
# r: 1 x m matrix, each the corresponding norm (L2)
[row, col] = x.shape
r = np.zeros((1,col))
for i in range(col):
r[0,i] = linalg.norm(x[:,i])  # L2 norm of each column vector
return r
def myLDA(A,Labels):
# function [W,m]=myLDA(A,Label)
# computes LDA of matrix A
# A: D by N data matrix. Each column is a random vector
# W: D by K matrix whose columns are the discriminant directions in decreasing order
# m: mean of each projection
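    # Fisher LDA: build the within-class scatter S_w and the between-class scatter S_b,
    # solve the generalized eigenproblem S_b v = lambda * S_w v, and keep the
    # (classNum - 1) eigenvectors with the largest eigenvalues as discriminant axes.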
classLabels = np.unique(Labels)
classNum = len(classLabels)
dim,datanum = A.shape
totalMean = np.mean(A,1)
partition = [np.where(Labels==label)[0] for label in classLabels]
classMean = [(np.mean(A[:,idx],1),len(idx)) for idx in partition]
#compute the within-class scatter matrix
W = np.zeros((dim,dim))
for idx in partition:
W += np.cov(A[:,idx],rowvar=1)*len(idx)
#compute the between-class scatter matrix
B = np.zeros((dim,dim))
for mu,class_size in classMean:
offset = mu - totalMean
B += np.outer(offset,offset)*class_size
#solve the generalized eigenvalue problem for discriminant directions
ew, ev = linalg.eig(B, W)
sorted_pairs = sorted(enumerate(ew), key=operator.itemgetter(1), reverse=True)
selected_ind = [ind for ind,val in sorted_pairs[:classNum-1]]
LDAW = ev[:,selected_ind]
Centers = [np.dot(mu,LDAW) for mu,class_size in classMean]
Centers = np.array(Centers).T
return LDAW, Centers, classLabels
def myPCA(A):
# function [W,LL,m]=mypca(A)
# computes PCA of matrix A
# A: D by N data matrix. Each column is a random vector
# W: D by K matrix whose columns are the principal components in decreasing order
# LL: eigenvalues
# m: mean of columns of A
# Note: "lambda" is a Python reserved word
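    # Snapshot PCA: with far fewer images (N) than pixels (D), eigendecompose the
    # small N x N matrix A.T*A instead of the D x D covariance; if v is an
    # eigenvector of A.T*A then A*v is an eigenvector of A*A.T, so W = A*v below
    # recovers the principal directions.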
# compute mean, and subtract mean from every column
[r,c] = A.shape  # A is a 22400 x 120 matrix (pixels x images)
m = np.mean(A,1)  # mean of each row; m is a length-22400 vector
A = A - np.tile(m, (c,1)).T  # zero-centre the data; A is still 22400 x 120
B = np.dot(A.T, A)
[d,v] = linalg.eig(B)  # eigenvalues d (a vector of 120) and normalized eigenvectors v of A.T*A; column i of v pairs with d[i]
# sort d in descending order
order_index = np.argsort(d)
order_index = order_index[::-1]  # sort eigenvalues from largest to smallest
d = d[order_index]
v = v[:, order_index]  # reorder eigenvectors to match the sorted eigenvalues
# compute eigenvectors of scatter matrix
W = np.dot(A,v)  # Av is an eigenvector of A*A.T, so W holds eigenvectors of the scatter matrix
Wnorm = ComputeNorm(W)
W1 = np.tile(Wnorm, (r, 1))
W2 = W / W1  # normalize each eigenvector to unit length
LL = d[0:-1]  # eigenvalues, omitting the last one
W = W2[:,0:-1]  # omit last column, which is the nullspace (eigenvectors)
return W, LL, m
def read_faces(directory):
# function faces = read_faces(directory)
# Browse the directory, read image files and store faces in a matrix
# faces: face matrix in which each colummn is a colummn vector for 1 face image
# idLabels: corresponding ids for face matrix
A = [] # A will store list of image vectors
Label = [] # Label will store list of identity label
# browsing the directory
for f in os.listdir(directory):
if not f[-3:] =='bmp':
continue
infile = os.path.join(directory, f)
im = cv2.imread(infile, 0)
# turn an array into vector
im_vec = np.reshape(im, -1)
A.append(im_vec)
name = f.split('_')[0][-1]
Label.append(int(name))
faces = np.array(A, dtype=np.float32)
faces = faces.T
idLabel = np.array(Label)
return faces,idLabel
def float2uint8(arr):
mmin = arr.min()
mmax = arr.max()
arr = (arr-mmin)/(mmax-mmin)*255
arr = np.uint8(arr)
return arr
'''PCA feature'''
def PCA(k):
dir='C:/Users/yu/Desktop/face/train'
faces,idlable=read_faces(dir)
[r,c]=np.shape(faces)
W,LL,m=myPCA(faces)
We=W[:,:k]
y=np.dot(We.T,(faces-np.tile(m,(c,1)).T))
x=np.dot(We,y)+np.tile(m,(c,1)).T
return x,y,W,LL,m,We
'''LDA feature'''
def LDA(k):
dir='C:/Users/yu/Desktop/face/train'
faces,idlable=read_faces(dir)
[r,c]=np.shape(faces)
W,LL,m=myPCA(faces)
W1=W[:,:k]
x1=np.dot(W1.T,(faces-np.tile(m,(c,1)).T))
Wf,C,classlabel=myLDA(x1,idlable)
y=np.dot(Wf.T,np.dot(W1.T,(faces-np.tile(m,(c,1)).T)))
return y,Wf,W1,C,classlabel
'''enrollment'''
def enrollment(y1,C):#y1 is for PCA,C is for LDA
Z1=[]#PCA
Z2=[]#LDA, Z2 is exactly the Centers returned by myLDA function
for i in range(0,10):
y11=y1[:,i*12:(i*12+12)]
Z1.append(np.mean(y11,1))
Z1=np.transpose(Z1)
Z2=C
return Z1,Z2
def distance(z,b):  # Euclidean distance from each row of z to b
x=z.shape[0]
bb=np.tile(b,(x,1))
dis=bb-z
dis=dis**2
Dis=dis.sum(axis=1)
Dis=Dis**0.5
# dis=linalg.norm(z-bb)
return Dis
def ConMat(We,Wf,W1,m,z1,z2,alpha):
CM1=np.zeros((10,10))
CM2=np.zeros((10,10))
dir='C:/Users/yu/Desktop/face/test'
faces,idlabel=read_faces(dir)
[r,c] = np.shape(faces)
import copy
import numpy as np
from numba import jit
from State import State
import logging
#logging.basicConfig(filename="logs/Environment.log", level=logging.INFO)
class Environment:
id_cache={}
def __init__(self, NUM_ROWS, NUM_COLS, DEPTH):
self.state = None
self.prev_states = set()
self.duplicate_states = set()
self.draw_flag = False
self.turn = None
self.temp_cp = None
self.pieces = []
self.name = 'Gobblet'
self.NUM_ROWS = NUM_ROWS
self.NUM_COLS = NUM_COLS
self.DEPTH = DEPTH
self.reset()
def reset(self):
# resets the board to be empty and the turn to be 'X'
self.state = State(np.array([[0 for i in range(self.NUM_COLS)] for j in range(self.NUM_COLS)]))
self.moves_made = set()
self.duplicate_moves = set()
self.draw_flag = False
self.turn = 1
self.initialize_pieces()
# @jit(nopython=False)
def update(self, action, turn=0, check_legal=True):
# updates the board given an action represented as 2 indicies e.g. [0, 2]
# returns [next_state, result]
# where next_state is the board after action is taken
logging.info("Storing copy of board . . .")
self.temp_cp = self.copy()
piece, location = action
if type(piece) != dict:
piece = self.pieces[self.pieces_idx][piece]
location = (location // self.NUM_COLS, location % self.NUM_COLS)
logging.info("Ensuring the move is legal . . .")
if not self.is_legal((piece, location)):
if check_legal:
print(self.state)
raise ValueError("The action {} is not legal".format((str(piece), location)))
else:
return (self.state, 10*self.turn)
if turn == 0:
turn = self.turn
logging.info("Accounting for special rule . . .")
if isinstance(piece["location"], tuple):
# if the piece was on the board, set its origin to be empty
self.state.board[piece["location"]] = 0
# if it was covering another piece, propogate the change upwards
lower_loc = self.state.lower_layers[0][tuple(piece["location"])]
if lower_loc != 0:
self.undo_lower_layers(tuple(piece["location"]))
# update the board and the player
logging.info("Updating the board and pieces . . .")
prev_occupant = int(self.state.board[location])
self.state.board[location] = self.turn * piece["size"]
piece["location"] = location
self.update_pieces()
logging.info("Updating lower layers . . .")
if prev_occupant != 0:
self.update_lower_layers((piece, location), prev_occupant)
#
# if self["id"] in self.duplicate_states:
# self.draw_flag=True
# elif self.id in self.moves_made:
# self.duplicate_states.add(self.id)
# else:
# self.moves_made.add(self.id)
result = self.get_result(self.state)
# update the turn tracker
self.turn *= -1
return (self.copy(), result, result is not None)
def undo_move(self):
# undo the last move made
self.state = self.temp_cp.state
self.pieces = self.temp_cp.pieces
self.turn = self.temp_cp.turn
def undo_lower_layers(self, location, i=-1):
if i < 0:
layer = self.state.board
else:
layer = self.state.lower_layers[i]
next_layer = self.state.lower_layers[i+1]
dest = next_layer[location]
layer[location] = int(dest)
next_layer[location] = 0
# if i+2 > self.DEPTH-1 and self.state.lower_layers[i+2][location] != 0:
# self.undo_lower_layers(location, i+1)
#
exit_flag = False
for idx, stack in enumerate(self.pieces):
if exit_flag:
break
for p in stack:
if p["location"] == location and p["size"] == abs(dest):
if (idx == 0 and np.sign(dest) == 1) or (idx == 1 and np.sign(dest) == -1):
p["stack_number"] -= 1
exit_flag = True
def update_lower_layers(self, action, prev_occupant, i=0):
# propogate changes throughout lower layers recursively
piece, location = action
layer = self.state.lower_layers[i]
dest = layer[location]
if dest != 0 and i < 2:
try:
self.update_lower_layers(action, dest, i+1)
except IndexError:
print("IndexError in update_lower_layers")
dest = self.turn * piece["size"]
self.state.lower_layers[i, location[0], location[1]] = prev_occupant
exit_flag = False
for idx, stack in enumerate(self.pieces):
if exit_flag:
break
for p in stack:
if p["location"] == location and p["size"] == abs(prev_occupant):
if (idx == 0 and np.sign(prev_occupant) == 1) or (idx == 1 and np.sign(prev_occupant) == -1):
p["stack_number"] += 1
exit_flag = True
break
#@jit
def get_result(self, state):
# returns None if the game isn't over, 1 if white wins and -1 if black wins
# check rows
for row in state.board:
ones = np.sign(row)
if abs(sum(ones)) == self.NUM_ROWS:
return sum(ones) / self.NUM_ROWS
# check columns
cols = state.board.copy()
cols = cols.transpose()
for col in cols:
ones = np.sign(col)
if abs(sum(ones)) == self.NUM_COLS:
return sum(ones) / self.NUM_COLS
# check diagonals
diags = [state.board.diagonal(), np.fliplr(state.board).diagonal()]
for diag in diags:
ones = np.sign(diag)
if abs(sum(ones)) == self.NUM_ROWS:
return sum(ones) / self.NUM_ROWS
# check for draws
# that is, if three identical moves have been made, it's a draw
if self.draw_flag or len(self.prev_states) > 50:
#print("DRAW BY {}".format("REPETITION" if self.draw_flag else "NO MOVES"))
return 0
return None
def is_legal(self, action, verbose=True):
piece, location = action
if type(piece) != dict:
piece = self.pieces[self.pieces_idx][piece]
location = (location // self.NUM_COLS, location % self.NUM_COLS)
curr_piece = self.state.board[location]
# the piece has to be bigger than the one currently there
if not piece["stack_number"] == 0 or piece["size"] <= abs(curr_piece):
return False
# implement the rule that a new gobblet on the board must be on an empty space
if not isinstance(piece["location"], tuple) and curr_piece != 0:
# exception: if there is three in a row through the desired location, the move is valid
row = self.state.board[location[0]]
col = self.state.board[:, location[1]]
diag = [0 for i in range(self.NUM_ROWS)]
if location[0]==location[1]:
diag = self.state.board.diagonal()
elif location[0]+location[1] == self.NUM_ROWS-1:
diag = np.fliplr(self.state.board).diagonal()
flag = False
for i in [row, col, diag]:
if flag:
break
counter = 0
for j in np.squeeze(i):
if j != 0:
counter += 1
if counter==3:
flag = True
break
if not flag:
return False
return True
def get_legal_moves_idxs(self):
# returns the legal moves that can be taken
moves = []
illegal_moves = []
add_move = moves.append
is_valid_move = self.is_legal
for idx, i in enumerate(self.state.board):
for jIdx, j in enumerate(i):
for piece in self.pieces[self.pieces_idx]:
move = (piece, (idx, jIdx))
if is_valid_move(move):
add_move((piece["id"], idx*self.NUM_COLS + jIdx))
return moves
def initialize_pieces(self):
self.pieces = [[{"location":0, "size":4, "stack_number":0, "id":0}, {"location":1, "size":3, "stack_number":1, "id":1}, {"location":2, "size":2, "stack_number":2, "id":2}, {"location":3, "size":1, "stack_number":3, "id":3},
{"location":0, "size":4, "stack_number":0, "id":4}, {"location":1, "size":3, "stack_number":1, "id":5}, {"location":2, "size":2, "stack_number":2, "id":6}, {"location":3, "size":1, "stack_number":3, "id":7},
{"location":0, "size":4, "stack_number":0, "id":8}, {"location":1, "size":3, "stack_number":1, "id":9}, {"location":2, "size":2, "stack_number":2, "id":10}, {"location":3, "size":1, "stack_number":3, "id":11}],
[{"location":0, "size":4, "stack_number":0, "id":0}, {"location":1, "size":3, "stack_number":1, "id":1}, {"location":2, "size":2, "stack_number":2, "id":2}, {"location":3, "size":1, "stack_number":3, "id":3},
{"location":0, "size":4, "stack_number":0, "id":4}, {"location":1, "size":3, "stack_number":1, "id":5}, {"location":2, "size":2, "stack_number":2, "id":6}, {"location":3, "size":1, "stack_number":3, "id":7},
{"location":0, "size":4, "stack_number":0, "id":8}, {"location":1, "size":3, "stack_number":1, "id":9}, {"location":2, "size":2, "stack_number":2, "id":10}, {"location":3, "size":1, "stack_number":3, "id":11}]
]
@property
def pieces_idx(self):
return 0 if self.turn == 1 else 1
def update_pieces(self):
for player in self.pieces:
for piece in player:
if piece["id"] % 4 == 0:
counter = 0
if isinstance(piece["location"], tuple):
continue
piece["location"] = counter
piece["stack_number"] = counter
counter += 1
@property
def binary(self):
# convert the state to a binary matrix
# final format will be 4 x 4 x 9 (ignore hidden pieces and use only current state for now)
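        # The 9 planes are: four one-hot planes per player marking where each piece
        # size sits on the 4 x 4 board (size 4 maps to plane index 0), plus a colour
        # plane that is all ones on player 1's turn and all zeros otherwise.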
plane_dim = self.state.board.shape
color = np.zeros(plane_dim) if self.turn == -1 else np.ones(plane_dim)
board = self.state.board.reshape(-1) #np.append(self.state.board.reshape(-1), self.state.lower_layers.reshape(-1))
p1_pos = np.zeros((plane_dim[0], plane_dim[1], 4))
p1_locs = np.argwhere(np.sign(board.reshape(plane_dim))==1)
p1_pos[4-np.abs(self.state.board[p1_locs[:,0], p1_locs[:,1]]), p1_locs[:,0], p1_locs[:,1]] = 1
p2_pos = np.zeros((plane_dim[0], plane_dim[1], 4))
p2_locs = np.argwhere(np.sign(board.reshape(plane_dim))==-1)
p2_pos[4-np.abs(self.state.board[p2_locs[:,0], p2_locs[:,1]]), p2_locs[:,0], p2_locs[:,1]] = 1
p2_pos = np.append(p2_pos, color)
"""
<NAME>
University of Manitoba
November 8th, 2018
"""
import numpy as np
from umbms.beamform.extras import get_xy_arrs
###############################################################################
# The vacuum permeability and permittivity
vac_permeability = 1.256637e-6
vac_permittivity = 8.85e-12
vac_speed = 3e8 # The speed of light in vacuum
# The permittivities of the breast tissue analogs used in the lab at the
# central frequency (glycerin for fat, 30% Triton X-100 solution for
# fibroglandular, and saline solution for tumor)
measured_air_perm = 1
measured_adi_perm = 7.08
measured_fib_perm = 44.94
measured_tum_perm = 77.11
###############################################################################
def get_roi(roi_rad, m_size, ant_rad):
"""Return binary mask for central circular region of interest
Returns a binary mask in which a circular region of interest is set
to True and the region outside is set to False
Parameters
----------
roi_rad : float
The radius (in meters) of the inner circular region of interest
m_size : int
The number of pixels along one dimension used to define the
model-space
ant_rad : float
The radius of the antenna scan trajectory in meters
"""
# Get arrays for the x,y positions of each pixel
pix_xs, pix_ys = get_xy_arrs(m_size, ant_rad)
# Find the distance from each pixel to the center of the model space
pix_dist_from_center = np.sqrt(pix_xs**2 + pix_ys**2)
# Get the region of interest as all the pixels inside the
# circle-of-interest
roi = np.zeros([m_size, m_size], dtype=bool)
roi[pix_dist_from_center < roi_rad] = True
return roi
def get_breast(m_size=500, ant_rad=0.21, adi_rad=0.00, adi_x=0.0, adi_y=0.0,
fib_rad=0.0, fib_x=0.0, fib_y=0.0, tum_rad=0.0, tum_x=0.0375,
tum_y=0.0375, skin_thickness=0.0, adi_perm=6.4, fib_perm=42.2,
tum_perm=75.4, skin_perm=40, air_perm=1.0):
"""Returns a 2D breast model
Returns a breast model containing selected tissue components.
Each tissue (excluding skin) is modeled using a circular region
and assigned a permittivity corresponding to the measured
permittivity at the central scan frequency of the corresponding
tissue surrogate.
Parameters
----------
m_size : int
The number of pixels along one dimension in the model space
ant_rad : float
The radius (in meters) of the antenna trajectory during the scan
adi_rad : float
The radius of the adipose tissue component in meters
adi_x : float
The offset of the adipose tissue component in the x-direction in
meters
adi_y : float
The offset of the adipose tissue component in the y-direction in
meters
fib_rad : float
The radius of the fibroglandular tissue component in meters
fib_x : float
The offset of the fibroglandular tissue component in the x-direction in
meters
fib_y : float
The offset of the fibroglandular tissue component in the y-direction in
meters
tum_rad : float
The radius of the tumor tissue component in meters
tum_x : float
The offset of the tumor tissue component in the x-direction in
meters
tum_y : float
The offset of the tumor tissue component in the y-direction in
meters
skin_thickness : float
The thickness of the skin tissue component in meters
adi_perm : float
The permittivity of the adipose component
fib_perm : float
The permittivity of the fibroglandular component
tum_perm : float
The permittivity of the tumor component
skin_perm : float
The permittivity of the skin component
air_perm : float
The permittivity of the surrounding medium (assumed to be air)
Returns
-------
breast_model : array_like
2D arr containing the breast model
"""
# Get the pixel x,y-positions
pix_xs, pix_ys = get_xy_arrs(m_size, ant_rad)
# Compute the pixel distances from the center of each tissue
# component (excluding skin)
pix_dist_from_adi = np.sqrt((pix_xs - adi_x)**2 + (pix_ys - adi_y)**2)
import numpy as np
import pyart
import scipy.ndimage.filters
def J_function(winds, parameters):
"""
Calculates the total cost function. This typically does not need to be
called directly as get_dd_wind_field is a wrapper around this function and
:py:func:`pydda.cost_functions.grad_J`.
In order to add more terms to the cost function, modify this
function and :py:func:`pydda.cost_functions.grad_J`.
Parameters
----------
winds: 1-D float array
The wind field, flattened to 1-D for f_min. The total size of the
array will be a 1D array of 3*nx*ny*nz elements.
parameters: DDParameters
The parameters for the cost function evaluation as specified by the
:py:func:`pydda.retrieval.DDParameters` class.
Returns
-------
J: float
The value of the cost function
"""
winds = np.reshape(winds,
(3, parameters.grid_shape[0], parameters.grid_shape[1],
parameters.grid_shape[2]))
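    # winds arrived flattened (scipy.optimize works on 1-D vectors) and has just been
    # reshaped to (3, nz, ny, nx); the total cost is the sum of every term below whose
    # coefficient is nonzero: J = Jvel + Jmass + Jsmooth + Jbackground + Jvorticity
    # + Jmodel + Jpoint.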
Jvel = calculate_radial_vel_cost_function(
parameters.vrs, parameters.azs, parameters.els,
winds[0], winds[1], winds[2], parameters.wts, rmsVr=parameters.rmsVr,
weights=parameters.weights, coeff=parameters.Co)
if(parameters.Cm > 0):
Jmass = calculate_mass_continuity(
winds[0], winds[1], winds[2], parameters.z,
parameters.dx, parameters.dy, parameters.dz,
coeff=parameters.Cm)
else:
Jmass = 0
if(parameters.Cx > 0 or parameters.Cy > 0 or parameters.Cz > 0):
Jsmooth = calculate_smoothness_cost(
winds[0], winds[1], winds[2], Cx=parameters.Cx,
Cy=parameters.Cy, Cz=parameters.Cz)
else:
Jsmooth = 0
if(parameters.Cb > 0):
Jbackground = calculate_background_cost(
winds[0], winds[1], winds[2], parameters.bg_weights,
parameters.u_back, parameters.v_back, parameters.Cb)
else:
Jbackground = 0
if(parameters.Cv > 0):
Jvorticity = calculate_vertical_vorticity_cost(
winds[0], winds[1], winds[2], parameters.dx,
parameters.dy, parameters.dz, parameters.Ut,
parameters.Vt, coeff=parameters.Cv)
else:
Jvorticity = 0
if(parameters.Cmod > 0):
Jmod = calculate_model_cost(
winds[0], winds[1], winds[2],
parameters.model_weights, parameters.u_model,
parameters.v_model,
parameters.w_model, coeff=parameters.Cmod)
else:
Jmod = 0
if parameters.Cpoint > 0:
Jpoint = calculate_point_cost(
winds[0], winds[1], parameters.x, parameters.y, parameters.z,
parameters.point_list, Cp=parameters.Cpoint, roi=parameters.roi)
else:
Jpoint = 0
if(parameters.print_out is True):
print(('| Jvel | Jmass | Jsmooth | Jbg | Jvort | Jmodel | Jpoint |' +
' Max w '))
print(('|' + "{:9.4f}".format(Jvel) + '|' +
"{:9.4f}".format(Jmass) + '|' +
"{:9.4f}".format(Jsmooth) + '|' +
"{:9.4f}".format(Jbackground) + '|' +
"{:9.4f}".format(Jvorticity) + '|' +
"{:9.4f}".format(Jmod) + '|' +
"{:9.4f}".format(Jpoint)) + '|' +
"{:9.4f}".format(np.ma.max(np.ma.abs(winds[2]))))
return Jvel + Jmass + Jsmooth + Jbackground + Jvorticity + Jmod + Jpoint
def grad_J(winds, parameters):
"""
Calculates the gradient of the cost function. This typically does not need
to be called directly as get_dd_wind_field is a wrapper around this
function and :py:func:`pydda.cost_functions.J_function`.
In order to add more terms to the cost function,
modify this function and :py:func:`pydda.cost_functions.grad_J`.
Parameters
----------
winds: 1-D float array
The wind field, flattened to 1-D for f_min
parameters: DDParameters
The parameters for the cost function evaluation as specified by the
:py:func:`pydda.retrieval.DDParameters` class.
Returns
-------
grad: 1D float array
Gradient vector of cost function
"""
winds = np.reshape(winds,
(3, parameters.grid_shape[0],
parameters.grid_shape[1], parameters.grid_shape[2]))
grad = calculate_grad_radial_vel(
parameters.vrs, parameters.els, parameters.azs,
winds[0], winds[1], winds[2], parameters.wts, parameters.weights,
parameters.rmsVr, coeff=parameters.Co, upper_bc=parameters.upper_bc)
if(parameters.Cm > 0):
grad += calculate_mass_continuity_gradient(
winds[0], winds[1], winds[2], parameters.z,
parameters.dx, parameters.dy, parameters.dz,
coeff=parameters.Cm, upper_bc=parameters.upper_bc)
if(parameters.Cx > 0 or parameters.Cy > 0 or parameters.Cz > 0):
grad += calculate_smoothness_gradient(
winds[0], winds[1], winds[2], Cx=parameters.Cx,
Cy=parameters.Cy, Cz=parameters.Cz, upper_bc=parameters.upper_bc)
if(parameters.Cb > 0):
grad += calculate_background_gradient(
winds[0], winds[1], winds[2], parameters.bg_weights,
parameters.u_back, parameters.v_back, parameters.Cb,
upper_bc=parameters.upper_bc)
if(parameters.Cv > 0):
grad += calculate_vertical_vorticity_gradient(
winds[0], winds[1], winds[2], parameters.dx,
parameters.dy, parameters.dz, parameters.Ut,
parameters.Vt, coeff=parameters.Cv)
if(parameters.Cmod > 0):
grad += calculate_model_gradient(
winds[0], winds[1], winds[2],
parameters.model_weights, parameters.u_model, parameters.v_model,
parameters.w_model, coeff=parameters.Cmod)
if parameters.Cpoint > 0:
grad += calculate_point_gradient(
winds[0], winds[1], parameters.x, parameters.y, parameters.z,
parameters.point_list, Cp=parameters.Cpoint, roi=parameters.roi)
if(parameters.print_out is True):
print('Norm of gradient: ' + str(np.linalg.norm(grad, np.inf)))
return grad
def calculate_radial_vel_cost_function(vrs, azs, els, u, v,
w, wts, rmsVr, weights, coeff=1.0):
"""
Calculates the cost function due to difference of the wind field from
radar radial velocities. For more information on this cost function, see
Potvin et al. (2012) and Shapiro et al. (2009).
All arrays in the given lists must have the same dimensions and represent
the same spatial coordinates.
Parameters
----------
vrs: List of float arrays
List of radial velocities from each radar
els: List of float arrays
List of elevations from each radar
azs: List of float arrays
List of azimuths from each radar
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
w: Float array
Float array with w component of wind field
wts: List of float arrays
Float array containing fall speed from radar.
rmsVr: float
The sum of squares of velocity/num_points. Use for normalization
of data weighting coefficient
weights: n_radars x_bins x y_bins float array
Data weights for each pair of radars
coeff: float
Constant for cost function
Returns
-------
J_o: float
Observational cost function
References
-----------
<NAME>., <NAME>, and <NAME>, 2012: Impact of a Vertical Vorticity
Constraint in Variational Dual-Doppler Wind Analysis: Tests with Real and
Simulated Supercell Data. J. Atmos. Oceanic Technol., 29, 32–49,
https://doi.org/10.1175/JTECH-D-11-00019.1
<NAME>., <NAME>, and <NAME>, 2009: Use of a Vertical Vorticity
Equation in Variational Dual-Doppler Wind Analysis. J. Atmos. Oceanic
Technol., 26, 2089–2106, https://doi.org/10.1175/2009JTECHA1256.1
"""
J_o = 0
lambda_o = coeff / (rmsVr * rmsVr)
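    # For each radar, project the analysis wind onto the radar beam,
    #   v_r = u*cos(el)*sin(az) + v*cos(el)*cos(az) + (w - |w_t|)*sin(el),
    # and accumulate lambda_o * sum(weights * (vr_obs - v_r)**2), where
    # lambda_o = coeff / rmsVr**2 normalizes by the RMS observed velocity.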
for i in range(len(vrs)):
v_ar = (np.cos(els[i])*np.sin(azs[i])*u +
np.cos(els[i])*np.cos(azs[i])*v +
np.sin(els[i])*(w - np.abs(wts[i])))
the_weight = weights[i]
the_weight[els[i].mask] = 0
the_weight[azs[i].mask] = 0
the_weight[vrs[i].mask] = 0
the_weight[wts[i].mask] = 0
J_o += lambda_o*np.sum(np.square(vrs[i] - v_ar)*the_weight)
return J_o
def calculate_grad_radial_vel(vrs, els, azs, u, v, w,
wts, weights, rmsVr, coeff=1.0, upper_bc=True):
"""
Calculates the gradient of the cost function due to difference of wind
field from radar radial velocities.
All arrays in the given lists must have the same dimensions and represent
the same spatial coordinates.
Parameters
----------
vrs: List of float arrays
List of radial velocities from each radar
els: List of float arrays
List of elevations from each radar
    azs: List of float arrays
List of azimuths from each radar
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
w: Float array
Float array with w component of wind field
    wts: List of float arrays
        List of fall speeds from each radar
    weights: n_radars by z_bins by y_bins by x_bins float array
        Data weights for each radar
    rmsVr: float
        The sum of squares of velocity/num_points. Used for normalization
        of the data weighting coefficient
    coeff: float
        Constant for cost function
    upper_bc: bool
        If True, impose the impermeability condition (w = 0) at the top level
Returns
-------
y: 1-D float array
Gradient vector of observational cost function.
More information
----------------
The gradient is calculated by taking the functional derivative of the
cost function. For more information on functional derivatives, see the
Euler-Lagrange Equation:
https://en.wikipedia.org/wiki/Euler%E2%80%93Lagrange_equation
"""
# Use zero for all masked values since we don't want to add them into
# the cost function
p_x1 = np.zeros(vrs[0].shape)
p_y1 = np.zeros(vrs[0].shape)
p_z1 = np.zeros(vrs[0].shape)
lambda_o = coeff / (rmsVr * rmsVr)
for i in range(len(vrs)):
v_ar = (np.cos(els[i])*np.sin(azs[i])*u +
np.cos(els[i])*np.cos(azs[i])*v +
np.sin(els[i])*(w - np.abs(wts[i])))
x_grad = (2*(v_ar - vrs[i]) * np.cos(els[i]) *
np.sin(azs[i]) * weights[i]) * lambda_o
y_grad = (2*(v_ar - vrs[i]) * np.cos(els[i]) *
np.cos(azs[i]) * weights[i]) * lambda_o
z_grad = (2*(v_ar - vrs[i]) * np.sin(els[i]) * weights[i]) * lambda_o
        x_grad[els[i].mask] = 0
        x_grad[azs[i].mask] = 0
        x_grad[vrs[i].mask] = 0
        x_grad[wts[i].mask] = 0
        y_grad[els[i].mask] = 0
        y_grad[azs[i].mask] = 0
        y_grad[vrs[i].mask] = 0
        y_grad[wts[i].mask] = 0
        z_grad[els[i].mask] = 0
        z_grad[azs[i].mask] = 0
        z_grad[vrs[i].mask] = 0
        z_grad[wts[i].mask] = 0
p_x1 += x_grad
p_y1 += y_grad
p_z1 += z_grad
# Impermeability condition
p_z1[0, :, :] = 0
if(upper_bc is True):
p_z1[-1, :, :] = 0
y = np.stack((p_x1, p_y1, p_z1), axis=0)
return y.flatten()
def calculate_smoothness_cost(u, v, w, Cx=1e-5, Cy=1e-5, Cz=1e-5):
"""
Calculates the smoothness cost function by taking the Laplacian of the
wind field.
All arrays in the given lists must have the same dimensions and represent
the same spatial coordinates.
Parameters
----------
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
w: Float array
Float array with w component of wind field
Cx: float
Constant controlling smoothness in x-direction
Cy: float
Constant controlling smoothness in y-direction
Cz: float
Constant controlling smoothness in z-direction
Returns
-------
Js: float
value of smoothness cost function
"""
du = np.zeros(w.shape)
dv = np.zeros(w.shape)
dw = np.zeros(w.shape)
scipy.ndimage.filters.laplace(u, du, mode='wrap')
scipy.ndimage.filters.laplace(v, dv, mode='wrap')
scipy.ndimage.filters.laplace(w, dw, mode='wrap')
return np.sum(Cx*du**2 + Cy*dv**2 + Cz*dw**2)
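# Illustrative sketch (not part of the original module): a spatially uniform
# wind field has zero Laplacian everywhere, so its smoothness penalty should be
# zero, while adding grid-scale noise to one component makes the penalty
# strictly positive. Array sizes and values are arbitrary.
def _example_smoothness_cost():
    shape = (4, 5, 5)
    u = np.full(shape, 10.0)
    v = np.full(shape, -3.0)
    w = np.zeros(shape)
    smooth_cost = calculate_smoothness_cost(u, v, w)
    noisy_cost = calculate_smoothness_cost(u + 0.5 * np.random.randn(*shape), v, w)
    return smooth_cost, noisy_cost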
def calculate_smoothness_gradient(u, v, w, Cx=1e-5, Cy=1e-5, Cz=1e-5,
upper_bc=True):
"""
Calculates the gradient of the smoothness cost function
by taking the Laplacian of the Laplacian of the wind field.
All arrays in the given lists must have the same dimensions and represent
the same spatial coordinates.
Parameters
----------
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
w: Float array
Float array with w component of wind field
Cx: float
Constant controlling smoothness in x-direction
Cy: float
Constant controlling smoothness in y-direction
Cz: float
Constant controlling smoothness in z-direction
Returns
-------
y: float array
value of gradient of smoothness cost function
"""
du = np.zeros(w.shape)
dv = np.zeros(w.shape)
dw = np.zeros(w.shape)
grad_u = np.zeros(w.shape)
grad_v = np.zeros(w.shape)
grad_w = np.zeros(w.shape)
scipy.ndimage.filters.laplace(u, du, mode='wrap')
scipy.ndimage.filters.laplace(v, dv, mode='wrap')
scipy.ndimage.filters.laplace(w, dw, mode='wrap')
scipy.ndimage.filters.laplace(du, grad_u, mode='wrap')
scipy.ndimage.filters.laplace(dv, grad_v, mode='wrap')
scipy.ndimage.filters.laplace(dw, grad_w, mode='wrap')
# Impermeability condition
grad_w[0, :, :] = 0
if(upper_bc is True):
grad_w[-1, :, :] = 0
y = np.stack([grad_u*Cx*2, grad_v*Cy*2, grad_w*Cz*2], axis=0)
return y.flatten()
def calculate_point_cost(u, v, x, y, z, point_list, Cp=1e-3, roi=500.0):
"""
Calculates the cost function related to point observations. A mean square error cost
function term is applied to points that are within the sphere of influence
whose radius is determined by *roi*.
Parameters
----------
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
x: Float array
X coordinates of grid centers
y: Float array
Y coordinates of grid centers
z: Float array
        Z coordinates of grid centers
point_list: list of dicts
List of point constraints.
Each member is a dict with keys of "u", "v", to correspond
to each component of the wind field and "x", "y", "z"
to correspond to the location of the point observation.
In addition, "site_id" gives the METAR code (or name) to the station.
Cp: float
The weighting coefficient of the point cost function.
roi: float
Radius of influence of observations
Returns
-------
J: float
The cost function related to the difference between wind field and points.
"""
J = 0.0
for the_point in point_list:
# Instead of worrying about whole domain, just find points in radius of influence
# Since we know that the weight will be zero outside the sphere of influence anyways
the_box = np.where(np.logical_and.reduce(
(np.abs(x - the_point["x"]) < roi, np.abs(y - the_point["y"]) < roi,
np.abs(z - the_point["z"]) < roi)))
J += np.sum(((u[the_box] - the_point["u"])**2 + (v[the_box] - the_point["v"])**2))
return J * Cp
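# Illustrative sketch (not part of the original module): one hypothetical
# surface station (the "site_id" below is made up) is compared against a
# uniform wind field that is 1 m/s too fast in u. Only the single grid point
# inside the 500 m radius of influence contributes to the returned cost.
def _example_point_cost():
    z_g, y_g, x_g = np.meshgrid(np.arange(2) * 500.0,
                                np.arange(4) * 1000.0,
                                np.arange(4) * 1000.0, indexing='ij')
    u = np.full(z_g.shape, 6.0)
    v = np.full(z_g.shape, 2.0)
    point_list = [{"u": 5.0, "v": 2.0,
                   "x": 1000.0, "y": 1000.0, "z": 0.0,
                   "site_id": "HYPOTHETICAL"}]
    return calculate_point_cost(u, v, x_g, y_g, z_g, point_list,
                                Cp=1e-3, roi=500.0)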
def calculate_point_gradient(u, v, x, y, z, point_list, Cp=1e-3, roi=500.0):
"""
Calculates the gradient of the cost function related to point observations.
A mean square error cost function term is applied to points that are within the sphere of influence
whose radius is determined by *roi*.
Parameters
----------
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
x: Float array
X coordinates of grid centers
y: Float array
Y coordinates of grid centers
z: Float array
        Z coordinates of grid centers
point_list: list of dicts
List of point constraints. Each member is a dict with keys of "u", "v",
to correspond to each component of the wind field and "x", "y", "z"
to correspond to the location of the point observation.
        In addition, "site_id" gives the METAR code (or name) of the station.
Cp: float
The weighting coefficient of the point cost function.
roi: float
Radius of influence of observations
Returns
-------
gradJ: float array
The gradient of the cost function related to the difference between wind field and points.
"""
gradJ_u = np.zeros_like(u)
gradJ_v = np.zeros_like(v)
gradJ_w = np.zeros_like(u)
for the_point in point_list:
the_box = np.where(np.logical_and.reduce(
(np.abs(x - the_point["x"]) < roi, np.abs(y - the_point["y"]) < roi,
np.abs(z - the_point["z"]) < roi)))
gradJ_u[the_box] += 2 * (u[the_box] - the_point["u"])
gradJ_v[the_box] += 2 * (v[the_box] - the_point["v"])
gradJ = np.stack([gradJ_u, gradJ_v, gradJ_w], axis=0).flatten()
return gradJ * Cp
def calculate_mass_continuity(u, v, w, z, dx, dy, dz, coeff=1500.0, anel=1):
"""
Calculates the mass continuity cost function by taking the divergence
of the wind field.
All arrays in the given lists must have the same dimensions and represent
the same spatial coordinates.
Parameters
----------
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
w: Float array
Float array with w component of wind field
dx: float
Grid spacing in x direction.
dy: float
Grid spacing in y direction.
dz: float
Grid spacing in z direction.
z: Float array (1D)
1D Float array with heights of grid
coeff: float
Constant controlling contribution of mass continuity to cost function
anel: int
        1 = use the anelastic approximation, 0 = don't
Returns
-------
J: float
value of mass continuity cost function
"""
dudx = np.gradient(u, dx, axis=2)
dvdy = np.gradient(v, dy, axis=1)
dwdz = np.gradient(w, dz, axis=0)
if(anel == 1):
rho = np.exp(-z/10000.0)
drho_dz = np.gradient(rho, dz, axis=0)
anel_term = w/rho*drho_dz
else:
anel_term = np.zeros(w.shape)
return coeff*np.sum(np.square(dudx + dvdy + dwdz + anel_term))/2.0
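# Illustrative sketch (not part of the original module): a horizontally uniform
# flow with no vertical motion is non-divergent, so the mass continuity penalty
# should be essentially zero. The height array is built on the full 3-D grid so
# that the anelastic term broadcasts against w.
def _example_mass_continuity():
    nz, ny, nx = 4, 5, 5
    dx = dy = 1000.0
    dz = 500.0
    z_grid = np.tile((np.arange(nz) * dz)[:, None, None], (1, ny, nx))
    u = np.full((nz, ny, nx), 10.0)
    v = np.full((nz, ny, nx), 5.0)
    w = np.zeros((nz, ny, nx))
    return calculate_mass_continuity(u, v, w, z_grid, dx, dy, dz)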
def calculate_mass_continuity_gradient(u, v, w, z, dx,
dy, dz, coeff=1500.0, anel=1,
upper_bc=True):
"""
Calculates the gradient of mass continuity cost function. This is done by
taking the negative gradient of the divergence of the wind field.
All grids must have the same grid specification.
Parameters
----------
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
w: Float array
Float array with w component of wind field
z: Float array (1D)
1D Float array with heights of grid
dx: float
Grid spacing in x direction.
dy: float
Grid spacing in y direction.
dz: float
Grid spacing in z direction.
coeff: float
Constant controlling contribution of mass continuity to cost function
anel: int
        1 = use the anelastic approximation, 0 = don't
Returns
-------
y: float array
value of gradient of mass continuity cost function
"""
dudx = np.gradient(u, dx, axis=2)
dvdy = np.gradient(v, dy, axis=1)
dwdz = np.gradient(w, dz, axis=0)
if(anel == 1):
rho = np.exp(-z/10000.0)
drho_dz = np.gradient(rho, dz, axis=0)
anel_term = w/rho*drho_dz
else:
anel_term = 0
div2 = dudx + dvdy + dwdz + anel_term
grad_u = -np.gradient(div2, dx, axis=2)*coeff
grad_v = -np.gradient(div2, dy, axis=1)*coeff
grad_w = -np.gradient(div2, dz, axis=0)*coeff
# Impermeability condition
grad_w[0, :, :] = 0
if(upper_bc is True):
grad_w[-1, :, :] = 0
y = np.stack([grad_u, grad_v, grad_w], axis=0)
return y.flatten()
def calculate_fall_speed(grid, refl_field=None, frz=4500.0):
"""
Estimates fall speed based on reflectivity.
Uses methodology of <NAME> and <NAME>
Parameters
----------
Grid: Py-ART Grid
Py-ART Grid containing reflectivity to calculate fall speed from
refl_field: str
String containing name of reflectivity field. None will automatically
determine the name.
frz: float
Height of freezing level in m
Returns
-------
3D float array:
Float array of terminal velocities
"""
    # Parse the name of the reflectivity field
if refl_field is None:
refl_field = pyart.config.get_field_name('reflectivity')
refl = grid.fields[refl_field]['data']
grid_z = grid.point_z['data']
term_vel = np.zeros(refl.shape)
A = np.zeros(refl.shape)
B = np.zeros(refl.shape)
rho = np.exp(-grid_z/10000.0)
A[np.logical_and(grid_z < frz, refl < 55)] = -2.6
B[np.logical_and(grid_z < frz, refl < 55)] = 0.0107
A[np.logical_and(grid_z < frz,
np.logical_and(refl >= 55, refl < 60))] = -2.5
B[np.logical_and(grid_z < frz,
np.logical_and(refl >= 55, refl < 60))] = 0.013
A[np.logical_and(grid_z < frz, refl > 60)] = -3.95
B[np.logical_and(grid_z < frz, refl > 60)] = 0.0148
A[np.logical_and(grid_z >= frz, refl < 33)] = -0.817
B[np.logical_and(grid_z >= frz, refl < 33)] = 0.0063
A[np.logical_and(grid_z >= frz,
np.logical_and(refl >= 33, refl < 49))] = -2.5
B[np.logical_and(grid_z >= frz,
np.logical_and(refl >= 33, refl < 49))] = 0.013
A[np.logical_and(grid_z >= frz, refl > 49)] = -3.95
B[np.logical_and(grid_z >= frz, refl > 49)] = 0.0148
fallspeed = A*np.power(10, refl*B)*np.power(1.2/rho, 0.4)
del A, B, rho
return fallspeed
def calculate_background_cost(u, v, w, weights, u_back, v_back, Cb=0.01):
"""
Calculates the background cost function. The background cost function is
simply the sum of the squared differences between the wind field and the
background wind field multiplied by the weighting coefficient.
Parameters
----------
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
w: Float array
Float array with w component of wind field
weights: Float array
Weights for each point to consider into cost function
u_back: 1D float array
Zonal winds vs height from sounding
    v_back: 1D float array
Meridional winds vs height from sounding
Cb: float
Weight of background constraint to total cost function
Returns
-------
cost: float
value of background cost function
"""
the_shape = u.shape
cost = 0
for i in range(the_shape[0]):
cost += (Cb*np.sum(np.square(u[i]-u_back[i])*(weights[i]) +
np.square(v[i]-v_back[i])*(weights[i])))
return cost
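# Illustrative sketch (not part of the original module): when the analysis wind
# equals the sounding profile at every level the background penalty vanishes;
# perturbing u by 1 m/s at one level adds Cb times the number of weighted
# points at that level. All profile values are made up.
def _example_background_cost():
    nz, ny, nx = 3, 4, 4
    u_back = np.array([5.0, 8.0, 12.0])
    v_back = np.array([1.0, 0.0, -2.0])
    u = np.broadcast_to(u_back[:, None, None], (nz, ny, nx)).copy()
    v = np.broadcast_to(v_back[:, None, None], (nz, ny, nx)).copy()
    w = np.zeros((nz, ny, nx))
    weights = np.ones((nz, ny, nx))
    exact = calculate_background_cost(u, v, w, weights, u_back, v_back, Cb=0.01)
    u[1] += 1.0
    perturbed = calculate_background_cost(u, v, w, weights, u_back, v_back, Cb=0.01)
    return exact, perturbed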
def calculate_background_gradient(u, v, w, weights, u_back, v_back, Cb=0.01):
"""
Calculates the gradient of the background cost function. For each u, v
    this is given as 2*coefficient*(analysis wind - background wind).
Parameters
----------
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
w: Float array
Float array with w component of wind field
weights: Float array
Weights for each point to consider into cost function
u_back: 1D float array
Zonal winds vs height from sounding
    v_back: 1D float array
Meridional winds vs height from sounding
Cb: float
Weight of background constraint to total cost function
Returns
-------
y: float array
value of gradient of background cost function
"""
the_shape = u.shape
u_grad = np.zeros(the_shape)
v_grad = np.zeros(the_shape)
w_grad = np.zeros(the_shape)
for i in range(the_shape[0]):
u_grad[i] = Cb*2*(u[i]-u_back[i])*(weights[i])
v_grad[i] = Cb*2*(v[i]-v_back[i])*(weights[i])
y = np.stack([u_grad, v_grad, w_grad], axis=0)
return y.flatten()
def calculate_vertical_vorticity_cost(u, v, w, dx, dy, dz, Ut, Vt,
coeff=1e-5):
"""
    Calculates the cost function due to deviation from the vertical vorticity
    equation. For more information on the vertical vorticity cost function,
see Potvin et al. (2012) and Shapiro et al. (2009).
Parameters
----------
u: 3D array
Float array with u component of wind field
v: 3D array
Float array with v component of wind field
w: 3D array
Float array with w component of wind field
dx: float array
Spacing in x grid
dy: float array
Spacing in y grid
dz: float array
Spacing in z grid
coeff: float
Weighting coefficient
Ut: float
U component of storm motion
Vt: float
V component of storm motion
Returns
-------
Jv: float
Value of vertical vorticity cost function.
References
----------
Potvin, C.K., <NAME>, and <NAME>, 2012: Impact of a Vertical Vorticity
Constraint in Variational Dual-Doppler Wind Analysis: Tests with Real and
Simulated Supercell Data. J. Atmos. Oceanic Technol., 29, 32–49,
https://doi.org/10.1175/JTECH-D-11-00019.1
<NAME>., <NAME>, and <NAME>, 2009: Use of a Vertical Vorticity
Equation in Variational Dual-Doppler Wind Analysis. J. Atmos. Oceanic
Technol., 26, 2089–2106, https://doi.org/10.1175/2009JTECHA1256.1
"""
dvdz = np.gradient(v, dz, axis=0)
dudz = np.gradient(u, dz, axis=0)
    dwdz = np.gradient(w, dz, axis=0)
dvdx = np.gradient(v, dx, axis=2)
dwdy = np.gradient(w, dy, axis=1)
dwdx = np.gradient(w, dx, axis=2)
dudx = np.gradient(u, dx, axis=2)
    dvdy = np.gradient(v, dy, axis=1)
dudy = np.gradient(u, dy, axis=1)
zeta = dvdx - dudy
dzeta_dx = np.gradient(zeta, dx, axis=2)
dzeta_dy = np.gradient(zeta, dy, axis=1)
dzeta_dz = np.gradient(zeta, dz, axis=0)
jv_array = ((u - Ut) * dzeta_dx + (v - Vt) * dzeta_dy +
w * dzeta_dz + (dvdz * dwdx - dudz * dwdy) +
zeta * (dudx + dvdy))
return np.sum(coeff*jv_array**2)
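# Illustrative sketch (not part of the original module): for steady solid-body
# rotation (u = -omega*y, v = omega*x, w = 0) the vertical vorticity is uniform
# and there is no advection, tilting, or stretching of vorticity, so this cost
# term should evaluate to essentially zero. Grid sizes and omega are arbitrary.
def _example_vertical_vorticity_cost():
    dx = dy = dz = 500.0
    z_g, y_g, x_g = np.meshgrid(np.arange(4) * dz,
                                np.arange(6) * dy,
                                np.arange(6) * dx, indexing='ij')
    omega = 1e-3
    u = -omega * y_g
    v = omega * x_g
    w = np.zeros_like(u)
    return calculate_vertical_vorticity_cost(u, v, w, dx, dy, dz,
                                             Ut=0.0, Vt=0.0, coeff=1e-5)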
def calculate_vertical_vorticity_gradient(u, v, w, dx, dy, dz, Ut, Vt,
coeff=1e-5):
"""
    Calculates the gradient of the cost function due to deviation from the vertical
vorticity equation. This is done by taking the functional derivative of
the vertical vorticity cost function.
Parameters
----------
u: 3D array
Float array with u component of wind field
v: 3D array
Float array with v component of wind field
w: 3D array
Float array with w component of wind field
dx: float array
Spacing in x grid
dy: float array
Spacing in y grid
dz: float array
Spacing in z grid
Ut: float
U component of storm motion
Vt: float
V component of storm motion
coeff: float
Weighting coefficient
Returns
-------
Jv: 1D float array
Value of the gradient of the vertical vorticity cost function.
References
----------
<NAME>., <NAME>, and <NAME>, 2012: Impact of a Vertical Vorticity
Constraint in Variational Dual-Doppler Wind Analysis: Tests with Real and
Simulated Supercell Data. J. Atmos. Oceanic Technol., 29, 32–49,
https://doi.org/10.1175/JTECH-D-11-00019.1
<NAME>., <NAME>, and <NAME>, 2009: Use of a Vertical Vorticity
Equation in Variational Dual-Doppler Wind Analysis. J. Atmos. Oceanic
Technol., 26, 2089–2106, https://doi.org/10.1175/2009JTECHA1256.1
"""
# First derivatives
dvdz = np.gradient(v, dz, axis=0)
dudz = np.gradient(u, dz, axis=0)
dwdy = np.gradient(w, dy, axis=1)
dudx = np.gradient(u, dx, axis=2)
    dvdy = np.gradient(v, dy, axis=1)
    dwdx = np.gradient(w, dx, axis=2)
    dvdx = np.gradient(v, dx, axis=2)
dudy = np.gradient(u, dy, axis=1)
zeta = dvdx - dudy
dzeta_dx = np.gradient(zeta, dx, axis=2)
dzeta_dy = np.gradient(zeta, dy, axis=1)
dzeta_dz = np.gradient(zeta, dz, axis=0)
    # Second derivatives
dwdydz = np.gradient(dwdy, dz, axis=0)
dwdxdz = np.gradient(dwdx, dz, axis=0)
dudzdy = np.gradient(dudz, dy, axis=1)
dvdxdy = np.gradient(dvdx, dy, axis=1)
dudx2 = np.gradient(dudx, dx, axis=2)
dudxdy = np.gradient(dudx, dy, axis=1)
    dudxdz = np.gradient(dudx, dz, axis=0)
#%%
"""Demonstrate the effect of compressing the number of thresholds of a random
forest."""
import sys
from numpy.linalg import LinAlgError
from tqdm import tqdm
import os.path
import numpy as np
import xgboost as xgb
sys.path.insert(1, "..")
from datasets import load_data
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error, log_loss
from ttml.ttml import TTML
from ttml.tt_rlinesearch import TTLS
from ttml.forest_compression import compress_forest_thresholds
from sklearn.ensemble import (
RandomForestRegressor,
RandomForestClassifier,
)
import scipy.special
import matplotlib.pyplot as plt
dataset_name = "airfoil"
DATASET_FOLDER = "../datasets/data"
dataset = load_data.dataset_loaders[dataset_name](DATASET_FOLDER)
X = dataset["X"]
y = dataset["y"]
X_train, X_val, y_train, y_val = train_test_split(
X.astype(float),
y.astype(float),
test_size=0.2,
random_state=179,
)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)
max_leaf_nodes = 100
max_depth = None
if dataset["regression"]:
forest_estim = RandomForestRegressor
task = "regression"
metric = mean_squared_error
else:
forest_estim = RandomForestClassifier
task = "classification"
metric = log_loss
forest = forest_estim(
n_estimators=128,
max_leaf_nodes=max_leaf_nodes,
max_depth=max_depth,
)
forest.fit(X_train, y_train)
# %%
def val_loss(forest):
loss = metric(y_val, forest.predict(X_val))
return loss
uncompresed_loss = val_loss(forest)
# %%
num_thresholds = np.arange(10, 100)
import numpy as np
from numpy import einsum
from .MaterialBase import Material
from Florence.Tensor import trace, Voigt
class TranservselyIsotropicHyperElastic(Material):
"""A compressible transervely isotropic model with the isotropic part being Mooney-Rivlin
The energy is given by:
W(C) = gamma * ( alpha*(C:I) + beta*(G:I) ) +
eta*(1-alpha)*( (N C N)**2 + N G N) - ut*J + lambda/2*(J-1)**2
ut = 2.*gamma*(alpha+2.0*beta) + 2.*(1. - gamma)*eta # for the stress to be
zero at the origin
the parameter "gamma" controls the amount of anisotropy and the vector N(ndim,1) is
the direction of anisotropy
"""
def __init__(self, ndim, **kwargs):
mtype = type(self).__name__
super(TranservselyIsotropicHyperElastic, self).__init__(mtype, ndim, **kwargs)
self.ndim = ndim
self.nvar = self.ndim
self.is_transversely_isotropic = True
self.is_nonisotropic = True
self.energy_type = "internal_energy"
self.nature = "nonlinear"
self.fields = "mechanics"
if self.ndim==3:
self.H_VoigtSize = 6
elif self.ndim==2:
self.H_VoigtSize = 3
# LOW LEVEL DISPATCHER
self.has_low_level_dispatcher = False
self.gamma = 0.5
def Hessian(self,StrainTensors,ElectricFieldx=0,elem=0,gcounter=0):
# Get material constants (5 in this case)
E = self.E
E_A = self.E_A
v = self.nu
I = StrainTensors['I']
J = StrainTensors['J'][gcounter]
b = StrainTensors['b'][gcounter]
F = StrainTensors['F'][gcounter]
H = J*np.linalg.inv(F).T
N = self.anisotropic_orientations[elem][:,None]
FN = np.dot(F,N)[:,0]
HN = np.dot(H,N)[:,0]
innerFN = einsum('i,i',FN,FN)
innerHN = einsum('i,i',HN,HN)
outerHN = einsum('i,j',HN,HN)
gamma = self.gamma
lamb = -(E_A*E*v)/(2.*E*v**2 + E_A*v - E_A)
ut = (E**2*v**2 + E_A*E*v**2 + E_A*E*v - E_A*E)/(2*(v + 1)*(2*E*v**2 + E_A*v - E_A))
beta = 0.
eta_1 = (E_A**2*v**2 - E_A**2 - 2*E_A*E*v**2 + E_A*E + E**2*v**2)/(4*(gamma - 1)*(v + 1)*(2*E*v**2 + E_A*v - E_A))
eta_2 = -(E_A**2*v - E_A**2 + E_A*E - E_A*E*v)/(4*(gamma - 1)*(2*E*v**2 + E_A*v - E_A))
alpha = ut - 4*gamma*beta - 2*(1-gamma)*eta_1 - 2*(1-gamma)*eta_2
alpha = alpha/2./gamma
eta = [eta_1,eta_2]
H_Voigt = 2.*gamma*beta/J* ( 2.0*einsum('ij,kl',b,b) - einsum('ik,jl',b,b) - einsum('il,jk',b,b) ) - \
(- lamb*(2.*J-1.) ) *einsum('ij,kl',I,I) + \
(ut - lamb*(J-1.) ) * ( einsum('ik,jl',I,I) + einsum('il,jk',I,I) )
for m in range(2,4):
H_Voigt += self.TransverseHessianNCN(StrainTensors,m,eta[m-2],gamma,FN,innerFN,elem,gcounter)
H_Voigt += self.TransverseHessianNGN(StrainTensors,1.,eta_1,gamma,HN,innerHN,elem,gcounter)
H_Voigt += self.TransverseHessianNGN(StrainTensors,1.,eta_2,gamma,HN,innerHN,elem,gcounter)
H_Voigt = Voigt(H_Voigt ,1)
self.H_VoigtSize = H_Voigt.shape[0]
return H_Voigt
def TransverseHessianNCN(self,StrainTensors,m,eta,gamma,FN,innerFN,elem,gcounter):
I = StrainTensors['I']
J = StrainTensors['J'][gcounter]
H_VoigtNCN = 4.*(1-gamma)*eta/J *(m-1)*(innerFN)**(m-2)*einsum('i,j,k,l',FN,FN,FN,FN)
return H_VoigtNCN
def TransverseHessianNGN(self,StrainTensors,n,eta,gamma,HN,innerHN,elem,gcounter):
I = StrainTensors['I']
J = StrainTensors['J'][gcounter]
H_VoigtNGN = 4.*(1-gamma)*eta/J * ( n*(innerHN)**n * einsum('ij,kl',I,I) - \
0.5*(innerHN)**n * ( einsum('ik,jl',I,I) + einsum('il,jk',I,I) ) - \
n*(innerHN)**(n-1)* ( einsum('ij,k,l',I,HN,HN) + einsum('i,j,kl',HN,HN,I) ) + \
(n-1.)*(innerHN)**(n-2)* einsum('i,j,k,l',HN,HN,HN,HN) ) + \
            2.*(1-gamma)*eta/J *(innerHN)**(n-1)* ( einsum('il,j,k',I,HN,HN)
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
"""
Definition of PyTorch "Dataset" that iterates through compressed videos
and return compressed representations (I-frames, motion vectors,
or residuals) for training or testing.
"""
import os
import os.path
import random
import numpy as np
import torch
import torch.utils.data as data
from coviar import get_num_frames
from coviar import load
from transforms import color_aug
from PIL import Image
from skimage.measure import block_reduce
from scipy import interpolate
import logging
logging.basicConfig(level=logging.DEBUG)
GOP_SIZE = 12
def video_path_to_flow_path(flow_root, video_path):
    # example: <data_root>/<class>/<video>.mp4 -> <flow_root>/<class>/<video>
tmp = video_path.split('/')
return os.path.join(flow_root, tmp[-2], tmp[-1][:-4])
def clip_and_scale(img, bound):
    img = img.astype(np.float64)
img *= 127.5 / bound
return img
def get_seg_range(n, num_segments, seg, representation):
if representation in ['residual', 'mv', 'flow']:
n -= 1
seg_size = float(n - 1) / num_segments
seg_begin = int(np.round(seg_size * seg))
seg_end = int(np.round(seg_size * (seg+1)))
if seg_end == seg_begin:
seg_end = seg_begin + 1
if representation in ['residual', 'mv', 'flow']:
        # Exclude the 0-th frame, because it's an I-frame.
return seg_begin + 1, seg_end + 1
return seg_begin, seg_end
def get_gop_pos(frame_idx, representation):
"""given frame idx to find the group idx and the position inside a group"""
gop_index = frame_idx // GOP_SIZE
gop_pos = frame_idx % GOP_SIZE
if representation in ['residual', 'mv', 'flow']:
if gop_pos == 0: # use the previous frame's residual and MV if it's iframe
gop_index -= 1
gop_pos = GOP_SIZE - 1
else:
        gop_pos = 0  # always use the I-frame at the start of the GOP rather than an RGB frame in the middle of it
return gop_index, gop_pos
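# Illustrative sketch (not part of the original module): with the default
# GOP_SIZE of 12, frame index 25 falls in GOP 2 at offset 1. An I-frame
# representation snaps back to the I-frame of its GOP, while offset 0 of an
# 'mv'/'residual'/'flow' representation falls back to the last position of the
# previous GOP.
def _example_get_gop_pos():
    assert get_gop_pos(25, 'iframe') == (2, 0)
    assert get_gop_pos(25, 'mv') == (2, 1)
    assert get_gop_pos(24, 'mv') == (1, 11)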
class CoviarDataSet(data.Dataset):
def __init__(self, data_root, flow_root, data_name,
video_list,
representation,
new_length,
flow_ds_factor,
upsample_interp,
transform,
num_segments,
is_train,
accumulate,
gop,
mv_minmaxnorm=0,
viz=False,
flow_folder='tvl1'):
self._data_root = data_root
self._flow_root = flow_root
self._data_name = data_name
self._num_segments = num_segments
self._representation = representation
self._new_length = new_length
self._flow_ds_factor = flow_ds_factor
self._upsample_interp = upsample_interp
self._mv_minmaxnorm = mv_minmaxnorm
self._transform = transform
self._is_train = is_train
self._accumulate = accumulate
self._viz = viz
self._flow_folder = flow_folder
global GOP_SIZE
GOP_SIZE = gop
self._input_mean = torch.from_numpy(
np.array([0.485, 0.456, 0.406]).reshape((1, 3, 1, 1))).float()
self._input_std = torch.from_numpy(
np.array([0.229, 0.224, 0.225]).reshape((1, 3, 1, 1))).float()
self._load_list(video_list)
def _load_list(self, video_list):
self._video_list = []
with open(video_list, 'r') as f:
for line in f:
video, _, label = line.strip().split()
video_path = os.path.join(self._data_root, video[:-4] + '.mp4')
flow_path = video_path_to_flow_path(self._flow_root, video_path)
self._video_list.append((
video_path,
int(label),
min(get_num_frames(video_path),len(os.listdir(flow_path))/3)))
print('%d videos loaded.' % len(self._video_list))
def _get_train_frame_index(self, num_frames, seg):
# Compute the range of the segment.
seg_begin, seg_end = get_seg_range(num_frames, self._num_segments, seg,
representation=self._representation)
# Sample one frame from the segment.
v_frame_idx = random.randint(seg_begin, seg_end - 1)
return get_gop_pos(v_frame_idx, self._representation)
def _get_test_frame_index(self, num_frames, seg):
if self._representation in ['mv', 'residual', 'flow']:
num_frames -= 1
seg_size = float(num_frames - 1) / self._num_segments
v_frame_idx = int(np.round(seg_size * (seg + 0.5)))
if self._representation in ['mv', 'residual', 'flow']:
v_frame_idx += 1
return get_gop_pos(v_frame_idx, self._representation)
def __getitem__(self, index):
if self._representation == 'mv':
representation_idx = 1
elif self._representation == 'residual':
representation_idx = 2
else:
representation_idx = 0
if self._is_train:
video_path, label, num_frames = random.choice(self._video_list)
else:
video_path, label, num_frames = self._video_list[index]
frames = []
idx_first = -99999
for seg in range(self._num_segments):
if self._is_train:
gop_index, gop_pos = self._get_train_frame_index(num_frames, seg)
else:
gop_index, gop_pos = self._get_test_frame_index(num_frames, seg)
flow_path = video_path_to_flow_path(self._flow_root, video_path)
if self._flow_folder == 'tvl1':
flow_tmpl = 'flow_{0}_{1:05d}.jpg'
idx = gop_index * GOP_SIZE + gop_pos + 1
if idx_first == -99999:
idx_first = idx
# read the corresponding pre-computed optical flow along x and y dimension
x_img = np.array(Image.open(os.path.join(flow_path, flow_tmpl.format('x', idx))).convert('L'))
y_img = np.array(Image.open(os.path.join(flow_path, flow_tmpl.format('y', idx))).convert('L'))
flow = np.stack([x_img, y_img], axis=-1)
if flow is None:
print('Error: loading flow %s failed.' % video_path)
# load MV and data pre-processing
mv = load(video_path, gop_index, gop_pos, representation_idx, self._accumulate)
if mv is None:
print('Error: loading video %s failed.' % video_path)
mv = np.zeros((256, 256, 2)) if self._representation == 'mv' else np.zeros((256, 256, 3))
else:
if self._representation == 'mv':
if self._mv_minmaxnorm == 1:
mv = clip_and_scale(mv, 20) # scale values from +-20 to +-127.5
mv += 128
                    mv = (np.minimum(np.maximum(mv, 0)
'''
Created on Nov 25, 2011
@author: cryan
Code for numerical optimal control.
'''
import numpy as np
from numpy import sin,cos
from copy import deepcopy
from scipy.constants import pi
from scipy.linalg import expm
from scipy.linalg import eigh
from scipy.optimize import fmin_l_bfgs_b
import matplotlib.pyplot as plt
from PulseSequence import PulseSequence
from QuantumSystems import Hamiltonian
from Evolution import expm_eigen
#Try to load the CPPBackEnd
try:
import PySim.CySim
CPPBackEnd = True
except ImportError:
CPPBackEnd = False
class PulseParams(PulseSequence):
'''
For now just a container for pulse optimization parameters. Subclasses a PulseSequence as it has to define similar things.
'''
def __init__(self):
super(PulseParams, self).__init__()
self.numChannels = 0
self.numPoints = 0
self.startControlAmps = None #Initial guess for the pulse
        self.fTol = 1e-4 #optimization parameter: will exit when difference in fidelity is less than this.
        self.maxfun = 15000
        self.derivType = 'approx'
        self.optimType = 'unitary' #unitary or state2state optimization
self.Ugoal = None
self.rhoStart = None
self.rhoGoal = None
@property
def dim(self):
if self.Ugoal is not None:
return self.Ugoal.shape[0]
else:
if self.rhoStart is not None:
return self.rhoStart.shape[0]
else:
return 0
def create_random_pulse(numChannels, numPoints):
'''
Helper function to create smooth pulse starting point.
'''
#TODO: return something besides ones
return 2e6*np.ones((numChannels, numPoints))
def calc_control_Hams(optimParams, systemParams):
'''
    A helper function to calculate the control Hamiltonians in the interaction frame. This only needs to be done once per optimization.
'''
#We'll store them in a numControlHamsxnumTimeSteps array
controlHams = np.zeros((systemParams.numControlHams, optimParams.numTimeSteps, systemParams.dim, systemParams.dim), dtype = np.complex128)
#Now loop over each timestep
curTime = 0.0
for timect, timeStep in enumerate(optimParams.timeSteps):
#Loop over each of the control Hamiltonians
for controlct, tmpControl in enumerate(optimParams.controlLines):
tmpPhase = 2*pi*tmpControl.freq*curTime + tmpControl.phase
if tmpControl.controlType == 'rotating':
tmpHam = Hamiltonian(cos(tmpPhase)*systemParams.controlHams[controlct]['inphase'].matrix + sin(tmpPhase)*systemParams.controlHams[controlct]['quadrature'].matrix)
elif tmpControl.controlType == 'sinusoidal':
tmpHam = Hamiltonian(cos(tmpPhase)*systemParams.controlHams[controlct]['inphase'])
else:
raise KeyError('Unknown control type.')
if optimParams.H_int is not None:
#Move the total Hamiltonian into the interaction frame
tmpHam.calc_interaction_frame(optimParams.H_int, curTime)
controlHams[controlct, timect] = tmpHam.interactionMatrix + optimParams.H_int.matrix
else:
#Just store the matrix
controlHams[controlct, timect] = tmpHam.matrix
#Update the times
curTime += timeStep
return controlHams
def evolution_unitary(optimParams, systemParams, controlHams):
'''
Main function for evolving a state under unitary conditions
'''
totU = np.eye(systemParams.dim)
timeStepUs = np.zeros((optimParams.timeSteps.size, systemParams.dim, systemParams.dim), dtype=np.complex128)
Vs = np.zeros((optimParams.timeSteps.size, systemParams.dim, systemParams.dim), dtype=np.complex128)
Ds = np.zeros((optimParams.timeSteps.size, systemParams.dim), dtype=np.float64)
totHams = np.zeros_like(timeStepUs)
#Loop over each timestep in the sequence
curTime = 0.0
for timect, timeStep in enumerate(optimParams.timeSteps):
#Initialize the Hamiltonian to the drift Hamiltonian
Htot = deepcopy(systemParams.Hnat)
if optimParams.H_int is not None:
#Move the total Hamiltonian into the interaction frame
Htot.calc_interaction_frame(optimParams.H_int, curTime)
            Htot.matrix = np.copy(Htot.interactionMatrix)
# pylint: disable=redefined-outer-name
"""
Tests contour.
"""
import os
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from pygmt import Figure
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
POINTS_DATA = os.path.join(TEST_DATA_DIR, "points.txt")
@pytest.fixture(scope="module")
def data():
"""
Load the point data from the test file.
"""
return pd.read_table(POINTS_DATA, header=None, sep=r"\s+")
@pytest.fixture(scope="module")
def region():
"""
The data region.
"""
return [10, 70, -5, 10]
@pytest.mark.mpl_image_compare
def test_contour_vec(region):
"""
Plot an x-centered gaussian kernel with different y scale.
"""
fig = Figure()
x, y = np.meshgrid(
        np.linspace(region[0], region[1])
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn.init import xavier_uniform_
class Scholar(object):
def __init__(self, config, alpha=1.0, learning_rate=0.001, init_embeddings=None, update_embeddings=True,
init_bg=None, update_background=True, adam_beta1=0.99, adam_beta2=0.999, device=None, seed=None,
classify_from_covars=True):
"""
Create the model
:param config: a dictionary with the model configuration
:param alpha: hyperparameter for the document representation prior
:param learning_rate: learning rate for Adam
:param init_embeddings: a matrix of embeddings to initialize the first layer of the bag-of-words encoder
:param update_embeddings: if True, update word embeddings during training
        :param init_bg: a vector of empirical log background frequencies
:param update_background: if True, update the background term during training
:param adam_beta1: first hyperparameter for Adam
:param adam_beta2: second hyperparameter for Adam
:param device: (int) the number of the GPU to use
"""
if seed is not None:
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
self.network_architecture = config
self.learning_rate = learning_rate
self.adam_beta1 = adam_beta1
self.update_embeddings = update_embeddings
self.update_background = update_background
# create priors on the hidden state
self.n_topics = (config["n_topics"])
if device is None:
self.device = 'cpu'
else:
self.device = 'cuda:' + str(device)
# interpret alpha as either a (symmetric) scalar prior or a vector prior
if np.array(alpha).size == 1:
# if alpha is a scalar, create a symmetric prior vector
self.alpha = alpha * np.ones((1, self.n_topics)).astype(np.float32)
else:
# otherwise use the prior as given
self.alpha = np.array(alpha).astype(np.float32)
assert len(self.alpha) == self.n_topics
# create the pyTorch model
self._model = torchScholar(config, self.alpha, update_embeddings, init_emb=init_embeddings, bg_init=init_bg, device=self.device, classify_from_covars=classify_from_covars).to(self.device)
# set the criterion
self.criterion = nn.BCEWithLogitsLoss()
# create the optimizer
grad_params = filter(lambda p: p.requires_grad, self._model.parameters())
self.optimizer = optim.Adam(grad_params, lr=learning_rate, betas=(adam_beta1, adam_beta2))
def fit(self, X, Y, PC, TC, eta_bn_prop=1.0, l1_beta=None, l1_beta_c=None, l1_beta_ci=None):
"""
Fit the model to a minibatch of data
:param X: np.array of document word counts [batch size x vocab size]
:param Y: np.array of labels [batch size x n_labels]
:param PC: np.array of prior covariates influencing the document-topic prior [batch size x n_prior_covars]
:param TC: np.array of topic covariates to be associated with topical deviations [batch size x n_topic_covars]
:param l1_beta: np.array of prior variances on the topic weights
:param l1_beta_c: np.array of prior variances on the weights for topic covariates
:param l1_beta_ci: np.array of prior variances on the weights for topic-covariate interactions
:return: loss; label pred probs; document representations; neg-log-likelihood; KLD
"""
# move data to device
X = torch.Tensor(X).to(self.device)
if Y is not None:
Y = torch.Tensor(Y).to(self.device)
if PC is not None:
PC = torch.Tensor(PC).to(self.device)
if TC is not None:
TC = torch.Tensor(TC).to(self.device)
self.optimizer.zero_grad()
# do a forward pass
thetas, X_recon, Y_probs, losses = self._model(X, Y, PC, TC, eta_bn_prop=eta_bn_prop, l1_beta=l1_beta, l1_beta_c=l1_beta_c, l1_beta_ci=l1_beta_ci)
loss, nl, kld = losses
# update model
loss.backward()
self.optimizer.step()
if Y_probs is not None:
Y_probs = Y_probs.to('cpu').detach().numpy()
return loss.to('cpu').detach().numpy(), Y_probs, thetas.to('cpu').detach().numpy(), nl.to('cpu').detach().numpy(), kld.to('cpu').detach().numpy()
def predict(self, X, PC, TC, eta_bn_prop=0.0):
"""
Predict labels for a minibatch of data
"""
# input a vector of all zeros in place of the labels that the model has been trained on
batch_size = self.get_batch_size(X)
Y = np.zeros((batch_size, self.network_architecture['n_labels'])).astype('float32')
X = torch.Tensor(X).to(self.device)
Y = torch.Tensor(Y).to(self.device)
if PC is not None:
PC = torch.Tensor(PC).to(self.device)
if TC is not None:
TC = torch.Tensor(TC).to(self.device)
theta, _, Y_recon, _ = self._model(X, Y, PC, TC, do_average=False, var_scale=0.0, eta_bn_prop=eta_bn_prop)
return theta, Y_recon.to('cpu').detach().numpy()
def predict_from_topics(self, theta, PC, TC, eta_bn_prop=0.0):
"""
Predict label probabilities from each topic
"""
theta = torch.Tensor(theta)
if PC is not None:
PC = torch.Tensor(PC)
if TC is not None:
TC = torch.Tensor(TC)
probs = self._model.predict_from_theta(theta, PC, TC)
return probs.to('cpu').detach().numpy()
def get_losses(self, X, Y, PC, TC, eta_bn_prop=0.0, n_samples=0):
"""
Compute and return the loss values for all instances in X, Y, PC, and TC averaged over multiple samples
"""
batch_size = self.get_batch_size(X)
if batch_size == 1:
X = np.expand_dims(X, axis=0)
if Y is not None and batch_size == 1:
Y = np.expand_dims(Y, axis=0)
if PC is not None and batch_size == 1:
PC = np.expand_dims(PC, axis=0)
if TC is not None and batch_size == 1:
TC = np.expand_dims(TC, axis=0)
X = torch.Tensor(X).to(self.device)
if Y is not None:
Y = torch.Tensor(Y).to(self.device)
if PC is not None:
PC = torch.Tensor(PC).to(self.device)
if TC is not None:
TC = torch.Tensor(TC).to(self.device)
if n_samples == 0:
_, _, _, temp = self._model(X, Y, PC, TC, do_average=False, var_scale=0.0, eta_bn_prop=eta_bn_prop)
loss, NL, KLD = temp
losses = loss.to('cpu').detach().numpy()
else:
_, _, _, temp = self._model(X, Y, PC, TC, do_average=False, var_scale=1.0, eta_bn_prop=eta_bn_prop)
loss, NL, KLD = temp
losses = loss.to('cpu').detach().numpy()
for s in range(1, n_samples):
_, _, _, temp = self._model(X, Y, PC, TC, do_average=False, var_scale=1.0, eta_bn_prop=eta_bn_prop)
loss, NL, KLD = temp
losses += loss.to('cpu').detach().numpy()
losses /= float(n_samples)
return losses
def compute_theta(self, X, Y, PC, TC, eta_bn_prop=0.0):
"""
Return the latent document representation (mean of posterior of theta) for a given batch of X, Y, PC, and TC
"""
batch_size = self.get_batch_size(X)
if batch_size == 1:
X = np.expand_dims(X, axis=0)
if Y is not None and batch_size == 1:
Y = np.expand_dims(Y, axis=0)
if PC is not None and batch_size == 1:
PC = np.expand_dims(PC, axis=0)
if TC is not None and batch_size == 1:
TC = np.expand_dims(TC, axis=0)
X = torch.Tensor(X).to(self.device)
if Y is not None:
Y = torch.Tensor(Y).to(self.device)
if PC is not None:
PC = torch.Tensor(PC).to(self.device)
if TC is not None:
TC = torch.Tensor(TC).to(self.device)
theta, _, _, _ = self._model(X, Y, PC, TC, do_average=False, var_scale=0.0, eta_bn_prop=eta_bn_prop)
return theta.to('cpu').detach().numpy()
def get_weights(self):
"""
Return the topic-vocabulary deviation weights
"""
emb = self._model.beta_layer.to('cpu').weight.detach().numpy().T
self._model.beta_layer.to(self.device)
return emb
def get_bg(self):
"""
Return the background terms
"""
bg = self._model.beta_layer.to('cpu').bias.detach().numpy()
self._model.beta_layer.to(self.device)
return bg
def get_prior_weights(self):
"""
Return the weights associated with the prior covariates
"""
emb = self._model.prior_covar_weights.to('cpu').weight.detach().numpy().T
self._model.prior_covar_weights.to(self.device)
return emb
def get_covar_weights(self):
"""
Return the topic weight (deviations) associated with the topic covariates
"""
emb = self._model.beta_c_layer.to('cpu').weight.detach().numpy().T
self._model.beta_c_layer.to(self.device)
return emb
def get_covar_interaction_weights(self):
"""
Return the weights (deviations) associated with the topic-covariate interactions
"""
emb = self._model.beta_ci_layer.to('cpu').weight.detach().numpy().T
self._model.beta_ci_layer.to(self.device)
return emb
def get_batch_size(self, X):
"""
Get the batch size for a minibatch of data
:param X: the minibatch
:return: the size of the minibatch
"""
if len(X.shape) == 1:
batch_size = 1
else:
batch_size, _ = X.shape
return batch_size
def eval(self):
self._model.eval()
def train(self):
self._model.train()
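# Illustrative sketch (not part of the original module): the configuration keys
# read by torchScholar below, filled with made-up values. A dict shaped like
# this, plus X of shape [batch_size, vocab_size] (and Y, PC, TC where used), is
# what Scholar expects; it is shown only to document the expected structure.
_EXAMPLE_CONFIG = {
    "vocab_size": 2000,
    "embedding_dim": 300,
    "n_topics": 20,
    "n_labels": 0,
    "n_prior_covars": 0,
    "n_topic_covars": 0,
    "classifier_layers": 0,
    "use_interactions": False,
    "l1_beta_reg": 1.0,
    "l1_beta_c_reg": 1.0,
    "l1_beta_ci_reg": 1.0,
    "l2_prior_reg": 0.0,
    "classification_loss_coef": 1.0,
    "reconstr_loss_coef": 1.0,
    "kl_loss_coef": 1.0,
}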
class torchScholar(nn.Module):
def __init__(self, config, alpha, update_embeddings=True, init_emb=None, bg_init=None, device='cpu', classify_from_covars=False):
super(torchScholar, self).__init__()
# load the configuration
self.vocab_size = config['vocab_size']
self.words_emb_dim = config['embedding_dim']
self.n_topics = config['n_topics']
self.n_labels = config['n_labels']
self.n_prior_covars = config['n_prior_covars']
self.n_topic_covars = config['n_topic_covars']
self.classifier_layers = config['classifier_layers']
self.use_interactions = config['use_interactions']
self.l1_beta_reg = config['l1_beta_reg']
self.l1_beta_c_reg = config['l1_beta_c_reg']
self.l1_beta_ci_reg = config['l1_beta_ci_reg']
self.l2_prior_reg = config['l2_prior_reg']
self.classification_loss_coef = config['classification_loss_coef']
self.reconstr_loss_coef = config['reconstr_loss_coef']
self.kl_loss_coef = config['kl_loss_coef']
self.device = device
self.classify_from_covars = classify_from_covars
# create a layer for prior covariates to influence the document prior
if self.n_prior_covars > 0:
self.prior_covar_weights = nn.Linear(self.n_prior_covars, self.n_topics, bias=False)
else:
self.prior_covar_weights = None
# create the encoder
self.embeddings_x_layer = nn.Linear(self.vocab_size, self.words_emb_dim, bias=False)
emb_size = self.words_emb_dim
classifier_input_dim = self.n_topics
if self.n_prior_covars > 0:
emb_size += self.n_prior_covars
if self.classify_from_covars:
classifier_input_dim += self.n_prior_covars
if self.n_topic_covars > 0:
emb_size += self.n_topic_covars
if self.classify_from_covars:
classifier_input_dim += self.n_topic_covars
if self.n_labels > 0:
emb_size += self.n_labels
self.encoder_dropout_layer = nn.Dropout(p=0.2)
if not update_embeddings:
self.embeddings_x_layer.weight.requires_grad = False
if init_emb is not None:
self.embeddings_x_layer.weight.data.copy_(torch.from_numpy(init_emb)).to(self.device)
else:
xavier_uniform_(self.embeddings_x_layer.weight)
# create the mean and variance components of the VAE
self.mean_layer = nn.Linear(emb_size, self.n_topics)
self.logvar_layer = nn.Linear(emb_size, self.n_topics)
self.mean_bn_layer = nn.BatchNorm1d(self.n_topics, eps=0.001, momentum=0.001, affine=True)
self.mean_bn_layer.weight.data.copy_(torch.from_numpy(np.ones(self.n_topics))).to(self.device)
self.mean_bn_layer.weight.requires_grad = False
self.logvar_bn_layer = nn.BatchNorm1d(self.n_topics, eps=0.001, momentum=0.001, affine=True)
self.logvar_bn_layer.weight.data.copy_(torch.from_numpy(np.ones(self.n_topics))).to(self.device)
self.logvar_bn_layer.weight.requires_grad = False
self.z_dropout_layer = nn.Dropout(p=0.2)
# create the decoder
self.beta_layer = nn.Linear(self.n_topics, self.vocab_size)
xavier_uniform_(self.beta_layer.weight)
if bg_init is not None:
self.beta_layer.bias.data.copy_(torch.from_numpy(bg_init))
self.beta_layer.bias.requires_grad = False
self.beta_layer = self.beta_layer.to(self.device)
if self.n_topic_covars > 0:
self.beta_c_layer = nn.Linear(self.n_topic_covars, self.vocab_size, bias=False).to(self.device)
if self.use_interactions:
self.beta_ci_layer = nn.Linear(self.n_topics * self.n_topic_covars, self.vocab_size, bias=False).to(self.device)
# create the classifier
if self.n_labels > 0:
if self.classifier_layers == 0:
self.classifier_layer_0 = nn.Linear(classifier_input_dim, self.n_labels).to(self.device)
else:
self.classifier_layer_0 = nn.Linear(classifier_input_dim, classifier_input_dim).to(self.device)
self.classifier_layer_1 = nn.Linear(classifier_input_dim, self.n_labels).to(self.device)
# create a final batchnorm layer
self.eta_bn_layer = nn.BatchNorm1d(self.vocab_size, eps=0.001, momentum=0.001, affine=True).to(self.device)
self.eta_bn_layer.weight.data.copy_(torch.from_numpy(np.ones(self.vocab_size)).to(self.device))
self.eta_bn_layer.weight.requires_grad = False
# create the document prior terms
prior_mean = (np.log(alpha).T - np.mean(np.log(alpha), 1)).T
        prior_var = (((1.0 / alpha) * (1 - (2.0 / self.n_topics))).T + (1.0 / (self.n_topics * self.n_topics)) * np.sum(1.0 / alpha, 1)
"""
Runs a model on a single node across multiple gpus.
"""
import os
from pathlib import Path
import torch
import numpy as np
import torch.nn.functional as F
import scipy.io as sio
import matplotlib.pyplot as plt
import configargparse
from src.DeepRegression import Model
TOL = 1e-14
def main(hparams):
if hparams.gpu == 0:
device = torch.device("cpu")
else:
ngpu = "cuda:" + str(hparams.gpu - 1)
print(ngpu)
device = torch.device(ngpu)
model = Model(hparams).to(device)
print(hparams)
print()
# Model loading
model_path = os.path.join(
f"lightning_logs/version_" + hparams.test_check_num, "checkpoints/"
)
ckpt = list(Path(model_path).glob("*.ckpt"))[0]
print(ckpt)
model = model.load_from_checkpoint(str(ckpt))
model.eval()
model.to(device)
mae_test = []
# Testing Set
root = hparams.data_root
test_list = hparams.test_list
file_path = os.path.join(root, test_list)
test_name = os.path.splitext(os.path.basename(test_list))[0]
root_dir = os.path.join(root, "test", test_name)
with open(file_path, "r") as fp:
for line in fp.readlines():
# Data Reading
data_path = line.strip()
path = os.path.join(root_dir, data_path)
if model.vec:
_, test_data = model.read_vec_data()
obs_index, heat_obs, pred_index, heat0, heat = test_data._loader(path)
u_true = heat.squeeze().squeeze().numpy()
heat_obs = (heat_obs - hparams.mean_layout) / hparams.std_layout
heat0 = (heat0 - hparams.mean_heat) / hparams.std_heat
heat = (heat - hparams.mean_heat) / hparams.std_heat
obs_index, heat_obs, pred_index, heat0, heat = (
obs_index.to(device),
heat_obs.to(device),
pred_index.to(device),
heat0.to(device),
heat.to(device),
)
heat_info = [obs_index, heat_obs, pred_index, heat0]
if (
model.layout_model == "ConditionalNeuralProcess"
or model.layout_model == "TransformerRecon"
):
heat_info[1] = heat_info[1].transpose(1, 2)
heat_info[3] = heat_info[3].transpose(2, 3)
elif model.layout_model == "DenseDeepGCN":
heat_obs = heat_obs.squeeze()
pseudo_heat = torch.zeros_like(heat0[:, 0, :]).squeeze()
inputs = (
torch.cat(
(
torch.cat((heat_obs, pseudo_heat), 1).unsqueeze(-1),
torch.cat((obs_index, pred_index[:, 0, ...]), 1),
),
2,
)
.transpose(1, 2)
.unsqueeze(-1)
.unsqueeze(0)
)
for i in range(hparams.div_num * hparams.div_num - 1):
input_single = (
torch.cat(
(
torch.cat((heat_obs, pseudo_heat), 1).unsqueeze(-1),
torch.cat(
(obs_index, pred_index[:, i + 1, ...]), 1
),
),
2,
)
.transpose(1, 2)
.unsqueeze(-1)
.unsqueeze(0)
)
inputs = torch.cat((inputs, input_single), 0)
heat_info = inputs
else:
data = sio.loadmat(path)
u_true, u_obs = data["u"], data["u_obs"]
u_obs[np.where(u_obs < TOL)] = hparams.mean_layout
u_obs = (
torch.Tensor((u_obs - hparams.mean_layout) / hparams.std_layout)
.unsqueeze(0)
.unsqueeze(0)
.to(device)
)
heat = (
torch.Tensor((u_true - hparams.mean_heat) / hparams.std_heat)
.unsqueeze(0)
.unsqueeze(0)
.to(device)
)
heat_info = u_obs
hs_F = sio.loadmat(path)["F"]
# Plot u_obs and Real Temperature Field
fig = plt.figure(figsize=(22.5, 5))
grid_x = np.linspace(0, 0.1, num=200)
grid_y = np.linspace(0, 0.1, num=200)
            X, Y = np.meshgrid(grid_x, grid_y)
# -*- coding: utf-8 -*-
"""
Create the various LUTs used by the BT.2407 implementation
===========================================================
"""
# import standard libraries
import os
# import third-party libraries
import numpy as np
from multiprocessing import Pool, cpu_count, Array
import matplotlib.pyplot as plt
from colour.models import BT709_COLOURSPACE, BT2020_COLOURSPACE
from colour import Lab_to_XYZ, XYZ_to_RGB, RGB_to_XYZ, XYZ_to_Lab,\
Lab_to_LCHab, RGB_COLOURSPACES, RGB_to_RGB, LCHab_to_Lab,\
write_LUT, LUT3D
import cv2
from scipy import interpolate
# import my libraries
import test_pattern_generator2 as tpg
import color_space as cs
import plot_utility as pu
import transfer_functions as tf
from bt2407_parameters import L_SAMPLE_NUM_MAX, H_SAMPLE_NUM_MAX,\
GAMUT_BOUNDARY_LUT_LUMINANCE_SAMPLE, GAMUT_BOUNDARY_LUT_HUE_SAMPLE,\
get_gamut_boundary_lut_name, get_l_cusp_name, get_focal_name,\
get_chroma_map_lut_name
from bt2047_gamut_mapping import get_chroma_lightness_val_specfic_hue,\
calc_chroma_lightness_using_length_from_l_focal,\
calc_chroma_lightness_using_length_from_c_focal, calc_cusp_lut,\
calc_degree_from_cl_data_using_c_focal,\
calc_degree_from_cl_data_using_l_focal,\
calc_distance_from_c_focal, calc_distance_from_l_focal,\
eliminate_inner_gamut_data_c_focal, eliminate_inner_gamut_data_l_focal,\
interpolate_chroma_map_lut, merge_lightness_mapping,\
bt2407_gamut_mapping_for_rgb_linear
from make_bt2047_luts import calc_value_from_hue_1dlut,\
calc_chroma_map_degree2, calc_l_cusp_specific_hue, calc_cusp_in_lc_plane,\
_calc_ab_coef_from_cl_point, solve_equation_for_intersection,\
calc_cusp_focal_specific_hue
# information
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2019 - <NAME>'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = '<NAME>'
__email__ = 'toru.ver.11 at-sign gmail.com'
__all__ = []
def lch_to_lab(lch):
shape_bak = lch.shape
aa = lch[..., 1] * np.cos(lch[..., 2])
bb = lch[..., 1] * np.sin(lch[..., 2])
    return np.dstack((lch[..., 0], aa, bb)).reshape(shape_bak)
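# Illustrative sketch (not part of the original module): lch_to_lab() applies
# cos/sin directly to the hue value, so the hue is assumed to be in radians, as
# hue is handled in radians elsewhere in this module. A hue of pi/2 then puts
# all of the chroma on the +b* axis.
def _example_lch_to_lab():
    lch = np.array([[[50.0, 40.0, np.pi / 2]]])
    return lch_to_lab(lch)  # approximately [[[50., 0., 40.]]]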
def print_blog_param_sub(
rgb_2020=np.array([1023, 0, 0]), text="angle_40"):
rgb_2020_linear = (rgb_2020 / 1023) ** 2.4
lab_2020 = XYZ_to_Lab(
RGB_to_XYZ(
rgb_2020_linear, cs.D65, cs.D65,
BT2020_COLOURSPACE.RGB_to_XYZ_matrix))
lch_2020 = Lab_to_LCHab(lab_2020)
print(f"rgb_2020_{text}={rgb_2020}")
print(f"lab_2020_{text}={lab_2020}")
print(f"lch_2020_{text}={lch_2020}")
def print_blog_param():
"""
    Dump the parameters quoted in the blog article.
"""
rgb_40_2020 = np.array([1001, 509, 321])
rgb_270_2020 = np.array([158, 421, 759])
print_blog_param_sub(rgb_40_2020, "40")
print_blog_param_sub(rgb_270_2020, "270")
def _make_debug_luminance_chroma_data_fixed_hue(cl_outer):
dst_step = 31
degree = np.linspace(-np.pi/2, np.pi/2, dst_step)
a1 = np.tan(degree)
b1 = 50 * np.ones_like(a1)
a2, b2 = _calc_ab_coef_from_cl_point(cl_outer)
out_chroma, out_lightness = solve_equation_for_intersection(
cl_outer, a1, b1, a2, b2, focal="L_Focal")
# chroma = cl_outer[..., 0]
# lightness = cl_outer[..., 1]
# step = GAMUT_BOUNDARY_LUT_HUE_SAMPLE // dst_step
# out_chroma = np.append(chroma[::step], chroma[-1])
# out_lightness = np.append(lightness[::step], lightness[-1])
return out_lightness, out_chroma
def _check_chroma_map_lut_interpolation(
hue_idx, hue,
outer_color_space_name=cs.BT2020,
inner_color_space_name=cs.BT709):
"""
    Debug code for checking the behavior of interpolate_chroma_map_lut().
    1. First, check on the LC plane that lies exactly on the LUT grid
    2. Next, check on an LC plane where the interpolation actually kicks in
    3. Finally, check on an ab plane where the interpolation actually kicks in
"""
print(hue_idx, np.rad2deg(hue))
    # First, prepare the boundary polygons in the L*C* plane
cl_inner = get_chroma_lightness_val_specfic_hue(
hue, get_gamut_boundary_lut_name(inner_color_space_name))
cl_outer = get_chroma_lightness_val_specfic_hue(
hue, get_gamut_boundary_lut_name(outer_color_space_name))
    # Prepare the cusp data
lh_inner_lut = np.load(
get_gamut_boundary_lut_name(inner_color_space_name))
inner_cusp = calc_cusp_in_lc_plane(hue, lh_inner_lut)
lh_outer_lut = np.load(
get_gamut_boundary_lut_name(outer_color_space_name))
outer_cusp = calc_cusp_in_lc_plane(hue, lh_outer_lut)
    # Prepare l_cusp, l_focal and c_focal
l_cusp_lut = np.load(
get_l_cusp_name(
outer_color_space_name=outer_color_space_name,
inner_color_space_name=inner_color_space_name))
l_focal_lut = np.load(
get_focal_name(
outer_color_space_name=outer_color_space_name,
inner_color_space_name=inner_color_space_name,
focal_type="Lfocal"))
c_focal_lut = np.load(
get_focal_name(
outer_color_space_name=outer_color_space_name,
inner_color_space_name=inner_color_space_name,
focal_type="Cfocal"))
l_cusp = calc_value_from_hue_1dlut(hue, l_cusp_lut)
l_focal = calc_value_from_hue_1dlut(hue, l_focal_lut)
c_focal = calc_value_from_hue_1dlut(hue, c_focal_lut)
    # LUT data of the distance from the focal points used for chroma mapping
cmap_lut_c = np.load(
get_chroma_map_lut_name(
outer_color_space_name=outer_color_space_name,
inner_color_space_name=inner_color_space_name,
focal_type="Cfocal"))
cmap_lut_l = np.load(
get_chroma_map_lut_name(
outer_color_space_name=outer_color_space_name,
inner_color_space_name=inner_color_space_name,
focal_type="Lfocal"))
    # Get st_degree and ed_degree in the form of 1D LUTs,
    # e.g. something like st_degree_l[hue] = 30°, ed_degree_l[hue] = 120°
inner_cusp_l_lut = calc_cusp_lut(lh_lut=lh_inner_lut)
st_degree_l, ed_degree_l, st_degree_c, ed_degree_c =\
calc_chroma_map_degree2(l_focal_lut, c_focal_lut, inner_cusp_l_lut)
    # Prepare the data for verification.
    # To mimic the production path, start the conversion from chroma-lightness:
    # hue-degree --> chroma-lightness --> hue_degree --> interpolation
    """ Data referenced to L_focal """
lightness_l, chroma_l = _make_debug_luminance_chroma_data_fixed_hue(
cl_outer)
hue_array = np.ones(chroma_l.shape[0]) * hue
cl_data_l = np.dstack((chroma_l, lightness_l))[0]
test_degree_l = calc_degree_from_cl_data_using_l_focal(
cl_data=cl_data_l,
l_focal=calc_value_from_hue_1dlut(hue_array, l_focal_lut))
hd_data_l = np.dstack((hue_array, test_degree_l))[0]
len_from_l_focal = calc_distance_from_l_focal(
chroma_l, lightness_l, l_focal)
""" C_focal 基準データ """
lightness_c, chroma_c = _make_debug_luminance_chroma_data_fixed_hue(
cl_outer)
hue_array = np.ones(chroma_l.shape[0]) * hue
cl_data_c = np.dstack((chroma_c, lightness_c))[0]
test_degree_c = calc_degree_from_cl_data_using_c_focal(
cl_data=cl_data_c,
c_focal=calc_value_from_hue_1dlut(hue_array, c_focal_lut))
hd_data_c = np.dstack((hue_array, test_degree_c))[0]
len_from_c_focal = calc_distance_from_c_focal(
chroma_c, lightness_c, c_focal)
    # First, bilinear interpolation of the cmap_lut values
cmap_value_l = interpolate_chroma_map_lut(
cmap_hd_lut=cmap_lut_l, degree_min=st_degree_l,
degree_max=ed_degree_l, data_hd=hd_data_l)
cmap_value_c = interpolate_chroma_map_lut(
cmap_hd_lut=cmap_lut_c, degree_min=st_degree_c,
degree_max=ed_degree_c, data_hd=hd_data_c)
    # Restore the values for data that should be excluded from mapping
restore_idx_l = (len_from_l_focal <= cmap_value_l)
cmap_value_l[restore_idx_l] = len_from_l_focal[restore_idx_l]
restore_idx_c = (len_from_c_focal > cmap_value_c)
cmap_value_c[restore_idx_c] = len_from_c_focal[restore_idx_c]
    # Get the coordinates in the CL plane from the interpolated cmap values
icn_x_l, icn_y_l = calc_chroma_lightness_using_length_from_l_focal(
distance=cmap_value_l, degree=test_degree_l, l_focal=l_focal)
icn_x_c, icn_y_c = calc_chroma_lightness_using_length_from_c_focal(
distance=cmap_value_c, degree=test_degree_c, c_focal=c_focal)
_debug_plot_check_lightness_mapping_specific_hue(
hue, cl_inner, cl_outer, l_cusp, inner_cusp, outer_cusp,
l_cusp, l_focal, c_focal,
x_val=chroma_l, y_val=lightness_l, map_x=icn_x_l, map_y=icn_y_l,
focal_type="L_focal", h_idx=hue_idx,
outer_color_space_name=outer_color_space_name,
inner_color_space_name=inner_color_space_name)
_debug_plot_check_lightness_mapping_specific_hue(
hue, cl_inner, cl_outer, l_cusp, inner_cusp, outer_cusp,
l_cusp, l_focal, c_focal,
x_val=chroma_c, y_val=lightness_c, map_x=icn_x_c, map_y=icn_y_c,
focal_type="C_focal", h_idx=hue_idx,
outer_color_space_name=outer_color_space_name,
inner_color_space_name=inner_color_space_name)
def _check_lightness_mapping_specific_hue(
hue_idx, hue,
outer_color_space_name=cs.BT2020,
inner_color_space_name=cs.BT709):
"""
interpolate_chroma_map_lut() の動作確認用のデバッグコード。
"""
print(hue_idx, np.rad2deg(hue))
    # prepare the polygons on the L*C* plane
cl_inner = get_chroma_lightness_val_specfic_hue(
hue, get_gamut_boundary_lut_name(inner_color_space_name))
cl_outer = get_chroma_lightness_val_specfic_hue(
hue, get_gamut_boundary_lut_name(outer_color_space_name))
    # prepare the cusps
lh_inner_lut = np.load(
get_gamut_boundary_lut_name(inner_color_space_name))
inner_cusp = calc_cusp_in_lc_plane(hue, lh_inner_lut)
lh_outer_lut = np.load(
get_gamut_boundary_lut_name(outer_color_space_name))
outer_cusp = calc_cusp_in_lc_plane(hue, lh_outer_lut)
    # prepare l_cusp, l_focal and c_focal
l_cusp_lut = np.load(
get_l_cusp_name(
outer_color_space_name=outer_color_space_name,
inner_color_space_name=inner_color_space_name))
l_focal_lut = np.load(
get_focal_name(
outer_color_space_name=outer_color_space_name,
inner_color_space_name=inner_color_space_name,
focal_type="Lfocal"))
c_focal_lut = np.load(
get_focal_name(
outer_color_space_name=outer_color_space_name,
inner_color_space_name=inner_color_space_name,
focal_type="Cfocal"))
l_cusp = calc_value_from_hue_1dlut(hue, l_cusp_lut)
l_focal = calc_value_from_hue_1dlut(hue, l_focal_lut)
c_focal = calc_value_from_hue_1dlut(hue, c_focal_lut)
    # LUT data of the distance from each focal point used for chroma mapping
cmap_lut_c = np.load(
get_chroma_map_lut_name(
outer_color_space_name=outer_color_space_name,
inner_color_space_name=inner_color_space_name,
focal_type="Cfocal"))
cmap_lut_l = np.load(
get_chroma_map_lut_name(
outer_color_space_name=outer_color_space_name,
inner_color_space_name=inner_color_space_name,
focal_type="Lfocal"))
    # get st_degree and ed_degree in the form of 1D LUTs
    # e.g. st_degree_l[hue] = 30°, ed_degree_l[hue] = 120°
inner_cusp_l_lut = calc_cusp_lut(lh_lut=lh_inner_lut)
st_degree_l, ed_degree_l, st_degree_c, ed_degree_c =\
calc_chroma_map_degree2(l_focal_lut, c_focal_lut, inner_cusp_l_lut)
    # prepare data for verification
    # emulate the production path by converting from chroma-lightness
    # hue-degree --> chroma-lightness --> hue-degree --> interpolation
""" L_focal 基準データ """
lightness_l, chroma_l = _make_debug_luminance_chroma_data_fixed_hue(
cl_outer)
hue_array = np.ones(chroma_l.shape[0]) * hue
cl_data_l = np.dstack((chroma_l, lightness_l))[0]
test_degree_l = calc_degree_from_cl_data_using_l_focal(
cl_data=cl_data_l,
l_focal=calc_value_from_hue_1dlut(hue_array, l_focal_lut))
hd_data_l = np.dstack((hue_array, test_degree_l))[0]
""" C_focal 基準データ """
lightness_c, chroma_c = _make_debug_luminance_chroma_data_fixed_hue(
cl_outer)
    hue_array = np.ones(chroma_c.shape[0]) * hue
cl_data_c = np.dstack((chroma_c, lightness_c))[0]
test_degree_c = calc_degree_from_cl_data_using_c_focal(
cl_data=cl_data_c,
c_focal=calc_value_from_hue_1dlut(hue_array, c_focal_lut))
hd_data_c = np.dstack((hue_array, test_degree_c))[0]
    # first, bilinear interpolation of the cmap_lut values
cmap_value_l = interpolate_chroma_map_lut(
cmap_hd_lut=cmap_lut_l, degree_min=st_degree_l,
degree_max=ed_degree_l, data_hd=hd_data_l)
cmap_value_c = interpolate_chroma_map_lut(
cmap_hd_lut=cmap_lut_c, degree_min=st_degree_c,
degree_max=ed_degree_c, data_hd=hd_data_c)
    # do not process data that is not out of gamut
eliminate_inner_gamut_data_l_focal(
dst_distance=cmap_value_l, src_chroma=chroma_l,
src_lightness=lightness_l, l_focal=l_focal)
eliminate_inner_gamut_data_c_focal(
dst_distance=cmap_value_c, src_chroma=chroma_c,
src_lightness=lightness_c, c_focal=c_focal)
    # get the coordinates on the CL plane from the interpolated cmap values
icn_x_l, icn_y_l = calc_chroma_lightness_using_length_from_l_focal(
distance=cmap_value_l, degree=test_degree_l, l_focal=l_focal)
icn_x_c, icn_y_c = calc_chroma_lightness_using_length_from_c_focal(
distance=cmap_value_c, degree=test_degree_c, c_focal=c_focal)
    # merge the L_focal-based and C_focal-based results
icn_x, icn_y = merge_lightness_mapping(
hd_data_l=hd_data_l, st_degree_l=st_degree_l,
chroma_map_l=icn_x_l, lightness_map_l=icn_y_l,
chroma_map_c=icn_x_c, lightness_map_c=icn_y_c)
_debug_plot_check_lightness_mapping_specific_hue(
hue, cl_inner, cl_outer, l_cusp, inner_cusp, outer_cusp,
l_cusp, l_focal, c_focal,
x_val=chroma_l, y_val=lightness_l, map_x=icn_x, map_y=icn_y,
focal_type="All", h_idx=hue_idx,
outer_color_space_name=outer_color_space_name,
inner_color_space_name=inner_color_space_name)
def _debug_plot_check_lightness_mapping_specific_hue(
hue, cl_inner, cl_outer, lcusp, inner_cusp, outer_cusp,
l_cusp, l_focal, c_focal, x_val, y_val, map_x, map_y,
focal_type, h_idx=0, outer_color_space_name=cs.BT2020,
inner_color_space_name=cs.BT709):
graph_title = f"HUE = {hue/2/np.pi*360:.1f}°, for {focal_type}"
graph_title += f"={c_focal:.1f}" if focal_type == "C_focal" else ""
fig1, ax1 = pu.plot_1_graph(
fontsize=22,
figsize=(16 * 0.9, 9 * 1.0),
graph_title=graph_title,
xlabel="Chroma",
ylabel="Lightness",
legend_size=17,
xlim=[-10, 230],
ylim=[-3, 103],
xtick=[x * 20 for x in range(12)],
ytick=[x * 10 for x in range(11)],
linewidth=3,
return_figure=True)
ax1.patch.set_facecolor("#E0E0E0")
in_color = pu.BLUE
ou_color = pu.RED
fo_color = "#808080"
src_color = pu.GREEN
dst_color = pu.PINK
# gamut boundary
ax1.plot(
cl_inner[..., 0], cl_inner[..., 1], c=in_color, label="BT.709")
ax1.plot(cl_outer[..., 0], cl_outer[..., 1], c=ou_color, label="BT.2020")
# gamut cusp
ax1.plot(inner_cusp[1], inner_cusp[0], 's', ms=10, mec='k',
c=in_color, label="BT.709 Cusp")
ax1.plot(outer_cusp[1], outer_cusp[0], 's', ms=10, mec='k',
c=ou_color, label="BT.2020 Cusp")
# l_cusp, l_focal, c_focal
ax1.plot([0], [l_cusp], 'x', ms=12, mew=4, c=in_color, label="L_cusp")
ax1.plot([0], [l_focal], 'x', ms=12, mew=4, c=ou_color, label="L_focal")
ax1.plot([c_focal], [0], '*', ms=12, mew=3, c=ou_color, label="C_focal")
ax1.plot([0, c_focal], [l_focal, 0], '--', c='k')
    # intersection
ax1.plot(x_val, y_val, 'o', ms=9, c=src_color, label="src point")
ax1.plot(map_x, map_y, 'o', ms=6, c=dst_color, label="dst point")
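    # Draw a dashed mapping guide line through each src point. A point above
    # the L_focal--C_focal line is mapped along a ray that starts at
    # (0, l_focal) on the lightness axis; a point below it is mapped along a
    # ray toward (c_focal, 0) on the chroma axis.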
for x, y in zip(x_val, y_val):
if y >= (-l_focal * x / c_focal + l_focal):
aa = (y - l_focal) / x
bb = l_focal
xx = 230
yy = aa * xx + bb
ax1.plot([0, xx], [l_focal, yy], '--', c=fo_color, lw=1)
else:
aa = (y) / (x - c_focal)
bb = y - aa * x
xx = 0
yy = aa * xx + bb
ax1.plot([0, c_focal], [yy, 0], '--', c=fo_color, lw=1)
# annotation
diff = ((map_x - x_val) ** 2 + (map_y - y_val) ** 2) ** 0.5
arrowprops = dict(
facecolor='#333333', shrink=0.0, headwidth=8, headlength=10,
width=1)
for idx in range(len(map_x)):
if diff[idx] > 0.01:
st_pos = (x_val[idx], y_val[idx])
ed_pos = (map_x[idx], map_y[idx])
ax1.annotate(
"", xy=ed_pos, xytext=st_pos, xycoords='data',
textcoords='data', ha='left', va='bottom',
arrowprops=arrowprops)
graph_name = f"/work/overuse/2020/020_explain_BT2407/lightness_mapping_"\
+ f"{outer_color_space_name}_to_{inner_color_space_name}_"\
+ f"{focal_type}_{h_idx:04d}.png"
plt.legend(loc='upper right')
print(graph_name)
# plt.savefig(graph_name, bbox_inches='tight', pad_inches=0.1)
    plt.savefig(graph_name)  # adding the options caused an error, so they were removed
# plt.show()
plt.close(fig1)
def _check_upper_and_lower_mapping(
hue_sample_num=10,
outer_color_space_name=cs.BT2020,
inner_color_space_name=cs.BT709):
hue_list = np.deg2rad(
np.linspace(0, 360, hue_sample_num, endpoint=False))
args = []
for idx, hue in enumerate(hue_list):
# _check_chroma_map_lut_interpolation(
# hue_idx=idx, hue=hue,
# outer_color_space_name=cs.BT2020,
# inner_color_space_name=cs.BT709)
d = dict(
hue_idx=idx, hue=hue,
outer_color_space_name=outer_color_space_name,
inner_color_space_name=inner_color_space_name)
args.append(d)
with Pool(cpu_count()) as pool:
pool.map(thread_wrapper_check_chroma_map_lut_interpolation, args)
def thread_wrapper_check_chroma_map_lut_interpolation(args):
_check_chroma_map_lut_interpolation(**args)
def _check_lightness_mapping_specific_hue_seq(
hue_sample_num=16,
outer_color_space_name=cs.BT2020,
inner_color_space_name=cs.BT709):
hue_list = np.deg2rad(
np.linspace(0, 360, hue_sample_num, endpoint=False))
args = []
for idx, hue in enumerate(hue_list):
# _check_lightness_mapping_specific_hue(
# hue_idx=idx, hue=hue,
# outer_color_space_name=cs.BT2020,
# inner_color_space_name=cs.BT709)
d = dict(
hue_idx=idx, hue=hue,
outer_color_space_name=outer_color_space_name,
inner_color_space_name=inner_color_space_name)
args.append(d)
with Pool(cpu_count()) as pool:
pool.map(thread_wrapper_check_lightness_mapping, args)
def thread_wrapper_check_lightness_mapping(args):
_check_lightness_mapping_specific_hue(**args)
def _debug_plot_blog_mapping_after(
src_rgb, dst_rgb, src_lch, dst_lch,
chroma_min=-5, chroma_max=220, ll_min=0, ll_max=100,
outer_color_space_name=cs.BT2020,
inner_color_space_name=cs.BT709):
"""
ブログでの説明用にシンプルな Chroma Lightness平面をプロット
入力データもプロットするよん。
"""
hue = np.deg2rad(dst_lch[2])
cl_inner = get_chroma_lightness_val_specfic_hue(
hue=hue,
lh_lut_name=get_gamut_boundary_lut_name(inner_color_space_name))
cl_outer =\
get_chroma_lightness_val_specfic_hue(
hue=hue,
lh_lut_name=get_gamut_boundary_lut_name(outer_color_space_name))
l_cusp, l_focal, c_focal = calc_cusp_focal_specific_hue(
hue=hue,
outer_color_space_name=cs.BT2020,
inner_color_space_name=cs.BT709)
lh_inner_lut = np.load(
get_gamut_boundary_lut_name(inner_color_space_name))
inner_cusp = calc_cusp_in_lc_plane(hue, lh_inner_lut)
lh_outer_lut = np.load(
get_gamut_boundary_lut_name(outer_color_space_name))
outer_cusp = calc_cusp_in_lc_plane(hue, lh_outer_lut)
fig, ax1 = pu.plot_1_graph(
fontsize=20,
figsize=(16 * 0.9, 9 * 1.0),
graph_title=f"HUE = {hue/2/np.pi*360:.1f}°",
graph_title_size=None,
xlabel="Chroma",
ylabel="Lightness",
axis_label_size=None,
legend_size=17,
xlim=[chroma_min, chroma_max],
ylim=[ll_min, ll_max],
xtick=[20 * x for x in range(12)],
ytick=[x * 10 for x in range(11)],
xtick_size=None, ytick_size=None,
linewidth=3,
return_figure=True)
ax1.patch.set_facecolor("#E0E0E0")
in_color = "#909090"
ou_color = "#000000"
l_cups_line = "#333333"
fo_color = "#333333"
# gamut boundary
ax1.plot(
cl_inner[..., 0], cl_inner[..., 1], c=in_color,
label=inner_color_space_name)
ax1.plot(
cl_outer[..., 0], cl_outer[..., 1], c=ou_color,
label=outer_color_space_name)
ax1.plot(
src_lch[..., 1], src_lch[..., 0], 'o', c=src_rgb, ms=13,
label="src")
ax1.plot(
dst_lch[..., 1], dst_lch[..., 0], 'o', c=dst_rgb, ms=13,
label="dst")
x = dst_lch[..., 1]
y = dst_lch[..., 0]
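    # Same rule as in the mapping debug plot: above the L_focal--C_focal line
    # the guide ray originates from L_focal, below it from C_focal.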
if y >= (-l_focal * x / c_focal + l_focal):
aa = (y - l_focal) / x
bb = l_focal
xx = 230
yy = aa * xx + bb
ax1.plot([0, xx], [l_focal, yy], '--', lw=1, c=fo_color)
else:
aa = (y) / (x - c_focal)
bb = y - aa * x
xx = 0
yy = aa * xx + bb
ax1.plot([0, c_focal], [yy, 0], '--', lw=1, c=fo_color)
# Cusp
ax1.plot(inner_cusp[1], inner_cusp[0], 's', ms=10, mec='k',
c=in_color, label=f"{inner_color_space_name} cusp", zorder=3)
ax1.plot(outer_cusp[1], outer_cusp[0], 's', ms=10, mec='k',
c=ou_color, label=f"{outer_color_space_name} cusp", zorder=3)
# if inner_cusp[1] < outer_cusp[1]:
# ax1.plot([0, outer_cusp[1]], [l_cusp, outer_cusp[0]], '--', lw=1,
# c=l_cups_line)
# else:
# ax1.plot([0, inner_cusp[1]], [l_cusp, inner_cusp[0]], '--', lw=1,
# c=l_cups_line)
# l_cusp, l_focal, c_focal
ax1.plot([0], [l_cusp], 'x', ms=12, mew=4, c=pu.BLUE, label="L_cusp",
zorder=3)
ax1.plot([0], [l_focal], 'x', ms=12, mew=4, c=pu.RED, label="L_focal",
zorder=3)
ax1.plot([c_focal], [0], '*', ms=12, mew=3, c=pu.RED, label="C_focal",
zorder=3)
ax1.plot(
[0, c_focal], [l_focal, 0], '--', c='k', label="L_focal to C_focal")
if c_focal > chroma_max:
ax1.text(182, 0, f"C_focal = {c_focal:.1f}")
# annotation
fcolor = 0
fcolor = np.array([fcolor, fcolor, fcolor])
arrowprops = dict(
facecolor=fcolor, shrink=0.0, headwidth=12, headlength=15,
width=3, alpha=0.6)
st_pos = (src_lch[1], src_lch[0])
ed_pos = (dst_lch[1], dst_lch[0])
ax1.annotate(
"", xy=ed_pos, xytext=st_pos, xycoords='data',
textcoords='data', ha='left', va='bottom',
arrowprops=arrowprops)
graph_name = f"/work/overuse/2020/020_explain_BT2407/"\
+ f"simple_cl_plane_mapping_HUE_"\
+ f"{hue/2/np.pi*360:.1f}.png"
plt.legend(loc='upper right')
plt.savefig(graph_name, bbox_inches='tight', pad_inches=0.1)
# plt.show()
plt.close(fig)
def _debug_lightness_mapping_for_rgb(
outer_color_space_name=cs.BT2020,
inner_color_space_name=cs.BT709):
rgb_2020_gm24_1 = np.array([1001, 509, 321])
rgb_2020_gm24_2 = np.array([158, 421, 759])
rgb_2020_gm24 = np.array([rgb_2020_gm24_1, rgb_2020_gm24_2]) / 1023
rgb_2020_linear = rgb_2020_gm24 ** 2.4
rgb_709 = bt2407_gamut_mapping_for_rgb_linear(
rgb_linear=rgb_2020_linear,
outer_color_space_name=outer_color_space_name,
inner_color_space_name=inner_color_space_name)
rgb_709_gm24 = np.round((rgb_709 ** (1/2.4) * 1023))
rgb_709_gm24_on_2020 = RGB_to_RGB(
rgb_709,
RGB_COLOURSPACES[inner_color_space_name],
RGB_COLOURSPACES[outer_color_space_name])\
** (1/2.4)
print(rgb_709_gm24_on_2020)
lab_709 = XYZ_to_Lab(
RGB_to_XYZ(
rgb_709, cs.D65, cs.D65,
RGB_COLOURSPACES[inner_color_space_name].RGB_to_XYZ_matrix))
lch_709 = Lab_to_LCHab(lab_709)
lab_2020 = XYZ_to_Lab(
RGB_to_XYZ(
rgb_2020_linear, cs.D65, cs.D65,
RGB_COLOURSPACES[outer_color_space_name].RGB_to_XYZ_matrix))
lch_2020 = Lab_to_LCHab(lab_2020)
_debug_plot_blog_mapping_after(
src_rgb=rgb_2020_gm24[0], dst_rgb=rgb_709_gm24_on_2020[0],
src_lch=lch_2020[0], dst_lch=lch_709[0],
chroma_min=-5, chroma_max=220, ll_min=-3, ll_max=103,
outer_color_space_name=cs.BT2020,
inner_color_space_name=cs.BT709)
_debug_plot_blog_mapping_after(
src_rgb=rgb_2020_gm24[1], dst_rgb=rgb_709_gm24_on_2020[1],
src_lch=lch_2020[1], dst_lch=lch_709[1],
chroma_min=-5, chroma_max=220, ll_min=-3, ll_max=103,
outer_color_space_name=cs.BT2020,
inner_color_space_name=cs.BT709)
print(f"src_lch={lch_2020}")
print(f"dst_lch={lch_709}")
print(f"src_lab={lab_2020}")
print(f"dst_lab={lab_709}")
print(f"src_rgb={rgb_2020_gm24}")
print(f"dst_rgb={rgb_709_gm24}")
print(f"dst_rgb={rgb_709_gm24_on_2020*255}")
def _lch_to_rgb(lch, inner_color_space_name, outer_color_space_name):
lab = LCHab_to_Lab(lch)
xyz = Lab_to_XYZ(lab)
rgb_2020 = XYZ_to_RGB(
xyz, cs.D65, cs.D65,
RGB_COLOURSPACES[outer_color_space_name].XYZ_to_RGB_matrix)
rgb_709 = XYZ_to_RGB(
xyz, cs.D65, cs.D65,
RGB_COLOURSPACES[inner_color_space_name].XYZ_to_RGB_matrix)
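    # The color counts as in-gamut when every channel of the inner
    # (e.g. BT.709) RGB representation lies within [0, 1].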
r_judge = (rgb_709[0] >= 0) & (rgb_709[0] <= 1)
g_judge = (rgb_709[1] >= 0) & (rgb_709[1] <= 1)
b_judge = (rgb_709[2] >= 0) & (rgb_709[2] <= 1)
is_in_gamut = (r_judge & g_judge) & b_judge
rgb = np.clip(rgb_2020, 0.0, 1.0)
return rgb, is_in_gamut
def make_cielab_tp_ctrl(
outer_color_space_name=cs.BT2020,
inner_color_space_name=cs.BT709,
width=1920, height=1080, h_block_num=16*3, v_block_num=9*3):
"""
CIELABの横軸にHue、縦軸にChromaの
テストパターンを作る。
"""
lh_lut = np.load(
get_gamut_boundary_lut_name(color_space_name=outer_color_space_name))
lightness_lut_sample, hue_lut_sample = lh_lut.shape
cusp_buf = []
l_cusp_buf = []
hue_list = np.linspace(0, 2*np.pi, h_block_num, endpoint=False)
for hue in hue_list:
cusp_lc_temp = calc_cusp_in_lc_plane(hue, lh_lut)
cusp_buf.append(cusp_lc_temp)
l_cusp, l_focal, c_focal = calc_cusp_focal_specific_hue(
hue=hue,
outer_color_space_name=outer_color_space_name,
inner_color_space_name=inner_color_space_name)
l_cusp_buf.append(l_cusp)
cusp_lc = np.array(cusp_buf)
l_cusp = np.array(l_cusp_buf)
cusp_chroma = cusp_lc[..., 1]
cusp_lightness = cusp_lc[..., 0]
block_width_list = tpg.equal_devision(width, h_block_num)
block_height_list = tpg.equal_devision(height, v_block_num)
h_buf = []
for h_idx in range(h_block_num):
block_width = block_width_list[h_idx]
hue = hue_list[h_idx]
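        # Line through (chroma=0, lightness=l_cusp) and the gamut cusp
        # (cusp_chroma, cusp_lightness): lightness ll is sampled on this line
        # as the chroma cc increases toward the cusp.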
aa = (cusp_lightness[h_idx] - l_cusp[h_idx]) / (cusp_chroma[h_idx] - 0)
bb = l_cusp[h_idx]
v_buf = []
for v_idx in range(v_block_num):
block_height = block_height_list[v_idx]
cc = v_idx / (v_block_num - 1) * cusp_chroma[h_idx]
ll = aa * cc + bb
lch = np.dstack((ll, cc, np.rad2deg(hue)))[0][0]
rgb, is_in_gamut = _lch_to_rgb(
lch,
outer_color_space_name=outer_color_space_name,
inner_color_space_name=inner_color_space_name)
temp_img = np.ones((block_height, block_width, 3))\
* rgb
if not is_in_gamut:
temp_img[:4, :4] = np.array([0.0, 0.0, 0.0])
v_buf.append(temp_img)
# print(f"hue={np.rad2deg(hue)}, c={cc:.2f}, l={ll:.2f}")
# print(f"hue={np.rad2deg(hue)}, rgb={rgb}, in={is_in_gamut}")
h_buf.append(np.vstack(v_buf))
img = np.hstack(h_buf)
fname = f"./figures/bt2020_tp_src_{width}x{height}.png"
cv2.imwrite(
fname, np.uint16(np.round((img[..., ::-1] ** (1/2.4)) * 0xFFFF)))
def make_cielab_boundary_tp(
color_space_name=cs.BT709,
width=1920, height=1080, h_block_num=16, v_block_num=9):
hue_list = np.linspace(0, 2*np.pi, h_block_num, endpoint=False)
cusp_buf = []
lh_lut = np.load(
get_gamut_boundary_lut_name(color_space_name=color_space_name))
lightness_lut_sample, hue_lut_sample = lh_lut.shape
for hue in hue_list:
cusp_lc_temp = calc_cusp_in_lc_plane(hue, lh_lut)
cusp_buf.append(cusp_lc_temp)
cusp_lc = np.array(cusp_buf)
cusp_lightness_list = cusp_lc[..., 0]
chroma_max = np.max(cusp_lc[..., 1])
block_width_list = tpg.equal_devision(width, h_block_num)
block_height_list = tpg.equal_devision(height, v_block_num)
h_buf = []
for h_idx in range(h_block_num):
hue = hue_list[h_idx]
block_width = block_width_list[h_idx]
boundary = get_chroma_lightness_val_specfic_hue(
hue, get_gamut_boundary_lut_name(color_space_name))
v_buf = []
cusp_lightness = cusp_lightness_list[h_idx]
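        # Use only the upper part of the boundary (lightness >= cusp
        # lightness) and interpolate lightness as a function of chroma along
        # that upper boundary.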
cx = boundary[boundary[..., 1] >= cusp_lightness][..., 0]
ly = boundary[boundary[..., 1] >= cusp_lightness][..., 1]
ll_func = interpolate.interp1d(cx, ly)
for v_idx in range(v_block_num):
block_height = block_height_list[v_idx]
cc = v_idx / (v_block_num - 1) * chroma_max
lower_ok =\
(boundary[..., 0] <= cc) & (boundary[..., 1] >= cusp_lightness)
upper_ok =\
(boundary[..., 0] >= cc) & (boundary[..., 1] >= cusp_lightness)
lower_st_idx = np.argmax(lower_ok)
st_idx = lower_st_idx
if np.sum(upper_ok == True) > 0:
# ll = boundary[st_idx][1]
ll = ll_func(cc)
# cc = boundary[st_idx][0]
lch = np.dstack((ll, cc, np.rad2deg(hue)))
lab = LCHab_to_Lab(lch)
xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(
xyz, cs.D65, cs.D65,
RGB_COLOURSPACES[color_space_name].XYZ_to_RGB_matrix)
                rgb = np.clip(rgb, 0.0, 1.0)
import itertools
import copy
import numpy as np
import numpy.testing as npt
import pytest
import quara.objects.composite_system as csys
import quara.objects.elemental_system as esys
from quara.objects.matrix_basis import (
get_comp_basis,
get_gell_mann_basis,
get_normalized_pauli_basis,
get_pauli_basis,
convert_vec,
)
from quara.objects.operators import tensor_product
from quara.objects.povm import (
Povm,
convert_var_index_to_povm_index,
convert_povm_index_to_var_index,
convert_var_to_povm,
convert_vecs_to_var,
calc_gradient_from_povm,
get_x_povm,
get_xx_povm,
get_xy_povm,
get_xz_povm,
get_y_povm,
get_yx_povm,
get_yy_povm,
get_yz_povm,
get_z_povm,
get_zx_povm,
get_zy_povm,
get_zz_povm,
)
from quara.objects.state import get_x0_1q
from quara.settings import Settings
from quara.objects.composite_system_typical import generate_composite_system
from quara.objects.qoperation_typical import generate_qoperation_object
class TestPovm:
def test_validate_dtype_ng(self):
p1 = np.array([1, 0, 0, 0], dtype=np.complex128)
p2 = np.array([0, 0, 0, 1], dtype=np.complex128)
vecs = [p1, p2]
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
# entries of vecs are not real numbers
with pytest.raises(ValueError):
Povm(c_sys=c_sys, vecs=vecs)
def test_validate_set_of_hermitian_matrices_ok(self):
# Arrange
p1 = np.array([1, 0, 0, 0], dtype=np.float64)
p2 = np.array([0, 0, 0, 1], dtype=np.float64)
vecs = [p1, p2]
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
# Act
povm = Povm(c_sys=c_sys, vecs=vecs)
# Assert
expected = [p1, p2]
assert (povm[0] == expected[0]).all()
assert (povm[1] == expected[1]).all()
assert povm.composite_system is c_sys
def test_validate_set_of_hermitian_matrices_ng(self):
# Arrange
p1 = np.array([1, 0, 0, 0], dtype=np.float64)
p2 = np.array([0, 1, 0, 0], dtype=np.float64)
vecs = [p1, p2]
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
# Act & Assert
with pytest.raises(ValueError):
# ValueError: povm must be a set of Hermitian matrices
_ = Povm(c_sys=c_sys, vecs=vecs)
def test_validate_set_of_hermitian_matrices_not_physical_ok(self):
# Arrange
p1 = np.array([1, 0, 0, 0], dtype=np.float64)
p2 = np.array([0, 1, 0, 0], dtype=np.float64)
vecs = [p1, p2]
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
# Act & Assert
# Test that no exceptions are raised.
_ = Povm(c_sys=c_sys, vecs=vecs, is_physicality_required=False)
def test_validate_sum_is_identity_sum_ok(self):
# Arrange
p1 = np.array([1, 0, 0, 0], dtype=np.float64)
p2 = np.array([0, 0, 0, 1], dtype=np.float64)
vecs = [p1, p2]
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
# Act
povm = Povm(c_sys=c_sys, vecs=vecs)
actual = povm.is_identity_sum()
# Assert
assert actual is True
def test_validate_sum_is_identity_sum_ng(self):
# Arrange
p1 = np.array([1, 0, 0, 0], dtype=np.float64)
p2 = np.array([0, 1, 0, 0], dtype=np.float64)
vecs = [p1, p2]
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
# Act & Assert
with pytest.raises(ValueError):
# ValueError: The sum of the elements of POVM must be an identity matrix.
_ = Povm(c_sys=c_sys, vecs=vecs)
def test_validate_sum_is_identity_sum_not_physical_ok(self):
# Arrange
p1 = np.array([1, 0, 0, 1], dtype=np.float64)
p2 = np.array([1, 0, 0, 1], dtype=np.float64)
vecs = [p1, p2]
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
# Act & Assert
# Test that no exceptions are raised.
_ = Povm(c_sys=c_sys, vecs=vecs, is_physicality_required=False)
def test_validate_is_positive_semidefinite_ok(self):
# Arrange
ps_1 = np.array([1, 0, 0, 0], dtype=np.float64)
ps_2 = np.array([0, 0, 0, 1], dtype=np.float64)
vecs = [ps_1, ps_2]
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
# Act
povm = Povm(c_sys=c_sys, vecs=vecs)
actual = povm.is_positive_semidefinite()
# Assert
assert actual is True
# Act
actual = povm.is_ineq_constraint_satisfied()
# Assert
assert actual is True
def test_validate_is_positive_semidefinite_ng(self):
# Arrange
ps = np.array([1, 0, 0, 2], dtype=np.float64)
        not_ps = np.array([0, 0, 0, -1], dtype=np.float64)
vecs = [ps, not_ps]
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
# Act & Assert
with pytest.raises(ValueError):
_ = Povm(c_sys=c_sys, vecs=vecs)
def test_validate_is_positive_semidefinite_not_physical_ok(self):
# Arrange
ps = np.array([1, 0, 0, 2], dtype=np.float64)
not_ps = np.array([0, 0, 0, -1], dtype=np.float64)
vecs = [ps, not_ps]
e_sys = esys.ElementalSystem(1, get_pauli_basis())
c_sys = csys.CompositeSystem([e_sys])
# Act & Assert
# Test that no exceptions are raised.
povm = Povm(c_sys=c_sys, vecs=vecs, is_physicality_required=False)
actual = povm.is_positive_semidefinite()
# Assert
assert actual is False
# Act
actual = povm.is_ineq_constraint_satisfied()
# Assert
assert actual is False
def test_calc_eigenvalues_all(self):
# Arrange
vec_1 = np.array([1, 0, 0, 0], dtype=np.float64)
vec_2 = np.array([0, 0, 0, 1], dtype=np.float64)
vecs = [vec_1, vec_2]
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
# Act
povm = Povm(c_sys=c_sys, vecs=vecs)
actual = povm.calc_eigenvalues()
# Assert
expected = [
np.array([1, 0], dtype=np.float64),
np.array([1, 0], dtype=np.float64),
]
assert len(actual) == len(expected)
npt.assert_almost_equal(actual[0], expected[0], decimal=15)
npt.assert_almost_equal(actual[1], expected[1], decimal=15)
def test_calc_eigenvalues_one(self):
# Arrange
vec_1 = np.array([1, 0, 0, 0], dtype=np.float64)
vec_2 = np.array([0, 0, 0, 1], dtype=np.float64)
vecs = [vec_1, vec_2]
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
# Act
povm = Povm(c_sys=c_sys, vecs=vecs)
actual = povm.calc_eigenvalues(0)
# Assert
expected = np.array([1, 0], dtype=np.float64)
npt.assert_almost_equal(actual, expected, decimal=15)
# Act
povm = Povm(c_sys=c_sys, vecs=vecs)
actual = povm.calc_eigenvalues(1)
# Assert
expected = np.array([1, 0], dtype=np.float64)
npt.assert_almost_equal(actual, expected, decimal=15)
# def test_validate_dim_ng(self):
# # Arrange
# test_root_dir = Path(os.path.dirname(__file__)).parent.parent
# data_dir = test_root_dir / "data"
# dim = 2 ** 2 # 2 qubits
# num_state = 16
# num_povm = 9
# num_outcome = 4
# povms = s_io.load_povm_list(
# data_dir / "tester_2qubit_povm.csv",
# dim=dim,
# num_povm=num_povm,
# num_outcome=num_outcome,
# )
# vecs = list(povms[0]) # 2qubit
# e_sys = esys.ElementalSystem(1, get_pauli_basis()) # 1qubit
# c_sys = csys.CompositeSystem([e_sys])
# # Act & Assert
# with pytest.raises(ValueError):
# _ = Povm(c_sys=c_sys, vecs=vecs)
def test_convert_basis(self):
# Arrange
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
ps_1 = np.array([1, 0, 0, 0], dtype=np.float64)
ps_2 = np.array([0, 0, 0, 1], dtype=np.float64)
vecs = [ps_1, ps_2]
povm = Povm(c_sys=c_sys, vecs=vecs)
to_basis = get_normalized_pauli_basis()
# Act
actual = povm.convert_basis(to_basis)
# Assert
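        # |0><0| and |1><1| expressed in the normalized Pauli basis are
        # (1 / sqrt(2)) * (e_0 +- e_3).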
expected = [
1 / np.sqrt(2) * np.array([1, 0, 0, 1], dtype=np.float64),
1 / np.sqrt(2) * np.array([1, 0, 0, -1], dtype=np.float64),
]
assert len(actual) == len(expected)
for i, a in enumerate(actual):
assert np.all(a == expected[i])
def test_generate_mprocess(self):
### case: mode_backaction = 0
# Arrange
c_sys_1q = generate_composite_system(mode="qubit", num=1, ids_esys=[0])
povm_z = generate_qoperation_object(
mode="povm", object_name="povm", name="z", c_sys=c_sys_1q
)
# Act
actual = povm_z.generate_mprocess()
# Assert
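        # HS matrices (in the basis of c_sys_1q, assumed to be the normalized
        # Pauli basis) of the maps rho -> |0><0| rho |0><0| and
        # rho -> |1><1| rho |1><1|.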
expected = [
np.array([[0.5, 0, 0, 0.5], [0, 0, 0, 0], [0, 0, 0, 0], [0.5, 0, 0, 0.5]]),
np.array(
[[0.5, 0, 0, -0.5], [0, 0, 0, 0], [0, 0, 0, 0], [-0.5, 0, 0, 0.5]]
),
]
for a, e in zip(actual.hss, expected):
npt.assert_almost_equal(a, e, decimal=15)
### case: mode_backaction = 1
# Arrange
c_sys_1q = generate_composite_system(mode="qubit", num=1, ids_esys=[0])
povm_z = generate_qoperation_object(
mode="povm", object_name="povm", name="z", c_sys=c_sys_1q
)
# Act
actual = povm_z.generate_mprocess(mode_backaction=1)
# Assert
expected = [
np.array([[0.5, 0, 0, 0.5], [0, 0, 0, 0], [0, 0, 0, 0], [0.5, 0, 0, 0.5]]),
np.array(
[[0.5, 0, 0, -0.5], [0, 0, 0, 0], [0, 0, 0, 0], [-0.5, 0, 0, 0.5]]
),
]
for a, e in zip(actual.hss, expected):
npt.assert_almost_equal(a, e, decimal=15)
### case: mode_backaction = 2, post_selected_states = State
state_z0 = generate_qoperation_object(
mode="state", object_name="state", name="z0", c_sys=c_sys_1q
)
# Act
actual = povm_z.generate_mprocess(
mode_backaction=2, post_selected_states=state_z0
)
# Assert
expected = [
np.array([[0.5, 0, 0, 0.5], [0, 0, 0, 0], [0, 0, 0, 0], [0.5, 0, 0, 0.5]]),
np.array(
[[0.5, 0, 0, -0.5], [0, 0, 0, 0], [0, 0, 0, 0], [0.5, 0, 0, -0.5]]
),
]
for a, e in zip(actual.hss, expected):
npt.assert_almost_equal(a, e, decimal=15)
        ### case: mode_backaction = 2, post_selected_states = List[State]
state_z0 = generate_qoperation_object(
mode="state", object_name="state", name="z0", c_sys=c_sys_1q
)
state_z1 = generate_qoperation_object(
mode="state", object_name="state", name="z1", c_sys=c_sys_1q
)
# Act
actual = povm_z.generate_mprocess(
mode_backaction=2, post_selected_states=[state_z0, state_z1]
)
# Assert
expected = [
np.array([[0.5, 0, 0, 0.5], [0, 0, 0, 0], [0, 0, 0, 0], [0.5, 0, 0, 0.5]]),
np.array(
[[0.5, 0, 0, -0.5], [0, 0, 0, 0], [0, 0, 0, 0], [-0.5, 0, 0, 0.5]]
),
]
for a, e in zip(actual.hss, expected):
npt.assert_almost_equal(a, e, decimal=15)
def test_generate_mprocess_error(self):
# Arrange
c_sys_1q = generate_composite_system(mode="qubit", num=1, ids_esys=[0])
povm_z = generate_qoperation_object(
mode="povm", object_name="povm", name="z", c_sys=c_sys_1q
)
### case: mode_backaction = 0 and post_selected_states is not None
with pytest.raises(ValueError):
povm_z.generate_mprocess(mode_backaction=0, post_selected_states="not None")
### case: mode_backaction = 1 and post_selected_states is not None
with pytest.raises(ValueError):
povm_z.generate_mprocess(mode_backaction=1, post_selected_states="not None")
### case: mode_backaction = 2 and post_selected_states is None
with pytest.raises(ValueError):
povm_z.generate_mprocess(mode_backaction=2, post_selected_states=None)
### case: mode_backaction = 3
with pytest.raises(ValueError):
povm_z.generate_mprocess(mode_backaction=3)
def test_measurements(self):
# Case 1:
# Arrange
basis1 = get_comp_basis()
e_sys1 = esys.ElementalSystem(1, basis1)
c_sys1 = csys.CompositeSystem([e_sys1])
vecs1 = [
np.array([2, 3, 5, 7], dtype=np.float64),
np.array([11, 13, 17, 19], dtype=np.float64),
]
povm1 = Povm(c_sys1, vecs1, is_physicality_required=False)
# Act
actual = povm1.nums_local_outcomes
# Assert
expected = [2]
assert len(actual) == len(expected)
for a, e in zip(actual, expected):
assert a == e
assert povm1.num_outcomes == 2
# Case 2:
# Act
povm1._nums_local_outcomes = [1, 2]
actual = povm1.nums_local_outcomes
# Assert
expected = [1, 2]
assert len(actual) == len(expected)
for a, e in zip(actual, expected):
assert a == e
assert povm1.num_outcomes == 2
def test_vec(self):
# Case 1:
# Arrange
basis1 = get_comp_basis()
e_sys1 = esys.ElementalSystem(1, basis1)
c_sys1 = csys.CompositeSystem([e_sys1])
vecs1 = [
np.array([2, 3, 5, 7], dtype=np.float64),
np.array([11, 13, 17, 19], dtype=np.float64),
]
povm1 = Povm(c_sys1, vecs1, is_physicality_required=False)
# Act
actual0 = povm1.vec(0)
actual1 = povm1.vec(1)
actual2 = povm1.vec((0))
actual3 = povm1.vec((1))
# Assert
assert np.all(actual0 == vecs1[0])
assert np.all(actual1 == vecs1[1])
assert np.all(actual2 == vecs1[0])
assert np.all(actual3 == vecs1[1])
        # Case 2: type of argument is tuple
# Arrange
basis2 = get_comp_basis()
e_sys2 = esys.ElementalSystem(2, basis2)
c_sys2 = csys.CompositeSystem([e_sys2])
vecs2 = [
np.array([23, 29, 31, 37], dtype=np.float64),
np.array([41, 43, 47, 53], dtype=np.float64),
]
povm2 = Povm(c_sys2, vecs2, is_physicality_required=False)
povm12 = tensor_product(povm1, povm2)
# Act
actual = [
povm12.vec((0, 0)),
povm12.vec((0, 1)),
povm12.vec((1, 0)),
povm12.vec((1, 1)),
]
# Assert
expected = [
np.kron(vec1, vec2)
for vec1, vec2 in itertools.product(povm1.vecs, povm2.vecs)
]
assert len(actual) == len(expected)
for a, e in zip(actual, expected):
assert np.all(a == e)
        # Case 3: type of argument is int
# Act
actual = [
povm12.vec(0),
povm12.vec(1),
povm12.vec(2),
povm12.vec(3),
]
# Assert
expected = [
np.kron(vec1, vec2)
for vec1, vec2 in itertools.product(povm1.vecs, povm2.vecs)
]
assert len(actual) == len(expected)
for a, e in zip(actual, expected):
assert np.all(a == e)
def test_vec_unexpected(self):
# Arrange
basis1 = get_comp_basis()
e_sys1 = esys.ElementalSystem(1, basis1)
c_sys1 = csys.CompositeSystem([e_sys1])
vecs1 = [
np.array([2, 3, 5, 7], dtype=np.float64),
np.array([11, 13, 17, 19], dtype=np.float64),
]
povm1 = Povm(c_sys1, vecs1, is_physicality_required=False)
# Case 1:
# Act & Assert
with pytest.raises(ValueError):
# ValueError: length of tuple does not equal length of the list of measurements.
_ = povm1.vec((0, 0))
# Case 2:
# Act & Assert
with pytest.raises(IndexError):
# IndexError: specified index does not exist in the list of measurements.
_ = povm1.vec(2)
def test_vec_multi_dimensional(self):
# Arrange
c_sys_2q = generate_composite_system(mode="qubit", num=2, ids_esys=[0, 1])
c_sys_1q = generate_composite_system(mode="qubit", num=1, ids_esys=[2])
bell = generate_qoperation_object(
mode="povm", object_name="povm", name="bell", c_sys=c_sys_2q
)
z = generate_qoperation_object(
mode="povm", object_name="povm", name="z", c_sys=c_sys_1q
)
povm = tensor_product(bell, z)
# Case 1:
# Act
actual = povm.vec((0, 0))
# Assert
expected = povm.vecs[0]
npt.assert_almost_equal(actual, expected, decimal=15)
# Case 2:
# Act
actual = povm.vec((0, 1))
# Assert
expected = povm.vecs[1]
npt.assert_almost_equal(actual, expected, decimal=15)
# Case 3:
# Act
actual = povm.vec((3, 1))
# Assert
expected = povm.vecs[7]
npt.assert_almost_equal(actual, expected, decimal=15)
def test_md_index2serial_index(self):
# Arrange
c_sys_2q = generate_composite_system(mode="qubit", num=2, ids_esys=[0, 1])
c_sys_1q = generate_composite_system(mode="qubit", num=1, ids_esys=[2])
bell = generate_qoperation_object(
mode="povm", object_name="povm", name="bell", c_sys=c_sys_2q
)
z = generate_qoperation_object(
mode="povm", object_name="povm", name="z", c_sys=c_sys_1q
)
povm = tensor_product(bell, z)
# Act & Assert
assert povm._md_index2serial_index((0, 0)) == 0
assert povm._md_index2serial_index((0, 1)) == 1
assert povm._md_index2serial_index((1, 0)) == 2
assert povm._md_index2serial_index((1, 1)) == 3
assert povm._md_index2serial_index((2, 0)) == 4
assert povm._md_index2serial_index((2, 1)) == 5
assert povm._md_index2serial_index((3, 0)) == 6
assert povm._md_index2serial_index((3, 1)) == 7
def test_is_physical(self):
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
p1 = np.array([1, 0, 0, 0], dtype=np.float64)
p2 = np.array([0, 0, 0, 1], dtype=np.float64)
povm = Povm(c_sys=c_sys, vecs=[p1, p2])
assert povm.is_physical() == True
p1 = np.array([1, 0, 0, 2], dtype=np.float64)
p2 = np.array([0, 0, 0, -1], dtype=np.float64)
povm = Povm(c_sys=c_sys, vecs=[p1, p2], is_physicality_required=False)
assert povm.is_physical() == False
p1 = np.array([1, 0, 0, 1], dtype=np.float64)
p2 = np.array([1, 0, 0, 1], dtype=np.float64)
povm = Povm(c_sys=c_sys, vecs=[p1, p2], is_physicality_required=False)
assert povm.is_physical() == False
def test_matrix(self):
# Case 1:
# Arrange
basis1 = get_comp_basis()
e_sys1 = esys.ElementalSystem(1, basis1)
c_sys1 = csys.CompositeSystem([e_sys1])
vecs1 = [
np.array([2, 3, 5, 7], dtype=np.float64),
np.array([11, 13, 17, 19], dtype=np.float64),
]
povm1 = Povm(c_sys1, vecs1, is_physicality_required=False)
# Act
actual = povm1.matrix(0)
# Assert
expected = povm1.matrices()
npt.assert_almost_equal(actual, expected[0], decimal=15)
# Act
actual = povm1.matrix(1)
# Assert
npt.assert_almost_equal(actual, expected[1], decimal=15)
        # Case 2:
# Arrange
basis2 = get_comp_basis()
e_sys2 = esys.ElementalSystem(2, basis2)
c_sys2 = csys.CompositeSystem([e_sys2])
vecs2 = [
np.array([23, 29, 31, 37], dtype=np.float64),
np.array([41, 43, 47, 53], dtype=np.float64),
]
povm2 = Povm(c_sys2, vecs2, is_physicality_required=False)
povm12 = tensor_product(povm1, povm2)
# Act
actual = povm12.matrix((0, 0))
# Assert
expected = povm12.matrices()
npt.assert_almost_equal(actual, expected[0], decimal=15)
# Act
actual = povm12.matrix((0, 1))
# Assert
npt.assert_almost_equal(actual, expected[1], decimal=15)
# Act
actual = povm12.matrix((1, 0))
# Assert
npt.assert_almost_equal(actual, expected[2], decimal=15)
# Act
actual = povm12.matrix((1, 1))
# Assert
npt.assert_almost_equal(actual, expected[3], decimal=15)
def test_matrix_unexpected(self):
# Arrange
basis1 = get_comp_basis()
e_sys1 = esys.ElementalSystem(1, basis1)
c_sys1 = csys.CompositeSystem([e_sys1])
vecs1 = [
np.array([2, 3, 5, 7], dtype=np.float64),
np.array([11, 13, 17, 19], dtype=np.float64),
]
povm1 = Povm(c_sys1, vecs1, is_physicality_required=False)
# Act & Assert
unexpected_type = [0]
with pytest.raises(TypeError):
# TypeError: The type of `key` must be int or str.
_ = povm1.matrix(unexpected_type)
def test_to_var(self):
# Arrange
e_sys = esys.ElementalSystem(0, get_normalized_pauli_basis())
c_sys = csys.CompositeSystem([e_sys])
vecs = [
np.array([2, 3, 5, 7], dtype=np.float64),
np.array([11, 13, 17, 19], dtype=np.float64),
]
# default
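        # With on_para_eq_constraint=True (the default) the last element is
        # fixed by the identity-sum constraint, so to_var() returns only the
        # first vec; with False it returns all vecs stacked.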
povm = Povm(c_sys, vecs, is_physicality_required=False)
# Act
actual = povm.to_var()
# Assert
expected = np.array([2, 3, 5, 7], dtype=np.float64)
npt.assert_almost_equal(actual, expected, decimal=15)
# Arrange
vecs = [
np.array([2, 3, 5, 7], dtype=np.float64),
np.array([11, 13, 17, 19], dtype=np.float64),
]
povm = Povm(
c_sys, vecs, is_physicality_required=False, on_para_eq_constraint=True
)
# Actual
actual = povm.to_var()
# Assert
expected = np.array([2, 3, 5, 7], dtype=np.float64)
npt.assert_almost_equal(actual, expected, decimal=15)
# Arrange
vecs = [
np.array([2, 3, 5, 7], dtype=np.float64),
np.array([11, 13, 17, 19], dtype=np.float64),
]
povm = Povm(
c_sys, vecs, is_physicality_required=False, on_para_eq_constraint=False
)
# Actual
actual = povm.to_var()
# Assert
expected = np.array([2, 3, 5, 7, 11, 13, 17, 19], dtype=np.float64)
npt.assert_almost_equal(actual, expected, decimal=15)
def test_generate_from_var(self):
# Arrange
e_sys = esys.ElementalSystem(0, get_normalized_pauli_basis())
c_sys = csys.CompositeSystem([e_sys])
vecs = [
1 / np.sqrt(2) * np.array([1, 1, 0, 0], dtype=np.float64),
1 / np.sqrt(2) * np.array([1, -1, 0, 0], dtype=np.float64),
]
to_vecs = [
convert_vec(vec, get_normalized_pauli_basis(), c_sys.basis()).real.astype(
np.float64
)
for vec in vecs
]
init_is_physicality_required = False
init_is_estimation_object = True
init_on_para_eq_constraint = False
init_on_algo_eq_constraint = True
init_on_algo_ineq_constraint = False
init_eps_proj_physical = 10 ** (-3)
source_povm = Povm(
c_sys,
vecs=to_vecs,
is_physicality_required=init_is_physicality_required,
is_estimation_object=init_is_estimation_object,
on_para_eq_constraint=init_on_para_eq_constraint,
on_algo_eq_constraint=init_on_algo_eq_constraint,
on_algo_ineq_constraint=init_on_algo_ineq_constraint,
eps_proj_physical=init_eps_proj_physical,
)
# Case 1: default
var = np.array([2, 3, 5, 7, 11, 13, 17, 19], dtype=np.float64)
# Act
actual = source_povm.generate_from_var(var)
# Assert
expected = [
np.array([2, 3, 5, 7], dtype=np.float64),
np.array([11, 13, 17, 19], dtype=np.float64),
]
assert len(actual.vecs) == len(expected)
for a, e in zip(actual.vecs, expected):
npt.assert_almost_equal(a, e, decimal=15)
assert actual._composite_system is c_sys
assert actual.is_physicality_required is init_is_physicality_required
assert actual.is_estimation_object is init_is_estimation_object
assert actual.on_para_eq_constraint is init_on_para_eq_constraint
assert actual.on_algo_eq_constraint is init_on_algo_eq_constraint
assert actual.on_algo_ineq_constraint is init_on_algo_ineq_constraint
assert actual.eps_proj_physical is init_eps_proj_physical
# Case 2:
with pytest.raises(ValueError):
# ValueError: the POVM is not physically correct.
_ = source_povm.generate_from_var(var, is_physicality_required=True)
# Case 3:
# Arrange
# var = np.array([2, 3, 5, 7], dtype=np.float64)
# source_is_estimation_object = False
# source_on_para_eq_constraint = True
# source_on_algo_eq_constraint = False
# source_on_algo_ineq_constraint = True
# source_eps_proj_physical = 10 ** (-2)
# Act
# actual = source_povm.generate_from_var(
# var,
# is_estimation_object=source_is_estimation_object,
# on_para_eq_constraint=source_on_para_eq_constraint,
# on_algo_eq_constraint=source_on_algo_eq_constraint,
# on_algo_ineq_constraint=source_on_algo_ineq_constraint,
# eps_proj_physical=source_eps_proj_physical,
# )
# Assert
# expected = [
# np.array([2, 3, 5, 7], dtype=np.float64),
# np.array([-1, -3, -5, -6], dtype=np.float64),
# ]
# assert len(actual.vecs) == len(expected)
# for a, e in zip(actual.vecs, expected):
# npt.assert_almost_equal(a, e, decimal=15)
# assert actual._composite_system is c_sys
# assert actual.is_physicality_required is init_is_physicality_required
# assert actual.is_estimation_object is source_is_estimation_object
# assert actual.on_para_eq_constraint is source_on_para_eq_constraint
# assert actual.on_algo_eq_constraint is source_on_algo_eq_constraint
# assert actual.on_algo_ineq_constraint is source_on_algo_ineq_constraint
# assert actual.eps_proj_physical == source_eps_proj_physical
# Case 4:
        # Arrange
e_sys = esys.ElementalSystem(0, get_normalized_pauli_basis())
c_sys = csys.CompositeSystem([e_sys])
a0, a1, a2, a3 = 1, 1 / np.sqrt(2), 0, 1 / np.sqrt(2)
m1 = (1 / np.sqrt(2)) * np.array([a0, a1, a2, a3])
m2 = (1 / np.sqrt(2)) * np.array([2 - a0, -a1, -a2, -a3])
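        # m1 + m2 = [sqrt(2), 0, 0, 0], which is vec(I) in the normalized
        # Pauli basis, so the two elements form a physical two-outcome POVM.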
source_is_estimation_object = False
source_on_para_eq_constraint = True
source_on_algo_eq_constraint = False
source_on_algo_ineq_constraint = True
source_eps_proj_physical = 10 ** (-2)
source_povm = Povm(
vecs=[m1, m2],
c_sys=c_sys,
is_estimation_object=source_is_estimation_object,
on_para_eq_constraint=source_on_para_eq_constraint,
on_algo_eq_constraint=source_on_algo_eq_constraint,
on_algo_ineq_constraint=source_on_algo_ineq_constraint,
eps_proj_physical=source_eps_proj_physical,
)
var = source_povm.to_var()
source_is_estimation_object = False
source_on_para_eq_constraint = True
source_on_algo_eq_constraint = False
source_on_algo_ineq_constraint = True
source_eps_proj_physical = 10 ** (-2)
# Act
actual = source_povm.generate_from_var(
var,
is_estimation_object=source_is_estimation_object,
on_para_eq_constraint=source_on_para_eq_constraint,
on_algo_eq_constraint=source_on_algo_eq_constraint,
on_algo_ineq_constraint=source_on_algo_ineq_constraint,
eps_proj_physical=source_eps_proj_physical,
)
expected = source_povm.vecs
assert len(actual.vecs) == len(expected)
for a, e in zip(actual.vecs, expected):
npt.assert_almost_equal(a, e, decimal=15)
assert actual._composite_system is c_sys
assert actual.is_physicality_required is True # default
assert actual.is_estimation_object is source_is_estimation_object
assert actual.on_para_eq_constraint is source_on_para_eq_constraint
assert actual.on_algo_eq_constraint is source_on_algo_eq_constraint
assert actual.on_algo_ineq_constraint is source_on_algo_ineq_constraint
assert actual.eps_proj_physical == source_eps_proj_physical
def test_set_zero(self):
# Arrange
e_sys = esys.ElementalSystem(0, get_normalized_pauli_basis())
c_sys = csys.CompositeSystem([e_sys])
povm = get_x_povm(c_sys)
old_povm = copy.copy(povm)
# Act
povm.set_zero()
assert len(povm.vecs) == len(old_povm.vecs)
for actual, old in zip(povm.vecs, old_povm.vecs):
assert actual.size == old.size
expected = np.zeros(old.size, dtype=np.float64)
npt.assert_almost_equal(actual, expected, decimal=15)
assert povm.dim == old_povm.dim
assert povm.is_physicality_required == False
assert povm.is_estimation_object == old_povm.is_estimation_object
assert povm.on_para_eq_constraint == old_povm.on_para_eq_constraint
assert povm.on_algo_eq_constraint == old_povm.on_algo_eq_constraint
assert povm.on_algo_ineq_constraint == old_povm.on_algo_ineq_constraint
assert povm.eps_proj_physical == old_povm.eps_proj_physical
def test_generate_origin_obj(self):
# generate_origin_obj()
# Arrange
e_sys = esys.ElementalSystem(0, get_normalized_pauli_basis())
c_sys = csys.CompositeSystem([e_sys])
vec_1 = np.array([1, 2, 3, 4], dtype=np.float64)
vec_2 = np.array([5, 6, 7, 8], dtype=np.float64)
vecs = [vec_1, vec_2]
povm = Povm(
c_sys=c_sys,
vecs=vecs,
is_physicality_required=False,
is_estimation_object=True,
on_para_eq_constraint=False,
on_algo_eq_constraint=True,
on_algo_ineq_constraint=False,
eps_proj_physical=0.2,
)
# Act
actual = povm.generate_origin_obj()
expected_vecs = [
np.array([np.sqrt(2) / 2, 0, 0, 0], dtype=np.float64),
np.array([np.sqrt(2) / 2, 0, 0, 0], dtype=np.float64),
]
assert len(actual.vecs) == len(expected_vecs)
for a, e in zip(actual.vecs, expected_vecs):
npt.assert_almost_equal(a, e, decimal=15)
assert actual.is_physicality_required == False
assert actual.is_estimation_object == False
assert actual.on_para_eq_constraint == povm.on_para_eq_constraint
assert actual.on_algo_eq_constraint == povm.on_algo_eq_constraint
assert actual.on_algo_ineq_constraint == povm.on_algo_ineq_constraint
assert actual.eps_proj_physical == povm.eps_proj_physical
def test_add(self):
# Arrange
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
vec_11 = np.array([1, 2, 3, 4], dtype=np.float64)
vec_12 = np.array([5, 6, 7, 8], dtype=np.float64)
vecs = [vec_11, vec_12]
povm_1 = Povm(c_sys=c_sys, vecs=vecs, is_physicality_required=False)
vec_21 = np.array([10, 20, 30, 40], dtype=np.float64)
vec_22 = np.array([50, 60, 70, 80], dtype=np.float64)
vecs = [vec_21, vec_22]
povm_2 = Povm(c_sys=c_sys, vecs=vecs, is_physicality_required=False)
# Act
actual = povm_1 + povm_2
# Assert
expected_vecs = [
np.array([11, 22, 33, 44], dtype=np.float64),
np.array([55, 66, 77, 88], dtype=np.float64),
]
assert type(actual) == Povm
assert len(actual.vecs) == len(expected_vecs)
for a, e in zip(actual.vecs, expected_vecs):
npt.assert_almost_equal(a, e, decimal=15)
assert actual.is_physicality_required == False
assert actual.is_estimation_object == False
assert actual.on_para_eq_constraint == povm_1.on_para_eq_constraint
assert actual.on_algo_eq_constraint == povm_1.on_algo_eq_constraint
assert actual.on_algo_ineq_constraint == povm_1.on_algo_ineq_constraint
assert actual.eps_proj_physical == povm_1.eps_proj_physical
def test_add_exception(self):
# Arrange
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
vec_11 = np.array([1, 2, 3, 4], dtype=np.float64)
vec_12 = np.array([5, 6, 7, 8], dtype=np.float64)
vecs = [vec_11, vec_12]
povm_1 = Povm(c_sys=c_sys, vecs=vecs, is_physicality_required=False)
# Case 1:
# Arrange
state = get_x0_1q(c_sys)
# Act & Assert
with pytest.raises(TypeError):
_ = povm_1 + state
# Case 2:
# Arrange
vec_21 = np.array([10, 20, 30, 40], dtype=np.float64)
vec_22 = np.array([50, 60, 70, 80], dtype=np.float64)
vecs = [vec_21, vec_22]
povm_2 = Povm(
c_sys=c_sys,
vecs=vecs,
is_physicality_required=False,
on_para_eq_constraint=False,
)
# Act & Assert
with pytest.raises(ValueError):
_ = povm_1 + povm_2
# Case 3:
# Arrange
vec_21 = np.array([10, 20, 30, 40], dtype=np.float64)
vec_22 = np.array([50, 60, 70, 80], dtype=np.float64)
vecs = [vec_21, vec_22, vec_21]
povm_2 = Povm(c_sys=c_sys, vecs=vecs, is_physicality_required=False)
# Act & Assert
with pytest.raises(ValueError):
_ = povm_1 + povm_2
# Case 4:
# Arrange
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
vec_31 = np.array([10, 20, 30, 40], dtype=np.float64)
vec_32 = np.array([50, 60, 70, 80], dtype=np.float64)
vecs = [vec_31, vec_32]
povm_3 = Povm(c_sys=c_sys, vecs=vecs, is_physicality_required=False)
# Act & Assert
with pytest.raises(ValueError):
actual = povm_1 + povm_3
def test_sub(self):
# Arrange
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
vec_11 = np.array([1, 2, 3, 4], dtype=np.float64)
vec_12 = np.array([5, 6, 7, 8], dtype=np.float64)
vecs = [vec_11, vec_12]
povm_1 = Povm(c_sys=c_sys, vecs=vecs, is_physicality_required=False)
vec_21 = np.array([10, 20, 30, 40], dtype=np.float64)
vec_22 = np.array([50, 60, 70, 80], dtype=np.float64)
vecs = [vec_21, vec_22]
povm_2 = Povm(c_sys=c_sys, vecs=vecs, is_physicality_required=False)
# Act
actual = povm_1 - povm_2
# Assert
expected_vecs = [
np.array([-9, -18, -27, -36], dtype=np.float64),
np.array([-45, -54, -63, -72], dtype=np.float64),
]
assert type(actual) == Povm
assert len(actual.vecs) == len(expected_vecs)
for a, e in zip(actual.vecs, expected_vecs):
npt.assert_almost_equal(a, e, decimal=15)
assert actual.is_physicality_required == False
assert actual.is_estimation_object == False
assert actual.on_para_eq_constraint == povm_1.on_para_eq_constraint
assert actual.on_algo_eq_constraint == povm_1.on_algo_eq_constraint
assert actual.on_algo_ineq_constraint == povm_1.on_algo_ineq_constraint
assert actual.eps_proj_physical == povm_1.eps_proj_physical
def test_sub_exception(self):
# Arrange
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
vec_11 = np.array([1, 2, 3, 4], dtype=np.float64)
vec_12 = np.array([5, 6, 7, 8], dtype=np.float64)
vecs = [vec_11, vec_12]
povm_1 = Povm(c_sys=c_sys, vecs=vecs, is_physicality_required=False)
# Case 1:
# Arrange
state = get_x0_1q(c_sys)
# Act & Assert
with pytest.raises(TypeError):
_ = povm_1 - state
# Case 2:
# Arrange
vec_21 = np.array([10, 20, 30, 40], dtype=np.float64)
vec_22 = np.array([50, 60, 70, 80], dtype=np.float64)
vecs = [vec_21, vec_22]
povm_2 = Povm(
c_sys=c_sys,
vecs=vecs,
is_physicality_required=False,
on_para_eq_constraint=False,
)
# Act & Assert
with pytest.raises(ValueError):
_ = povm_1 - povm_2
# Case 3:
# Arrange
vec_21 = np.array([10, 20, 30, 40], dtype=np.float64)
vec_22 = np.array([50, 60, 70, 80], dtype=np.float64)
vecs = [vec_21, vec_22, vec_21]
povm_2 = Povm(c_sys=c_sys, vecs=vecs, is_physicality_required=False)
# Act & Assert
with pytest.raises(ValueError):
_ = povm_1 - povm_2
# Case 4:
# Arrange
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
vec_31 = np.array([10, 20, 30, 40], dtype=np.float64)
vec_32 = np.array([50, 60, 70, 80], dtype=np.float64)
vecs = [vec_31, vec_32]
povm_3 = Povm(c_sys=c_sys, vecs=vecs, is_physicality_required=False)
# Act & Assert
with pytest.raises(ValueError):
actual = povm_1 - povm_3
def test_mul(self):
# Arrange
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
vec_11 = np.array([1, 2, 3, 4], dtype=np.float64)
vec_12 = np.array([5, 6, 7, 8], dtype=np.float64)
vecs = [vec_11, vec_12]
povm_1 = Povm(c_sys=c_sys, vecs=vecs, is_physicality_required=False)
# Case 1:
# Act
actual = povm_1 * 10
# Assert
expected_vecs = [
np.array([10, 20, 30, 40], dtype=np.float64),
np.array([50, 60, 70, 80], dtype=np.float64),
]
assert type(actual) == Povm
assert len(actual.vecs) == len(expected_vecs)
for a, e in zip(actual.vecs, expected_vecs):
npt.assert_almost_equal(a, e, decimal=15)
assert actual.is_physicality_required == False
assert actual.is_estimation_object == False
assert actual.on_para_eq_constraint == povm_1.on_para_eq_constraint
assert actual.on_algo_eq_constraint == povm_1.on_algo_eq_constraint
assert actual.on_algo_ineq_constraint == povm_1.on_algo_ineq_constraint
assert actual.eps_proj_physical == povm_1.eps_proj_physical
# Case 2:
# Act
actual = povm_1 * 0.1
# Assert
expected_vecs = [
np.array([0.1, 0.2, 0.3, 0.4], dtype=np.float64),
np.array([0.5, 0.6, 0.7, 0.8], dtype=np.float64),
]
assert type(actual) == Povm
assert len(actual.vecs) == len(expected_vecs)
for a, e in zip(actual.vecs, expected_vecs):
npt.assert_almost_equal(a, e, decimal=15)
assert actual.is_physicality_required == False
assert actual.is_estimation_object == False
assert actual.on_para_eq_constraint == povm_1.on_para_eq_constraint
assert actual.on_algo_eq_constraint == povm_1.on_algo_eq_constraint
assert actual.on_algo_ineq_constraint == povm_1.on_algo_ineq_constraint
assert actual.eps_proj_physical == povm_1.eps_proj_physical
# Case 3: Exception
# Act & Assert
with pytest.raises(TypeError):
_ = povm_1 * povm_1
def test_rmul(self):
# Arrange
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
vec_11 = np.array([1, 2, 3, 4], dtype=np.float64)
vec_12 = np.array([5, 6, 7, 8], dtype=np.float64)
vecs = [vec_11, vec_12]
povm_1 = Povm(c_sys=c_sys, vecs=vecs, is_physicality_required=False)
# Case 1:
# Act
actual = 10 * povm_1
# Assert
expected_vecs = [
np.array([10, 20, 30, 40], dtype=np.float64),
np.array([50, 60, 70, 80], dtype=np.float64),
]
assert type(actual) == Povm
assert len(actual.vecs) == len(expected_vecs)
for a, e in zip(actual.vecs, expected_vecs):
npt.assert_almost_equal(a, e, decimal=15)
assert actual.is_physicality_required == False
assert actual.is_estimation_object == False
assert actual.on_para_eq_constraint == povm_1.on_para_eq_constraint
assert actual.on_algo_eq_constraint == povm_1.on_algo_eq_constraint
assert actual.on_algo_ineq_constraint == povm_1.on_algo_ineq_constraint
assert actual.eps_proj_physical == povm_1.eps_proj_physical
# Case 2:
# Act
actual = 0.1 * povm_1
# Assert
expected_vecs = [
np.array([0.1, 0.2, 0.3, 0.4], dtype=np.float64),
np.array([0.5, 0.6, 0.7, 0.8], dtype=np.float64),
]
assert type(actual) == Povm
assert len(actual.vecs) == len(expected_vecs)
for a, e in zip(actual.vecs, expected_vecs):
npt.assert_almost_equal(a, e, decimal=15)
assert actual.is_physicality_required == False
assert actual.is_estimation_object == False
assert actual.on_para_eq_constraint == povm_1.on_para_eq_constraint
assert actual.on_algo_eq_constraint == povm_1.on_algo_eq_constraint
assert actual.on_algo_ineq_constraint == povm_1.on_algo_ineq_constraint
assert actual.eps_proj_physical == povm_1.eps_proj_physical
# Case 3: Exception
# Act & Assert
with pytest.raises(TypeError):
_ = povm_1 * povm_1
def test_truediv(self):
# Arrange
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
vec_11 = np.array([1, 2, 3, 4], dtype=np.float64)
vec_12 = np.array([5, 6, 7, 8], dtype=np.float64)
vecs = [vec_11, vec_12]
povm_1 = Povm(c_sys=c_sys, vecs=vecs, is_physicality_required=False)
# Case 1:
# Act
actual = povm_1 / 10
# Assert
expected_vecs = [
np.array([0.1, 0.2, 0.3, 0.4], dtype=np.float64),
np.array([0.5, 0.6, 0.7, 0.8], dtype=np.float64),
]
assert type(actual) == Povm
assert len(actual.vecs) == len(expected_vecs)
for a, e in zip(actual.vecs, expected_vecs):
npt.assert_almost_equal(a, e, decimal=15)
assert actual.is_physicality_required == False
assert actual.is_estimation_object == False
assert actual.on_para_eq_constraint == povm_1.on_para_eq_constraint
assert actual.on_algo_eq_constraint == povm_1.on_algo_eq_constraint
assert actual.on_algo_ineq_constraint == povm_1.on_algo_ineq_constraint
assert actual.eps_proj_physical == povm_1.eps_proj_physical
# Case 2:
# Act
actual = povm_1 / 0.1
# Assert
expected_vecs = [
np.array([10, 20, 30, 40], dtype=np.float64),
np.array([50, 60, 70, 80], dtype=np.float64),
]
assert type(actual) == Povm
assert len(actual.vecs) == len(expected_vecs)
for a, e in zip(actual.vecs, expected_vecs):
npt.assert_almost_equal(a, e, decimal=15)
assert actual.is_physicality_required == False
assert actual.is_estimation_object == False
assert actual.on_para_eq_constraint == povm_1.on_para_eq_constraint
assert actual.on_algo_eq_constraint == povm_1.on_algo_eq_constraint
assert actual.on_algo_ineq_constraint == povm_1.on_algo_ineq_constraint
assert actual.eps_proj_physical == povm_1.eps_proj_physical
        # Case 3:
# Act
actual = povm_1 / 0
# Assert
expected_vecs = [
np.array(
[float("inf"), float("inf"), float("inf"), float("inf")],
dtype=np.float64,
),
np.array(
[float("inf"), float("inf"), float("inf"), float("inf")],
dtype=np.float64,
),
]
assert type(actual) == Povm
assert len(actual.vecs) == len(expected_vecs)
for a, e in zip(actual.vecs, expected_vecs):
npt.assert_almost_equal(a, e, decimal=15)
assert actual.is_physicality_required == False
assert actual.is_estimation_object == False
assert actual.on_para_eq_constraint == povm_1.on_para_eq_constraint
assert actual.on_algo_eq_constraint == povm_1.on_algo_eq_constraint
assert actual.on_algo_ineq_constraint == povm_1.on_algo_ineq_constraint
assert actual.eps_proj_physical == povm_1.eps_proj_physical
        # Case 4: Exception
# Act & Assert
with pytest.raises(TypeError):
_ = povm_1 / povm_1
        # Case 5: Exception
# Act & Assert
with pytest.raises(TypeError):
_ = 1 / povm_1
def test_calc_proj_eq_constraint(self):
# Case 1:
e_sys = esys.ElementalSystem(0, get_normalized_pauli_basis())
c_sys = csys.CompositeSystem([e_sys])
povm = get_x_povm(c_sys)
# Act
actual = povm.calc_proj_eq_constraint()
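        # The projection shifts each element by vec(I) / num_outcomes minus
        # the mean of the elements, so the projected vecs again sum to the
        # identity ([sqrt(2), 0, 0, 0] in the normalized Pauli basis).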
vec_1 = (
povm.vecs[0]
- (1 / 2) * np.array([2 / np.sqrt(2), 0, 0, 0])
+ np.array([np.sqrt(2) / 2, 0, 0, 0])
)
vec_2 = (
povm.vecs[1]
- (1 / 2) * np.array([2 / np.sqrt(2), 0, 0, 0])
+ np.array([np.sqrt(2) / 2, 0, 0, 0])
)
vecs = [vec_1, vec_2]
expected_vecs = [vec_1, vec_2]
assert len(actual.vecs) == len(expected_vecs)
for a, e in zip(actual.vecs, expected_vecs):
npt.assert_almost_equal(a, e, decimal=15)
def test_calc_proj_eq_constraint_with_var(self):
e_sys = esys.ElementalSystem(0, get_normalized_pauli_basis())
c_sys = csys.CompositeSystem([e_sys])
povm = get_x_povm(c_sys)
vec_1 = (
povm.vecs[0]
- (1 / 2) * np.array([2 / np.sqrt(2), 0, 0, 0])
+ np.array([np.sqrt(2) / 2, 0, 0, 0])
)
vec_2 = (
povm.vecs[1]
- (1 / 2) * np.array([2 / np.sqrt(2), 0, 0, 0])
+ np.array([np.sqrt(2) / 2, 0, 0, 0])
)
vecs = [vec_1, vec_2]
# Case 1: default
actual = povm.calc_proj_eq_constraint_with_var(c_sys, vec_1)
expected = vec_1
npt.assert_almost_equal(actual, expected, decimal=15)
# Case 2: on_para_eq_constraint=True
actual = povm.calc_proj_eq_constraint_with_var(
c_sys, vec_1, on_para_eq_constraint=True
)
expected = vec_1
npt.assert_almost_equal(actual, expected, decimal=15)
# Case 3: on_para_eq_constraint=False
actual = povm.calc_proj_eq_constraint_with_var(
c_sys, np.hstack(vecs), on_para_eq_constraint=False
)
expected = np.hstack(vecs)
npt.assert_almost_equal(actual, expected, decimal=15)
def test_calc_proj_ineq_constraint(self):
# Case 1:
e_sys = esys.ElementalSystem(0, get_normalized_pauli_basis())
c_sys = csys.CompositeSystem([e_sys])
povm = get_x_povm(c_sys)
# Act
actual = povm.calc_proj_ineq_constraint()
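        # The inequality constraint projection clips negative eigenvalues of each
        # POVM element; the X-measurement elements are already positive semidefinite,
        # so the projected vecs below are expected to come back unchanged.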
vec_1 = np.array([1 / np.sqrt(2), 1 / np.sqrt(2), 0, 0])
vec_2 = np.array([1 / np.sqrt(2), -1 / np.sqrt(2), 0, 0])
vecs = [vec_1, vec_2]
expected_vecs = [vec_1, vec_2]
assert len(actual.vecs) == len(expected_vecs)
for a, e in zip(actual.vecs, expected_vecs):
npt.assert_almost_equal(a, e, decimal=15)
def test_calc_proj_ineq_constraint_with_var(self):
e_sys = esys.ElementalSystem(0, get_normalized_pauli_basis())
c_sys = csys.CompositeSystem([e_sys])
povm = get_x_povm(c_sys)
vec_1 = np.array([1 / np.sqrt(2), 1 / np.sqrt(2), 0, 0])
vec_2 = np.array([1 / np.sqrt(2), -1 / np.sqrt(2), 0, 0])
vecs = [vec_1, vec_2]
# Case 1: default
actual = povm.calc_proj_ineq_constraint_with_var(c_sys, vec_1)
expected = vec_1
npt.assert_almost_equal(actual, expected, decimal=15)
# Case 2: on_para_eq_constraint=True
actual = povm.calc_proj_ineq_constraint_with_var(
c_sys, vec_1, on_para_eq_constraint=True
)
expected = vec_1
npt.assert_almost_equal(actual, expected, decimal=15)
# Case 3: on_para_eq_constraint=False
actual = povm.calc_proj_ineq_constraint_with_var(
c_sys, np.hstack(vecs), on_para_eq_constraint=False
)
expected = np.hstack(vecs)
npt.assert_almost_equal(actual, expected, decimal=15)
def test_calc_gradient(self):
# Arrange
e_sys = esys.ElementalSystem(0, get_normalized_pauli_basis())
c_sys = csys.CompositeSystem([e_sys])
# Case 1: default
# Arrange
vecs = [
np.array([2, 3, 5, 7], dtype=np.float64),
np.array([11, 13, 17, 19], dtype=np.float64),
]
povm = Povm(c_sys=c_sys, vecs=vecs, is_physicality_required=False)
# Act
actual = povm.calc_gradient(3)
# Assert
expected_vecs = [
np.array([0, 0, 0, 1], dtype=np.float64),
np.array([0, 0, 0, 0], dtype=np.float64),
]
assert len(actual.vecs) == len(expected_vecs)
for a, e in zip(actual.vecs, expected_vecs):
npt.assert_almost_equal(a, e, decimal=15)
assert actual.composite_system is povm.composite_system
assert actual.is_physicality_required == False
assert actual.is_estimation_object == True
assert actual.on_para_eq_constraint == True
assert actual.on_algo_eq_constraint == True
assert actual.on_algo_ineq_constraint == True
assert actual.eps_proj_physical == Settings.get_atol() / 10.0
# Case 2:
# Arrange
vecs = [
np.array([2, 3, 5, 7], dtype=np.float64),
np.array([11, 13, 17, 19], dtype=np.float64),
]
povm = Povm(
c_sys=c_sys,
vecs=vecs,
is_physicality_required=False,
on_para_eq_constraint=False,
)
# Act
actual = povm.calc_gradient(7)
# Assert
expected_vecs = [
np.array([0, 0, 0, 0], dtype=np.float64),
np.array([0, 0, 0, 1], dtype=np.float64),
]
assert len(actual.vecs) == len(expected_vecs)
for a, e in zip(actual.vecs, expected_vecs):
npt.assert_almost_equal(a, e, decimal=15)
assert actual.composite_system is povm.composite_system
def test_to_stacked_vector(self):
# Arrange
e_sys = esys.ElementalSystem(0, get_normalized_pauli_basis())
c_sys = csys.CompositeSystem([e_sys])
vecs = [
np.array([2, 3, 5, 7], dtype=np.float64),
np.array([11, 13, 17, 19], dtype=np.float64),
]
povm = Povm(c_sys=c_sys, vecs=vecs, is_physicality_required=False)
# Act
actual = povm.to_stacked_vector()
# Assert
expected = np.array([2, 3, 5, 7, 11, 13, 17, 19])
npt.assert_almost_equal(actual, expected, decimal=15)
def test_calc_proj_eq_constraint_unexpected(self):
# Array
e_sys = esys.ElementalSystem(0, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
m_1 = (1 / 2) * np.array([1, 0, 0, 1])
m_2 = (1 / 2) * np.array([1, 0, 0, 1])
vecs = [m_1, m_2]
povm = Povm(c_sys=c_sys, vecs=vecs, is_physicality_required=False)
# Act & Assert
with pytest.raises(ValueError):
_ = povm.calc_proj_eq_constraint()
def test_calc_proj(self):
# Array
e_sys = esys.ElementalSystem(0, get_normalized_pauli_basis())
c_sys = csys.CompositeSystem([e_sys])
# Case 1
# ">=": True "=I": True
# Array
m_1 = np.array([1 / np.sqrt(2), 0, 0, 0])
m_2 = np.array([1 / np.sqrt(2), 0, 0, 0])
vecs = [m_1, m_2]
povm = Povm(c_sys=c_sys, vecs=vecs, is_physicality_required=False)
# Make sure the prerequisites are met
assert povm.is_positive_semidefinite() is True
assert povm.is_identity_sum() is True
# Act
actual_ineq = povm.calc_proj_ineq_constraint()
actual_eq = povm.calc_proj_eq_constraint()
# Assert
assert actual_ineq.is_positive_semidefinite() is True
npt.assert_almost_equal(povm.vecs, actual_ineq.vecs, decimal=15)
assert actual_eq.is_identity_sum() is True
npt.assert_almost_equal(povm.vecs, actual_eq.vecs, decimal=15)
# Case 2
# ">=": False "=I": True
# Array
m_1 = np.array([np.sqrt(2), 0.0, 0.0, np.sqrt(2)])
m_2 = np.array([0, 0.0, 0.0, -np.sqrt(2)])
vecs = [m_1, m_2]
povm = Povm(c_sys=c_sys, vecs=vecs, is_physicality_required=False)
# Make sure the prerequisites are met
assert povm.is_positive_semidefinite() is False
assert povm.is_identity_sum() is True
# Act
actual_ineq = povm.calc_proj_ineq_constraint()
actual_eq = povm.calc_proj_eq_constraint()
# Assert
assert actual_ineq.is_positive_semidefinite() is True
assert not np.allclose(povm.vecs, actual_ineq.vecs)
assert actual_eq.is_identity_sum() is True
npt.assert_almost_equal(povm.vecs, actual_eq.vecs, decimal=15)
# Case 3
# ">=": True "=I": False
# Array
m_1 = np.array([np.sqrt(2), 0.0, 0.0, 1.0])
m_2 = np.array([np.sqrt(2), 0.0, 0.0, 1.0])
vecs = [m_1, m_2]
povm = Povm(c_sys=c_sys, vecs=vecs, is_physicality_required=False)
# Make sure the prerequisites are met
assert povm.is_positive_semidefinite() is True
assert povm.is_identity_sum() is False
# Act
actual_ineq = povm.calc_proj_ineq_constraint()
actual_eq = povm.calc_proj_eq_constraint()
# Assert
assert actual_ineq.is_positive_semidefinite() is True
npt.assert_almost_equal(povm.vecs, actual_ineq.vecs, decimal=15)
assert actual_eq.is_identity_sum() is True
assert not np.allclose(povm.vecs, actual_eq.vecs)
# Case 4
# ">=": False "=I": False
# Array
m_1 = np.array([np.sqrt(2), 0.0, 0.0, 1.0])
m_2 = np.array([-np.sqrt(2), 0.0, 0.0, 1.0])
vecs = [m_1, m_2]
povm = Povm(c_sys=c_sys, vecs=vecs, is_physicality_required=False)
# Make sure the prerequisites are met
assert povm.is_positive_semidefinite() is False
assert povm.is_identity_sum() is False
# Act
actual_ineq = povm.calc_proj_ineq_constraint()
actual_eq = povm.calc_proj_eq_constraint()
# Assert
assert actual_ineq.is_positive_semidefinite() is True
assert not np.allclose(povm.vecs, actual_ineq.vecs)
assert actual_eq.is_identity_sum() is True
assert not np.allclose(povm.vecs, actual_eq.vecs)
def test_convert_var_to_stacked_vector(self):
e_sys = esys.ElementalSystem(0, get_normalized_pauli_basis())
c_sys = csys.CompositeSystem([e_sys])
povm = get_z_povm(c_sys)
# Case 1: default
vecs = [
np.array([1, 0, 0, 1] / np.sqrt(2), dtype=np.float64),
np.array([1, 0, 0, -1] / np.sqrt(2), dtype=np.float64),
]
actual = povm.convert_var_to_stacked_vector(c_sys, vecs[0])
npt.assert_almost_equal(actual, np.hstack(vecs), decimal=15)
# Case 2: on_para_eq_constraint=True
vecs = [
np.array([1, 0, 0, 1] / np.sqrt(2), dtype=np.float64),
np.array([1, 0, 0, -1] / np.sqrt(2), dtype=np.float64),
]
actual = povm.convert_var_to_stacked_vector(
c_sys, vecs[0], on_para_eq_constraint=True
)
npt.assert_almost_equal(actual, np.hstack(vecs), decimal=15)
# Case 3: on_para_eq_constraint=False
vecs = [
np.array([1, 0, 0, 1] / np.sqrt(2), dtype=np.float64),
np.array([1, 0, 0, -1] / np.sqrt(2), dtype=np.float64),
]
actual = povm.convert_var_to_stacked_vector(
c_sys, np.hstack(vecs), on_para_eq_constraint=False
)
npt.assert_almost_equal(actual, np.hstack(vecs), decimal=15)
def test_convert_stacked_vector_to_var(self):
e_sys = esys.ElementalSystem(0, get_normalized_pauli_basis())
c_sys = csys.CompositeSystem([e_sys])
povm = get_z_povm(c_sys)
# Case 1: default
vecs = [
np.array([1, 0, 0, 1] / np.sqrt(2), dtype=np.float64),
np.array([1, 0, 0, -1] / np.sqrt(2), dtype=np.float64),
]
actual = povm.convert_stacked_vector_to_var(c_sys, np.hstack(vecs))
expected = np.array([1, 0, 0, 1] / np.sqrt(2), dtype=np.float64)
npt.assert_almost_equal(actual, expected, decimal=15)
# Case 2: on_para_eq_constraint=True
vecs = [
np.array([1, 0, 0, 1] / np.sqrt(2), dtype=np.float64),
np.array([1, 0, 0, -1] / np.sqrt(2), dtype=np.float64),
]
actual = povm.convert_stacked_vector_to_var(
c_sys, np.hstack(vecs), on_para_eq_constraint=True
)
expected = np.array([1, 0, 0, 1] / np.sqrt(2), dtype=np.float64)
npt.assert_almost_equal(actual, expected, decimal=15)
# Case 3: on_para_eq_constraint=False
vecs = [
np.array([1, 0, 0, 1] / np.sqrt(2), dtype=np.float64),
np.array([1, 0, 0, -1] / np.sqrt(2), dtype=np.float64),
]
actual = povm.convert_stacked_vector_to_var(
c_sys, np.hstack(vecs), on_para_eq_constraint=False
)
expected = np.array([1, 0, 0, 1, 1, 0, 0, -1] / np.sqrt(2), dtype=np.float64)
npt.assert_almost_equal(actual, expected, decimal=15)
def test_convert_var_index_to_povm_index():
# Arrange
e_sys = esys.ElementalSystem(0, get_normalized_pauli_basis())
c_sys = csys.CompositeSystem([e_sys])
vecs = [
np.array([2, 3, 5, 7], dtype=np.float64),
np.array([11, 13, 17, 19], dtype=np.float64),
]
# default
actual = convert_var_index_to_povm_index(c_sys, vecs, 3)
assert actual == (0, 3)
# on_para_eq_constraint=True
actual = convert_var_index_to_povm_index(c_sys, vecs, 3, on_para_eq_constraint=True)
assert actual == (0, 3)
# on_para_eq_constraint=False
actual = convert_var_index_to_povm_index(
c_sys, vecs, 7, on_para_eq_constraint=False
)
assert actual == (1, 3)
def test_convert_povm_index_to_var_index():
# Arrange
e_sys = esys.ElementalSystem(0, get_normalized_pauli_basis())
c_sys = csys.CompositeSystem([e_sys])
vecs = [
np.array([2, 3, 5, 7], dtype=np.float64),
np.array([11, 13, 17, 19], dtype=np.float64),
]
# default
actual = convert_povm_index_to_var_index(c_sys, vecs, (0, 3))
assert actual == 3
# on_para_eq_constraint=True
actual = convert_povm_index_to_var_index(
c_sys, vecs, (0, 3), on_para_eq_constraint=True
)
assert actual == 3
# on_para_eq_constraint=False
actual = convert_povm_index_to_var_index(
c_sys, vecs, (1, 3), on_para_eq_constraint=False
)
assert actual == 7
def test_convert_var_to_povm():
# Arrange
e_sys = esys.ElementalSystem(0, get_normalized_pauli_basis())
c_sys = csys.CompositeSystem([e_sys])
# Case 1: default
# Arrange
# vecs = np.array([2, 3, 5, 7], dtype=np.float64)
# # Act
# actual = convert_var_to_povm(c_sys, vecs, is_physicality_required=False)
# # Assert
# expected = [
# np.array([2, 3, 5, 7], dtype=np.float64),
# np.array([-1, -3, -5, -6], dtype=np.float64),
# ]
# assert len(actual.vecs) == len(expected)
# for a, e in zip(actual.vecs, expected):
# npt.assert_almost_equal(a, e, decimal=15)
# Case 2: on_para_eq_constraint=True
# Arrange
# vecs = np.array([2, 3, 5, 7], dtype=np.float64)
# # Act
# actual = convert_var_to_povm(
# c_sys, vecs, on_para_eq_constraint=True, is_physicality_required=False
# )
# # Assert
# expected = [
# np.array([2, 3, 5, 7], dtype=np.float64),
# np.array([-1, -3, -5, -6], dtype=np.float64),
# ]
# assert len(actual.vecs) == len(expected)
# for a, e in zip(actual.vecs, expected):
# npt.assert_almost_equal(a, e, decimal=15)
# Case 3: on_para_eq_constraint=False
# Arrange
vecs = np.array([2, 3, 5, 7, 11, 13, 17, 19], dtype=np.float64)
# Act
actual = convert_var_to_povm(
c_sys, vecs, on_para_eq_constraint=False, is_physicality_required=False
)
# Assert
expected = [
np.array([2, 3, 5, 7], dtype=np.float64),
np.array([11, 13, 17, 19], dtype=np.float64),
]
assert len(actual.vecs) == len(expected)
for a, e in zip(actual.vecs, expected):
npt.assert_almost_equal(a, e, decimal=15)
# Case 4:
e_sys = esys.ElementalSystem(0, get_normalized_pauli_basis())
c_sys = csys.CompositeSystem([e_sys])
a0, a1, a2, a3 = 1, 1 / np.sqrt(2), 0, 1 / np.sqrt(2)
m1 = (1 / np.sqrt(2)) * np.array([a0, a1, a2, a3])
m2 = (1 / np.sqrt(2)) * np.array([2 - a0, -a1, -a2, -a3])
true_object = Povm(vecs=[m1, m2], c_sys=c_sys)
true_var = true_object.to_var()
actual = convert_var_to_povm(c_sys, true_var, is_physicality_required=False)
expected = true_object.vecs
assert len(actual.vecs) == len(expected)
for a, e in zip(actual.vecs, expected):
npt.assert_almost_equal(a, e, decimal=15)
def test_convert_vecs_to_var():
# Arrange
e_sys = esys.ElementalSystem(0, get_normalized_pauli_basis())
c_sys = csys.CompositeSystem([e_sys])
# Case 1: default
vecs = [
np.array([2, 3, 5, 7], dtype=np.float64),
np.array([11, 13, 17, 19], dtype=np.float64),
]
# Act
actual = convert_vecs_to_var(c_sys, vecs)
# Assert
expected = np.array([2, 3, 5, 7], dtype=np.float64)
npt.assert_almost_equal(actual, expected, decimal=15)
# Case 2: on_para_eq_constraint=True
vecs = [
np.array([2, 3, 5, 7], dtype=np.float64),
np.array([11, 13, 17, 19], dtype=np.float64),
]
# Act
actual = convert_vecs_to_var(c_sys, vecs, on_para_eq_constraint=True)
# Assert
expected = np.array([2, 3, 5, 7], dtype=np.float64)
npt.assert_almost_equal(actual, expected, decimal=15)
# Case 3: on_para_eq_constraint=False
vecs = [
np.array([2, 3, 5, 7], dtype=np.float64),
np.array([11, 13, 17, 19], dtype=np.float64),
]
# Act
actual = convert_vecs_to_var(c_sys, vecs, on_para_eq_constraint=False)
# Assert
expected = np.array([2, 3, 5, 7, 11, 13, 17, 19], dtype=np.float64)
npt.assert_almost_equal(actual, expected, decimal=15)
def test_calc_gradient_from_povm():
# Arrange
e_sys = esys.ElementalSystem(0, get_normalized_pauli_basis())
c_sys = csys.CompositeSystem([e_sys])
# default
vecs = [
np.array([2, 3, 5, 7], dtype=np.float64),
np.array([11, 13, 17, 19], dtype=np.float64),
]
actual = calc_gradient_from_povm(c_sys, vecs, 3)
expected = [
np.array([0, 0, 0, 1], dtype=np.float64),
np.array([0, 0, 0, 0], dtype=np.float64),
]
assert len(actual.vecs) == len(expected)
for a, e in zip(actual.vecs, expected):
npt.assert_almost_equal(a, e, decimal=15)
# on_para_eq_constraint=True
vecs = [
np.array([2, 3, 5, 7], dtype=np.float64),
np.array([11, 13, 17, 19], dtype=np.float64),
]
actual = calc_gradient_from_povm(c_sys, vecs, 3, on_para_eq_constraint=True)
expected = [
np.array([0, 0, 0, 1], dtype=np.float64),
np.array([0, 0, 0, 0], dtype=np.float64),
]
assert len(actual.vecs) == len(expected)
for a, e in zip(actual.vecs, expected):
npt.assert_almost_equal(a, e, decimal=15)
# on_para_eq_constraint=False
vecs = [
np.array([2, 3, 5, 7], dtype=np.float64),
np.array([11, 13, 17, 19], dtype=np.float64),
]
actual = calc_gradient_from_povm(c_sys, vecs, 7, on_para_eq_constraint=False)
expected = [
np.array([0, 0, 0, 0], dtype=np.float64),
np.array([0, 0, 0, 1], dtype=np.float64),
]
assert len(actual.vecs) == len(expected)
for a, e in zip(actual.vecs, expected):
npt.assert_almost_equal(a, e, decimal=15)
def test_get_x_povm():
# Arrange
e_sys1 = esys.ElementalSystem(1, get_comp_basis())
c_sys1 = csys.CompositeSystem([e_sys1])
# Act
actual = get_x_povm(c_sys1)
# Assert
expected = [
1 / 2 * np.array([1, 1, 1, 1], dtype=np.float64),
1 / 2 * np.array([1, -1, -1, 1], dtype=np.float64),
]
assert len(actual.vecs) == len(expected)
for i, a in enumerate(actual):
npt.assert_almost_equal(a, expected[i], decimal=15)
# Test that not 1qubit CompositeSystem
e_sys2 = esys.ElementalSystem(2, get_comp_basis())
c_sys2 = csys.CompositeSystem([e_sys1, e_sys2])
with pytest.raises(ValueError):
get_x_povm(c_sys2)
# Test that not 2-dim CompositeSystem
e_sys3 = esys.ElementalSystem(3, get_gell_mann_basis())
c_sys3 = csys.CompositeSystem([e_sys3])
with pytest.raises(ValueError):
get_x_povm(c_sys3)
def test_get_y_povm():
# Arrange
e_sys1 = esys.ElementalSystem(1, get_normalized_pauli_basis())
c_sys1 = csys.CompositeSystem([e_sys1])
# Act
actual = get_y_povm(c_sys1)
# Assert
expected = [
1 / np.sqrt(2) * np.array([1, 0, 1, 0], dtype=np.float64),
1 / np.sqrt(2) * np.array([1, 0, -1, 0], dtype=np.float64),
]
assert len(actual.vecs) == len(expected)
for i, a in enumerate(actual):
npt.assert_almost_equal(a, expected[i], decimal=15)
# Test that not 1qubit CompositeSystem
e_sys2 = esys.ElementalSystem(2, get_comp_basis())
c_sys2 = csys.CompositeSystem([e_sys1, e_sys2])
with pytest.raises(ValueError):
get_y_povm(c_sys2)
# Test that not 2-dim CompositeSystem
e_sys3 = esys.ElementalSystem(3, get_gell_mann_basis())
c_sys3 = csys.CompositeSystem([e_sys3])
with pytest.raises(ValueError):
get_y_povm(c_sys3)
def test_get_z_povm():
# Arrange
e_sys1 = esys.ElementalSystem(1, get_comp_basis())
c_sys1 = csys.CompositeSystem([e_sys1])
# Act
actual = get_z_povm(c_sys1)
# Assert
expected = [
np.array([1, 0, 0, 0], dtype=np.float64),
np.array([0, 0, 0, 1], dtype=np.float64),
]
assert len(actual.vecs) == len(expected)
for i, a in enumerate(actual):
npt.assert_almost_equal(a, expected[i], decimal=15)
# Test that not 1qubit CompositeSystem
e_sys2 = esys.ElementalSystem(2, get_comp_basis())
c_sys2 = csys.CompositeSystem([e_sys1, e_sys2])
with pytest.raises(ValueError):
get_z_povm(c_sys2)
# Test that not 2-dim CompositeSystem
e_sys3 = esys.ElementalSystem(3, get_gell_mann_basis())
c_sys3 = csys.CompositeSystem([e_sys3])
with pytest.raises(ValueError):
get_z_povm(c_sys3)
def test_get_xx_povm():
# Arrange
e_sys1 = esys.ElementalSystem(1, get_comp_basis())
e_sys2 = esys.ElementalSystem(2, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys1, e_sys2])
# Act
actual = get_xx_povm(c_sys)
# Assert
vecs1 = [
1 / 2 * np.array([1, 1, 1, 1], dtype=np.float64),
1 / 2 * np.array([1, -1, -1, 1], dtype=np.float64),
]
vecs2 = [
1 / 2 * np.array([1, 1, 1, 1], dtype=np.float64),
1 / 2 * np.array([1, -1, -1, 1], dtype=np.float64),
]
expected = [np.kron(vec1, vec2) for vec1, vec2 in itertools.product(vecs1, vecs2)]
assert len(actual.vecs) == len(expected)
for i, a in enumerate(actual):
npt.assert_almost_equal(a, expected[i], decimal=15)
def test_get_xy_povm():
# Arrange
e_sys1 = esys.ElementalSystem(1, get_comp_basis())
e_sys2 = esys.ElementalSystem(2, get_normalized_pauli_basis())
c_sys = csys.CompositeSystem([e_sys1, e_sys2])
# Act
actual = get_xy_povm(c_sys)
# Assert
vecs1 = [
1 / 2 * np.array([1, 1, 1, 1], dtype=np.float64),
1 / 2 * np.array([1, -1, -1, 1], dtype=np.float64),
]
vecs2 = [
1 / np.sqrt(2) * np.array([1, 0, 1, 0], dtype=np.float64),
1 / np.sqrt(2) * np.array([1, 0, -1, 0], dtype=np.float64),
]
expected = [np.kron(vec1, vec2) for vec1, vec2 in itertools.product(vecs1, vecs2)]
assert len(actual.vecs) == len(expected)
for i, a in enumerate(actual):
npt.assert_almost_equal(a, expected[i], decimal=15)
def test_get_xz_povm():
# Arrange
e_sys1 = esys.ElementalSystem(1, get_comp_basis())
e_sys2 = esys.ElementalSystem(2, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys1, e_sys2])
# Act
actual = get_xz_povm(c_sys)
# Assert
vecs1 = [
1 / 2 * np.array([1, 1, 1, 1], dtype=np.float64),
1 / 2 * np.array([1, -1, -1, 1], dtype=np.float64),
]
vecs2 = [
np.array([1, 0, 0, 0], dtype=np.float64),
np.array([0, 0, 0, 1], dtype=np.float64),
]
expected = [np.kron(vec1, vec2) for vec1, vec2 in itertools.product(vecs1, vecs2)]
assert len(actual.vecs) == len(expected)
for i, a in enumerate(actual):
npt.assert_almost_equal(a, expected[i], decimal=15)
def test_get_yx_povm():
# Arrange
e_sys1 = esys.ElementalSystem(1, get_normalized_pauli_basis())
e_sys2 = esys.ElementalSystem(2, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys1, e_sys2])
# Act
actual = get_yx_povm(c_sys)
# Assert
vecs1 = [
1 / np.sqrt(2) * np.array([1, 0, 1, 0], dtype=np.float64),
1 / np.sqrt(2) * np.array([1, 0, -1, 0], dtype=np.float64),
]
vecs2 = [
1 / 2 * np.array([1, 1, 1, 1], dtype=np.float64),
1 / 2 * np.array([1, -1, -1, 1], dtype=np.float64),
]
expected = [np.kron(vec1, vec2) for vec1, vec2 in itertools.product(vecs1, vecs2)]
assert len(actual.vecs) == len(expected)
for i, a in enumerate(actual):
npt.assert_almost_equal(a, expected[i], decimal=15)
def test_get_yy_povm():
# Arrange
e_sys1 = esys.ElementalSystem(1, get_normalized_pauli_basis())
e_sys2 = esys.ElementalSystem(2, get_normalized_pauli_basis())
c_sys = csys.CompositeSystem([e_sys1, e_sys2])
# Act
actual = get_yy_povm(c_sys)
# Assert
vecs1 = [
1 / np.sqrt(2) * np.array([1, 0, 1, 0], dtype=np.float64),
1 / np.sqrt(2) * np.array([1, 0, -1, 0], dtype=np.float64),
]
vecs2 = [
1 / np.sqrt(2) * np.array([1, 0, 1, 0], dtype=np.float64),
1 / np.sqrt(2) * np.array([1, 0, -1, 0], dtype=np.float64),
]
expected = [np.kron(vec1, vec2) for vec1, vec2 in itertools.product(vecs1, vecs2)]
assert len(actual.vecs) == len(expected)
for i, a in enumerate(actual):
npt.assert_almost_equal(a, expected[i], decimal=15)
def test_get_yz_povm():
# Arrange
e_sys1 = esys.ElementalSystem(1, get_normalized_pauli_basis())
e_sys2 = esys.ElementalSystem(2, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys1, e_sys2])
# Act
actual = get_yz_povm(c_sys)
# Assert
vecs1 = [
1 / np.sqrt(2) * np.array([1, 0, 1, 0], dtype=np.float64),
1 / np.sqrt(2) * np.array([1, 0, -1, 0], dtype=np.float64),
]
vecs2 = [
np.array([1, 0, 0, 0], dtype=np.float64),
np.array([0, 0, 0, 1], dtype=np.float64),
]
expected = [np.kron(vec1, vec2) for vec1, vec2 in itertools.product(vecs1, vecs2)]
assert len(actual.vecs) == len(expected)
for i, a in enumerate(actual):
        npt.assert_almost_equal(a, expected[i], decimal=15)
import itertools
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
import glob
from astropy.io import fits
import matplotlib as mpl
import matplotlib.patches as mpatches
from skimage import filters, segmentation, morphology
from skimage.measure import label, regionprops
from skimage.color import label2rgb
from sklearn import cluster
from sklearn.cluster import KMeans
import multiprocessing as mp
import warnings
import os
import pandas as pd
from tqdm import tqdm
def load_fits_data(filepath):
"""
Load data/header from .fits file
:param filepath:
    :return: data, hdr
"""
try:
with fits.open(filepath) as hdul:
# Beamline 12
hdr = hdul[0].header
data = hdul[2].data
except IndexError:
with fits.open(filepath) as hdul:
# Cosmic
try:
hdr = hdul[0].header
data = hdul[0].data
except IndexError:
print(hdul.info())
return data, hdr
def load_all_data(folder_path, n_files=None):
"""
Load in all .fits files from a given directory
:param folder_path:
:return: data, hdr
"""
hdr = []
data = []
for ii, file in tqdm(enumerate(sorted(glob.glob(folder_path + '*.fits'))), desc='loading data'):
if n_files is not None:
if n_files <= ii:
break
tmp = load_fits_data(file)
hdr.append(tmp[1])
data.append(tmp[0])
data = np.asarray(data)
print(f'Loaded data shape: {data.shape}')
return np.squeeze(data), hdr
def filter_image_data(data, tol=0.000_000_005, min_size=10):
"""
    Isolate speckles from image data. The input should usually already be cropped to just an ROI.
    TUNING PARAMETERS: segmentation.flood(..., tolerance), very finicky
                       morphology.remove_small_objects(min_size)
:param data: roi image
:return: input image but non-speckle features are 0
"""
erode = morphology.erosion(data) # remove small background noise
sobel = filters.sobel(erode) # edge detection
coords = np.unravel_index(np.argmin(data), data.shape)
flood = np.invert(segmentation.flood(sobel, coords, tolerance=tol)) # fill to create mask for speckle only
mask = morphology.remove_small_objects(flood, min_size=min_size) # clean up small mask bits
return data * mask
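# Example usage (illustrative sketch; the directory name and tuning values below are
# hypothetical and typically need to be re-tuned per data set):
#   data, hdr = load_all_data('scans/', n_files=1)
#   roi = data[100:200, 100:200]
#   speckles = filter_image_data(roi, tol=5e-9, min_size=10)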
def label_image(img):
"""
label each section of the image
:param img:
:return: original image but labelled
"""
bool_img = morphology.closing(
img.astype(bool)) # Connectivity is defined by having same value so need to convert to bool
label_image = label(bool_img)
return label_image
def cluster_single_speckle_kmeans(img, speckle_size):
"""
cluster points using kmeans algorithm. includes both location and value of points
:param img: roi img
:param speckle_size: number of pixels in a speckle
:return: kmeans clustering of points
"""
points = np.asarray(np.where(img)).T
    weights = img[np.where(img)]
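    # A minimal sketch of how the clustering could proceed from here (an assumption,
    # not the original routine): weight each pixel coordinate by its intensity and
    # let the expected speckle size set the number of clusters.
    #   n_clusters = max(1, points.shape[0] // speckle_size)
    #   kmeans = KMeans(n_clusters=n_clusters).fit(points, sample_weight=weights)
    #   return kmeans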
'''
Implements the class-conditional HMM where the class label specifies one of C possible words, chosen from some vocabulary.
If the class is a word of length T, it has a deterministic left-to-right state transition matrix A
with T possible states.The t'th state should have an categorical emission distribution which generates t'th letter
in lower case with probability p1, in upper case with probability p1, a blank character "-" with prob p2,
and a random letter with prob p3.
Author : <NAME> (@karalleyna)
'''
import numpy as np
import matplotlib.pyplot as plt
from hmm_lib import HMMDiscrete
class Word:
'''
This class consists of components needed for a class-conditional Hidden Markov Model
with categorical distribution
Parameters
----------
word: str
Class label representing a word
p1: float
The probability of the uppercase and lowercase letter included within
a word for the current state
p2: float
The probability of the blank character
p3: float
        The probability of each of the uppercase and lowercase letters except the correct one
L : int
The number of letters used when constructing words
type_ : str
"all" : Includes both uppercase and lowercase letters
"lower" : Includes only lowercase letters
"upper" : Includes only uppercase letters
'''
def __init__(self, word, p1, p2, p3, L, type_):
self.word, self.T = word, len(word)
self.p1, self.p2, self.p3 = p1, p2, p3
self.type_ = type_
self.L = 2 * L if self.type_ == 'all' else L
self.init_state_dist = np.zeros((self.T + 1,))
self.init_state_dist[0] = 1
self.init_state_transition_matrix()
self.init_emission_probs()
def init_state_transition_matrix(self):
assert self.T > 0
A = np.zeros((self.T + 1, self.T + 1)) # transition-probability matrix
A[:-1, 1:] = np.eye(self.T)
A[-1, 0] = 1
self.A = A
def emission_prob_(self, letter):
ascii_no = ord(letter.upper()) - 65 # 65 :ascii number of A
idx = [ascii_no, ascii_no + self.L // 2] if self.type_ == 'all' else ascii_no
emission_prob = np.full((1, self.L), self.p3)
emission_prob[:, idx] = self.p1
return emission_prob
def init_emission_probs(self):
self.B = np.zeros((self.T, self.L)) # observation likelihoods
for i in range(self.T):
self.B[i] = self.emission_prob_(self.word[i])
self.B = np.c_[self.B, np.full((self.T, 1), self.p2), np.zeros((self.T, 1))]
self.B = np.r_[self.B, np.zeros((1, self.L + 2))]
self.B[-1, -1] = 1
def sample(self, n_word, random_states=None):
'''
n_word: int
The number of times sampled a word by HMMDiscrete
random_states: List[int]
The random states each of which is given to HMMDiscrete.sample as a parameter
'''
        if random_states is None:
            random_states = np.random.randint(0, 2 * n_word, n_word)
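# Example usage (sketch): building the class-conditional HMM pieces for one word.
# The probabilities below are illustrative values chosen so that each emission row
# sums to one (2*p1 + p2 + (L-2)*p3 = 1 with L = 52); they are not from the original demo.
#   word_hmm = Word('cat', p1=0.4, p2=0.1, p3=0.002, L=26, type_='all')
#   print(word_hmm.A.shape)  # (4, 4): T+1 states for a 3-letter word
#   print(word_hmm.B.shape)  # (4, 54): 52 letters + blank + end-of-word symbol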
"""This code is part of caerus and is not designed for usage of seperate parts."""
#--------------------------------------------------------------------------
# Name : caerus.py
# Author : E.Taskesen
# Contact : <EMAIL>
# Date : May. 2020
#--------------------------------------------------------------------------
from caerus.utils.ones2idx import ones2region, idx2region
import pandas as pd
import numpy as np
from tqdm import tqdm
import warnings
warnings.filterwarnings(action='ignore', message='Mean of empty slice')
# %% utils
def _check_input(X):
# Convert to dataframe
if isinstance(X, pd.DataFrame): raise Exception('[caerus] >Error: Input data must be of type numpy-array or list.')
if 'numpy' in str(type(X)) or 'list' in str(type(X)): X = pd.Series(X)
if X.shape[0]!=X.size: raise Exception('[caerus] >Error : Input dataframe can only be a 1D-vector.')
# reset index
X.reset_index(drop=True, inplace=True)
return X
# %% Aggregation of scores over the windows and intervals
def _agg_scores(out, threshold=0):
outagg=np.nansum(out>0, axis=1)
    # Normalize for the window size that is used. Towards the end only smaller windows are available, which would otherwise be unfair for the threshold usage.
windowCorrectionFactor = np.ones_like(outagg)*out.shape[1]
    tmpvalue = np.arange(1, out.shape[1])
import numpy as np
def one_step_lookahead(environment, state, V, discount_factor):
"""
helper function to calculate the value function
"""
#Creating a vector of dimensionally same size as the number of actions
action_values=np.zeros(environment.nA)
for action in range(environment.nA):
for probability, next_state, reward, terminated in environment.P[state][action]: #policy
action_values[action] += probability * (reward + discount_factor * V[next_state])
return action_values
def policy_evaluation(policy, environment, discount_factor=1.0, theta=1e-9, max_iteration=1e9):
"""
evaluate a policy given a deterministic environment
    1)policy : Matrix of size nS*nA. Each cell represents the probability of
taking an action in a particular state
2)Environment : openAI environment object
3)discount_factor:
4)theta: Convergence factor. If the change in value function for all
states is below theta, we are done.
5)max_iteration: To avoid infinite looping.
Returns:
1)V:The optimum value estimate for the given policy
"""
evaluation_iteration = 1 # to record the number of iteration
V=np.zeros(environment.nS)
for i in range(int(max_iteration)):
delta = 0 #for early stopping
for state in range(environment.nS):
v=0
for action, action_probability in enumerate(policy[state]):
for state_probability, next_state, reward, terminated in environment.P[state][action]:
#print(state, next_state,state_probability)
v+= action_probability * state_probability * (reward + discount_factor *V[next_state])
            delta = max(delta, abs(V[state] - v))  # track the largest change in V for the convergence check
V[state]=v
evaluation_iteration +=1
if(delta < theta):
print('Policy evaluated in %d iteration' % evaluation_iteration)
return V
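# Example usage (sketch): evaluating a uniform random policy on a toy 2-state MDP.
# The object below stands in for an OpenAI Gym style discrete environment exposing
# nS, nA and the transition model P[state][action] = [(prob, next_state, reward, done)].
#   from types import SimpleNamespace
#   toy_env = SimpleNamespace(
#       nS=2, nA=2,
#       P={0: {0: [(1.0, 0, 0.0, False)], 1: [(1.0, 1, 1.0, True)]},
#          1: {0: [(1.0, 1, 0.0, True)], 1: [(1.0, 1, 0.0, True)]}})
#   uniform_policy = np.ones((2, 2)) / 2
#   V = policy_evaluation(uniform_policy, toy_env, discount_factor=0.9)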
def policy_iteration(environment, discount_factor=1.0, max_iteration=1e9):
"""
    In this function, we start from a random policy, evaluate its value
    function, act greedily with respect to that value function, and repeat
    the process with the improved policy.
"""
    policy = np.ones((environment.nS, environment.nA)) / environment.nA  # start from a uniform random policy
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
from pyiron_base._tests import PyironTestCase
from pyiron_continuum.mesh import (
RectMesh,
callable_to_array,
takes_scalar_field,
takes_vector_field,
has_default_accuracy
)
import numpy as np
import pyiron_continuum.mesh as mesh_mod
class TestDecorators(PyironTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.mesh = RectMesh([1, 2, 3], [30, 20, 10])
@staticmethod
def give_vector(mesh):
return np.ones(mesh.shape)
@staticmethod
def give_scalar(mesh):
return np.ones(mesh.divisions)
def test_callable_to_array(self):
scalar_field = self.give_scalar(self.mesh)
@callable_to_array
def method(mesh, callable_or_array, some_kwarg=1):
return callable_or_array + some_kwarg
self.assertTrue(np.allclose(scalar_field + 1, method(self.mesh, self.give_scalar)), msg="Accept functions")
self.assertTrue(np.allclose(scalar_field + 1, method(self.mesh, scalar_field)), msg="Accept arrays")
self.assertTrue(np.allclose(scalar_field + 2, method(self.mesh, self.give_scalar, some_kwarg=2)),
msg="Pass kwargs")
def test_takes_scalar_field(self):
scalar_field = self.give_scalar(self.mesh)
@takes_scalar_field
def method(mesh, scalar_field, some_kwarg=1):
return some_kwarg
self.assertEqual(1, method(self.mesh, scalar_field), msg="Accept arrays")
self.assertEqual(2, method(self.mesh, scalar_field, some_kwarg=2), msg="Pass kwargs")
self.assertEqual(1, method(self.mesh, scalar_field.tolist()), msg="Should work with listlike stuff too")
self.assertRaises(TypeError, method, self.mesh, np.ones(2)) # Reject the wrong shape
self.assertRaises(TypeError, method, self.mesh, "not even numeric") # Duh
def test_takes_vector_field(self):
vector_field = self.give_vector(self.mesh)
@takes_vector_field
def method(mesh, vector_field, some_kwarg=1):
return some_kwarg
self.assertEqual(1, method(self.mesh, vector_field), msg="Accept arrays")
self.assertEqual(2, method(self.mesh, vector_field, some_kwarg=2), msg="Pass kwargs")
self.assertEqual(1, method(self.mesh, vector_field.tolist()), msg="Should work with listlike stuff too")
self.assertRaises(TypeError, method, self.mesh, np.ones(2)) # Reject the wrong shape
self.assertRaises(TypeError, method, self.mesh, "not even numeric") # Duh
def test_has_default_accuracy(self):
some_field = self.give_vector(self.mesh)
@has_default_accuracy
def method(mesh, field, accuracy=None, some_kwarg=1):
return accuracy + some_kwarg
mesh = RectMesh(1, 1, accuracy=2)
self.assertEqual(3, method(mesh, some_field), 'Use mesh accuracy')
self.assertEqual(0, method(mesh, some_field, accuracy=4, some_kwarg=-4), 'Use passed accuracy')
self.assertRaises(ValueError, method, mesh, some_field, accuracy=1) # Even accuracy only
self.assertRaises(ValueError, method, mesh, some_field, accuracy=0) # Positive accuracy only
@has_default_accuracy
def method(mesh, field, accuracy_not_a_kwarg=42):
return None
self.assertRaises(TypeError, method, mesh, some_field) # Methods need to define accuracy
class TestRectMesh(PyironTestCase):
@staticmethod
def scalar_sines(mesh):
L = mesh.lengths
omega = (2 * np.pi / L).reshape(len(L), *[1] * mesh.dim)
return np.prod(np.sin(omega * mesh.mesh), axis=0)
def vector_sines(self, mesh):
scalar = self.scalar_sines(mesh)
return np.array(mesh.dim * [scalar])
@property
def docstring_module(self):
return mesh_mod
def test_input(self):
L = np.pi
n = 2
mesh = RectMesh(L, n)
self.assertTrue(np.allclose(mesh.bounds, [[0, L]]),
msg='Expected float to be converted to (1,2) array.')
self.assertTrue(np.all(mesh.divisions == [n]),
msg='Expected int to be converted to (1,) array.')
mesh = RectMesh([L, L], n)
self.assertTrue(np.allclose(mesh.bounds, [[0, L], [0, L]]),
msg='Expected 1D bounds to be interpreted as endpoints of 2D bounds.')
self.assertTrue(np.all(mesh.divisions == [n, n]),
msg='Expected divisions to be extended to match bounds.')
mesh = RectMesh([[0, L], [L / 2, L]], [n, 2 * n])
self.assertTrue(np.allclose(mesh.bounds, [[0, L], [L / 2, L]]),
msg='Expected float to be converted to (1,2) array.')
self.assertTrue(np.all(mesh.divisions == [n, 2 * n]),
msg='Expected divisions to be preserved.')
bounds = np.array([1, 2, 3, 4])
self.assertAlmostEqual(
bounds.prod(),
RectMesh(bounds=bounds).volume,
msg="Four dimensions should be ok, and hyper-volume should be a product of side lengths"
)
self.assertRaises(ValueError, RectMesh, [[0, 1, 2]], 1) # Bounds can't exceed shape (n, 2)
self.assertRaises(ValueError, RectMesh, [[1, 1 + 1e-12]]) # Bounds must enclose a space noticeably > 0
self.assertRaises(ValueError, RectMesh, 1, [1, 1]) # Divisions must be a single value or match bounds
self.assertRaises(TypeError, RectMesh, 1, np.pi) # Only int-like divisions
self.assertRaises(TypeError, RectMesh, 1, [[1]]) # Or lists of ints, but nothing else like lists of lists
def test_construction(self):
L = np.pi
n = 2
mesh = RectMesh(L, n)
self.assertTrue(np.allclose(mesh.mesh, [0, L / 2]), msg='1D should get simplified')
self.assertAlmostEqual(mesh.steps, L / 2, msg='1D should get simplified')
mesh.simplify_1d = False
self.assertTrue(np.allclose(mesh.steps, [L / 2]), msg='1D should stay list-like')
mesh = RectMesh([L, 2 * L], n)
self.assertTrue(
np.allclose(
mesh.mesh,
[
[
[0, 0],
[L / 2, L / 2],
],
[
[0, L],
[0, L],
]
]
)
)
        self.assertTrue(np.allclose(mesh.steps, [L / 2, L]))
###
# pySuStaIn: a Python implementation of the Subtype and Stage Inference (SuStaIn) algorithm
#
# If you use pySuStaIn, please cite the following core papers:
# 1. The original SuStaIn paper: https://doi.org/10.1038/s41467-018-05892-0
# 2. The pySuStaIn software paper: https://doi.org/10.1101/2021.06.09.447713
#
# Please also cite the corresponding progression pattern model you use:
# 1. The piece-wise linear z-score model (i.e. ZscoreSustain): https://doi.org/10.1038/s41467-018-05892-0
# 2. The event-based model (i.e. MixtureSustain): https://doi.org/10.1016/j.neuroimage.2012.01.062
# with Gaussian mixture modeling (i.e. 'mixture_gmm'): https://doi.org/10.1093/brain/awu176
# or kernel density estimation (i.e. 'mixture_kde'): https://doi.org/10.1002/alz.12083
# 3. The model for discrete ordinal data (i.e. OrdinalSustain): TBD
#
# Thanks a lot for supporting this project.
#
# Authors: <NAME> (<EMAIL>) and <NAME> (<EMAIL>)
# Contributors: <NAME> (<EMAIL>), <NAME> (<EMAIL>), <NAME> (<EMAIL>)
###
from tqdm.auto import tqdm
import numpy as np
from matplotlib import pyplot as plt
from pySuStaIn.AbstractSustain import AbstractSustainData
from pySuStaIn.AbstractSustain import AbstractSustain
#*******************************************
#The data structure class for OrdinalSustain. It holds the score and negative likelihoods that get passed around and re-indexed in places.
class OrdinalSustainData(AbstractSustainData):
def __init__(self, prob_nl, prob_score, numStages):
self.prob_nl = prob_nl
self.prob_score = prob_score
self.__numStages = numStages
def getNumSamples(self):
return self.prob_nl.shape[0]
def getNumBiomarkers(self):
return self.prob_nl.shape[1]
def getNumStages(self):
return self.__numStages
def reindex(self, index):
return OrdinalSustainData(self.prob_nl[index,], self.prob_score[index,], self.__numStages)
#*******************************************
#An implementation of the AbstractSustain class with multiple events for each biomarker based on deviations from normality, measured in z-scores.
#There are a fixed number of thresholds for each biomarker, specified at initialization of the OrdinalSustain object.
class OrdinalSustain(AbstractSustain):
def __init__(self,
prob_nl,
prob_score,
score_vals,
biomarker_labels,
N_startpoints,
N_S_max,
N_iterations_MCMC,
output_folder,
dataset_name,
use_parallel_startpoints,
seed=None):
# The initializer for the scored events model implementation of AbstractSustain
# Parameters:
# prob_nl - probability of negative/normal class for all subjects across all biomarkers
# dim: number of subjects x number of biomarkers
# prob_score - probability of each score for all subjects across all biomarkers
# dim: number of subjects x number of biomarkers x number of scores
# score_vals - a matrix specifying the scores for each biomarker
# dim: number of biomarkers x number of scores
# biomarker_labels - the names of the biomarkers as a list of strings
# N_startpoints - number of startpoints to use in maximum likelihood step of SuStaIn, typically 25
# N_S_max - maximum number of subtypes, should be 1 or more
# N_iterations_MCMC - number of MCMC iterations, typically 1e5 or 1e6 but can be lower for debugging
# output_folder - where to save pickle files, etc.
# dataset_name - for naming pickle files
# use_parallel_startpoints - boolean for whether or not to parallelize the maximum likelihood loop
# seed - random number seed
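        # Shape example (illustrative): with 10 subjects and 3 biomarkers, each scored
        # on 2 abnormality levels, prob_nl is (10, 3), prob_score is (10, 3, 2) and
        # score_vals is (3, 2).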
N = prob_nl.shape[1] # number of biomarkers
assert (len(biomarker_labels) == N), "number of labels should match number of biomarkers"
num_scores = score_vals.shape[1]
IX_vals = np.array([[x for x in range(N)]] * num_scores).T
stage_score = np.array([y for x in score_vals.T for y in x])
stage_score = stage_score.reshape(1,len(stage_score))
IX_select = stage_score>0
stage_score = stage_score[IX_select]
stage_score = stage_score.reshape(1,len(stage_score))
num_scores = score_vals.shape[1]
IX_vals = np.array([[x for x in range(N)]] * num_scores).T
stage_biomarker_index = np.array([y for x in IX_vals.T for y in x])
stage_biomarker_index = stage_biomarker_index.reshape(1,len(stage_biomarker_index))
stage_biomarker_index = stage_biomarker_index[IX_select]
stage_biomarker_index = stage_biomarker_index.reshape(1,len(stage_biomarker_index))
prob_score = prob_score.transpose(0,2,1)
prob_score = prob_score.reshape(prob_score.shape[0],prob_score.shape[1]*prob_score.shape[2])
prob_score = prob_score[:,IX_select[0,:]]
prob_score = prob_score.reshape(prob_nl.shape[0],stage_score.shape[1])
self.IX_select = IX_select
self.stage_score = stage_score
self.stage_biomarker_index = stage_biomarker_index
self.biomarker_labels = biomarker_labels
numStages = stage_score.shape[1]
self.__sustainData = OrdinalSustainData(prob_nl, prob_score, numStages)
super().__init__(self.__sustainData,
N_startpoints,
N_S_max,
N_iterations_MCMC,
output_folder,
dataset_name,
use_parallel_startpoints,
seed)
def _initialise_sequence(self, sustainData, rng):
# Randomly initialises a linear z-score model ensuring that the biomarkers
# are monotonically increasing
#
#
# OUTPUTS:
# S - a random linear z-score model under the condition that each biomarker
# is monotonically increasing
N = np.array(self.stage_score).shape[1]
S = np.zeros(N)
for i in range(N):
IS_min_stage_score = np.array([False] * N)
possible_biomarkers = np.unique(self.stage_biomarker_index)
for j in range(len(possible_biomarkers)):
IS_unselected = [False] * N
for k in set(range(N)) - set(S[:i]):
IS_unselected[k] = True
this_biomarkers = np.array([(np.array(self.stage_biomarker_index)[0] == possible_biomarkers[j]).astype(int) +
(np.array(IS_unselected) == 1).astype(int)]) == 2
if not np.any(this_biomarkers):
this_min_stage_score = 0
else:
this_min_stage_score = min(self.stage_score[this_biomarkers])
if (this_min_stage_score):
temp = ((this_biomarkers.astype(int) + (self.stage_score == this_min_stage_score).astype(int)) == 2).T
temp = temp.reshape(len(temp), )
IS_min_stage_score[temp] = True
events = np.array(range(N))
possible_events = np.array(events[IS_min_stage_score])
this_index = np.ceil(rng.random() * ((len(possible_events)))) - 1
S[i] = possible_events[int(this_index)]
S = S.reshape(1, len(S))
return S
def _calculate_likelihood_stage(self, sustainData, S):
'''
Computes the likelihood of a single scored event model
Outputs:
========
p_perm_k - the probability of each subjects data at each stage of a particular subtype
in the SuStaIn model
'''
N = self.stage_score.shape[1]
B = sustainData.prob_nl.shape[1]
IS_normal = np.ones(B)
IS_abnormal = np.zeros(B)
index_reached = np.zeros(B,dtype=int)
M = sustainData.prob_score.shape[0]
p_perm_k = np.zeros((M,N+1))
p_perm_k[:,0] = 1/(N+1)*np.prod(sustainData.prob_nl,1)
for j in range(N):
index_justreached = int(S[j])
biomarker_justreached = int(self.stage_biomarker_index[:,index_justreached])
index_reached[biomarker_justreached] = index_justreached
IS_normal[biomarker_justreached] = 0
IS_abnormal[biomarker_justreached] = 1
bool_IS_normal = IS_normal.astype(bool)
bool_IS_abnormal = IS_abnormal.astype(bool)
p_perm_k[:,j+1] = 1/(N+1)*np.multiply(np.prod(sustainData.prob_score[:,index_reached[bool_IS_abnormal]],1),np.prod(sustainData.prob_nl[:,bool_IS_normal],1))
return p_perm_k
def _optimise_parameters(self, sustainData, S_init, f_init, rng):
# Optimise the parameters of the SuStaIn model
M = sustainData.getNumSamples() #data_local.shape[0]
N_S = S_init.shape[0]
N = self.stage_score.shape[1]
S_opt = S_init.copy() # have to copy or changes will be passed to S_init
f_opt = np.array(f_init).reshape(N_S, 1, 1)
f_val_mat = np.tile(f_opt, (1, N + 1, M))
f_val_mat = np.transpose(f_val_mat, (2, 1, 0))
p_perm_k = np.zeros((M, N + 1, N_S))
for s in range(N_S):
p_perm_k[:, :, s] = self._calculate_likelihood_stage(sustainData, S_opt[s])
p_perm_k_weighted = p_perm_k * f_val_mat
#p_perm_k_norm = p_perm_k_weighted / np.tile(np.sum(np.sum(p_perm_k_weighted, 1), 1).reshape(M, 1, 1), (1, N + 1, N_S)) # the second summation axis is different to Matlab version
# adding 1e-250 fixes divide by zero problem that happens rarely
p_perm_k_norm = p_perm_k_weighted / np.sum(p_perm_k_weighted + 1e-250, axis=(1, 2), keepdims=True)
f_opt = (np.squeeze(sum(sum(p_perm_k_norm))) / sum(sum(sum(p_perm_k_norm)))).reshape(N_S, 1, 1)
f_val_mat = np.tile(f_opt, (1, N + 1, M))
f_val_mat = np.transpose(f_val_mat, (2, 1, 0))
order_seq = rng.permutation(N_S) # this will produce different random numbers to Matlab
for s in order_seq:
order_bio = rng.permutation(N) # this will produce different random numbers to Matlab
for i in order_bio:
current_sequence = S_opt[s]
current_location = np.array([0] * len(current_sequence))
current_location[current_sequence.astype(int)] = np.arange(len(current_sequence))
selected_event = i
move_event_from = current_location[selected_event]
this_stage_score = self.stage_score[0, selected_event]
selected_biomarker = self.stage_biomarker_index[0, selected_event]
possible_scores_biomarker = self.stage_score[self.stage_biomarker_index == selected_biomarker]
# slightly different conditional check to matlab version to protect python from calling min,max on an empty array
min_filter = possible_scores_biomarker < this_stage_score
max_filter = possible_scores_biomarker > this_stage_score
events = np.array(range(N))
if np.any(min_filter):
min_score_bound = max(possible_scores_biomarker[min_filter])
min_score_bound_event = events[((self.stage_score[0] == min_score_bound).astype(int) + (self.stage_biomarker_index[0] == selected_biomarker).astype(int)) == 2]
move_event_to_lower_bound = current_location[min_score_bound_event] + 1
else:
move_event_to_lower_bound = 0
if np.any(max_filter):
max_score_bound = min(possible_scores_biomarker[max_filter])
max_score_bound_event = events[((self.stage_score[0] == max_score_bound).astype(int) + (self.stage_biomarker_index[0] == selected_biomarker).astype(int)) == 2]
move_event_to_upper_bound = current_location[max_score_bound_event]
else:
move_event_to_upper_bound = N
# FIXME: hack because python won't produce an array in range (N,N), while matlab will produce an array (N)... urgh
if move_event_to_lower_bound == move_event_to_upper_bound:
possible_positions = np.array([0])
else:
possible_positions = np.arange(move_event_to_lower_bound, move_event_to_upper_bound)
possible_sequences = np.zeros((len(possible_positions), N))
possible_likelihood = np.zeros((len(possible_positions), 1))
possible_p_perm_k = np.zeros((M, N + 1, len(possible_positions)))
for index in range(len(possible_positions)):
current_sequence = S_opt[s]
#choose a position in the sequence to move an event to
move_event_to = possible_positions[index]
# move this event in its new position
current_sequence = np.delete(current_sequence, move_event_from, 0) # this is different to the Matlab version, which call current_sequence(move_event_from) = []
new_sequence = np.concatenate([current_sequence[np.arange(move_event_to)], [selected_event], current_sequence[np.arange(move_event_to, N - 1)]])
possible_sequences[index, :] = new_sequence
possible_p_perm_k[:, :, index] = self._calculate_likelihood_stage(sustainData, new_sequence)
p_perm_k[:, :, s] = possible_p_perm_k[:, :, index]
total_prob_stage = np.sum(p_perm_k * f_val_mat, 2)
total_prob_subj = np.sum(total_prob_stage, 1)
possible_likelihood[index] = sum(np.log(total_prob_subj + 1e-250))
possible_likelihood = possible_likelihood.reshape(possible_likelihood.shape[0])
max_likelihood = max(possible_likelihood)
this_S = possible_sequences[possible_likelihood == max_likelihood, :]
this_S = this_S[0, :]
S_opt[s] = this_S
this_p_perm_k = possible_p_perm_k[:, :, possible_likelihood == max_likelihood]
p_perm_k[:, :, s] = this_p_perm_k[:, :, 0]
S_opt[s] = this_S
p_perm_k_weighted = p_perm_k * f_val_mat
p_perm_k_norm = p_perm_k_weighted / np.tile(np.sum(np.sum(p_perm_k_weighted, 1), 1).reshape(M, 1, 1), (1, N + 1, N_S)) # the second summation axis is different to Matlab version
f_opt = (np.squeeze(sum(sum(p_perm_k_norm))) / sum(sum(sum(p_perm_k_norm)))).reshape(N_S, 1, 1)
f_val_mat = np.tile(f_opt, (1, N + 1, M))
        f_val_mat = np.transpose(f_val_mat, (2, 1, 0))
'''
Author: <NAME>
Date: Feb 8, 2008.
Board class.
Board data:
1=white, -1=black, 0=empty
first dim is column , 2nd is row:
pieces[1][7] is the square in column 2,
at the opposite end of the board in row 8.
Squares are stored and manipulated as (x,y) tuples.
x is the column, y is the row.
'''
import numpy as np
class Board():
# list of all 6 directions on the board, as (x,y) offsets
__directions = [(2,0),(-2,0),(1,1),(1,-1),(-1,1),(-1,-1)]
# list of all entries of the matrix, which are actually spots on the board
actBoard = [(2,3),(3,2),(3,4),(4,1),(4,3),(4,5),(5,2),(5,4),(6,1),(6,3),(6,5),(7,2),(7,4),(8,1),(8,3),(8,5),(9,2),(9,4),(10,3)]
# list of all starting Points on the board
startingPoints = [(0,3),(1,2),(1,4),(2,1),(2,5),(3,0),(3,6),(5,0),(5,6),(7,0),(7,6),(9,0),(9,6),(10,1),(10,5),(11,2),(11,4),(12,3)]
# dictionary for the translation of the spot names into the entries of the matrix (as tuple)
move_dict = {"a1": (9,0), "a2": (7,0), "a3": (5,0), "a4": (3,0), "b1": (10,1), "b2": (8,1), "b3": (6,1), "b4": (4,1), "b5": (2,1), "c1": (11,2),
"c2": (9,2), "c5": (3,2), "c6": (1,2), "d1": (12,3), "d2": (10,3), "d6": (2,3), "d7": (0,3), "e1": (11,4), "e2": (9,4), "e5": (3,4),
"e6": (1,4), "f1": (10,5), "f2": (8,5), "f3": (6,5), "f4": (4,5), "f5": (2,5), "g1": (9,6), "g2": (7,6), "g3": (5,6), "g4": (3,6)}
def __init__(self, n):
"Set up initial board configuration."
self.n = n
# Create the empty board array.
self.pieces = [None]*self.n # rows: mini: 13, normal: 17
for i in range(self.n):
self.pieces[i] = [0]*(int(self.n//(1.8))) # columns: mini: 13//1.8=7 normal: 17//1.8=9
#Set up reserve in board corner
self.pieces[0][0] = 5
self.pieces[0][2] = 5
# Set up the initial 6 pieces.
self.pieces[4][1] = 1
self.pieces[4][5] = 1
self.pieces[10][3] = 1
self.pieces[8][1] = -1
self.pieces[8][5] = -1
self.pieces[2][3] = -1
"""
#Testfall Sym
self.pieces[8][1] = 1
self.pieces[10][3] = 1
self.pieces[4][5] = 1
self.pieces[2][3] = -1
self.pieces[7][4] = -1
self.pieces[8][5] = -1
#Testfall A
self.pieces[8][1] = -1
self.pieces[7][2] = -1
self.pieces[4][3] = -1
self.pieces[10][3] = 1
self.pieces[8][3] = 1
self.pieces[4][5] = 1
self.pieces[5][4] = 1
#Testfall B
self.pieces[7][2] = 1
self.pieces[6][1] = 1
self.pieces[10][3] = 1
self.pieces[8][3] = -1
self.pieces[4][3] = -1
self.pieces[2][3] = -1
#Testfall C
self.pieces[4][1] = 1
self.pieces[5][2] = -1
self.pieces[10][3] = 1
self.pieces[4][3] = -1
self.pieces[2][3] = -1
#Testfall D
self.pieces[6][1] = -1
self.pieces[7][2] = -1
self.pieces[9][4] = 1
self.pieces[10][3] = -1
self.pieces[6][3] = -1
self.pieces[4][3] = -1
self.pieces[2][3] = 1
"""
# add [][] indexer syntax to the Board
def __getitem__(self, index):
return self.pieces[index]
def __setitem__(self, index, color):
self.pieces[index] = color
def get_actBoard(self):
if self.n == 13:
return self.actBoard
else:
pass # return actBoard + ext
def get_startingPoints(self):
if self.n == 13:
return self.startingPoints
else:
pass # return actBoard + ext
@staticmethod
def translate_move(move):
"""Returns a tuple of the spot names as a tuple of the matrix
"""
try:
move_new = (Board.move_dict[move[0]],Board.move_dict[move[1]])
return move_new
except KeyError:
            print('Invalid Field')
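    # Example: Board.translate_move(("a1", "b2")) returns ((9, 0), (8, 1)),
    # matching the corresponding entries in move_dict above.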
def get_legal_moves(self):
"""Returns all the legal moves
"""
moves = set() # stores the legal moves.
# discover the possible moves for every starting point
for start in self.startingPoints:
            newmoves = self.get_moves_for_dot(start)[0]
moves.update(newmoves)
return list(moves)
def get_legal_moves_binary(self):
"""Returns all the legal moves
"""
moves = [] # stores the legal moves.
# discover the possible moves for every starting point
for start in self.startingPoints:
newmoves = self.get_moves_for_dot(start)[2]
moves.extend(newmoves)
return moves
def get_all_moves(self):
"""Returns all the legal moves
"""
moves = [] # stores the legal moves.
# discover the possible moves for every starting point
for start in self.startingPoints:
newmoves = self.get_moves_for_dot(start)[1]
moves.extend(newmoves)
return moves
def get_moves_for_dot(self, dot):
"""Returns all the legal moves that use the given dot as a base.
"""
# search all possible directions.
legal_moves = []
all_moves = []
all_moves_binary = []
for direction in self.__directions:
            target = tuple(np.add(dot, direction))
import numpy as np
import SimpleITK as sitk
from scipy.interpolate import griddata
from platipy.imaging.label.utils import vectorised_transform_index_to_physical_point
def evaluate_distance_on_surface(
reference_volume, test_volume, abs_distance=True, reference_as_distance_map=False
):
"""
Evaluates a distance map on a surface
Input: reference_volume: binary volume SimpleITK image, or alternatively a distance map
test_volume: binary volume SimpleITK image
Output: theta, phi, values
"""
if reference_as_distance_map:
reference_distance_map = reference_volume
else:
if abs_distance:
reference_distance_map = sitk.Abs(
sitk.SignedMaurerDistanceMap(
reference_volume, squaredDistance=False, useImageSpacing=True
)
)
else:
reference_distance_map = sitk.SignedMaurerDistanceMap(
reference_volume, squaredDistance=False, useImageSpacing=True
)
test_surface = sitk.LabelContour(test_volume)
distance_image = sitk.Multiply(
reference_distance_map, sitk.Cast(test_surface, sitk.sitkFloat32)
)
distance_array = sitk.GetArrayFromImage(distance_image)
# Get centre of mass of reference volume
reference_volume_array = sitk.GetArrayFromImage(reference_volume)
reference_volume_locations = np.where(reference_volume_array == 1)
    com_index = np.array(reference_volume_locations).mean(axis=1)
com_real = vectorised_transform_index_to_physical_point(reference_volume, com_index)
# Calculate centre of mass in real coordinates
test_surface_array = sitk.GetArrayFromImage(test_surface)
test_surface_locations = np.where(test_surface_array == 1)
    test_surface_locations_array = np.array(test_surface_locations)
import os
import cv2
import time
import numpy as np
import imgaug as ia
from imgaug import augmenters as iaa
from yolo.detector import YOLO
from compare.net.dtfd import dtfd
hand = YOLO(weights='../weights/yolo.h5', threshold=0.5)
fingertip = dtfd()
fingertip.load_weights('weights/dtfd.h5')
def classify(img):
""" Fingertip detection """
global fingertip
img = img / 255.0
img = np.expand_dims(img, axis=0)
keys = fingertip.predict(img)
keys = keys[0]
return keys
def flip_horizontal(img, keys):
""" Flipping """
aug = iaa.Sequential([iaa.Fliplr(1.0)])
seq_det = aug.to_deterministic()
keys = ia.KeypointsOnImage([ia.Keypoint(x=keys[0], y=keys[1]),
ia.Keypoint(x=keys[2], y=keys[3]),
ia.Keypoint(x=keys[4], y=keys[5]),
ia.Keypoint(x=keys[6], y=keys[7])], shape=img.shape)
image_aug = seq_det.augment_images([img])[0]
keys_aug = seq_det.augment_keypoints([keys])[0]
k = keys_aug.keypoints
keys_aug = [k[0].x, k[0].y, k[1].x, k[1].y, k[2].x, k[2].y, k[3].x, k[3].y]
return image_aug, keys_aug
image_directory = '../../Dataset/Test/'
label_directory = '../../Dataset/label/'
image_files = os.listdir(image_directory)
""" Ground truth label file for TI1K dataset """
file = open(label_directory + 'TI1K.txt')
lines = file.readlines()
file.close()
""" Ground truth label file for SingleEight dataset """
file = open(label_directory + 'SingleEight.txt')
ego_lines = file.readlines()
file.close()
total_error = np.zeros([1, 4])
avg_hand_detect_time = 0
avg_fingertip_detect_time = 0
avg_time = 0
count = 0
distance_error = []
height = 480
width = 640
for image_file in image_files:
""" Generating ground truths labels """
image = cv2.imread(image_directory + image_file)
image = cv2.resize(image, (width, height))
name = image_file[:-4]
splits = name.split('_')
gt = []
if 'TI1K' in splits:
label = []
for line in lines:
line = line.strip().split()
if image_file == line[0]:
label = line[1:]
break
label = [float(i) for i in label]
x1 = label[0] * width
y1 = label[1] * height
x2 = label[2] * width
y2 = label[3] * height
xt = label[4] * width
yt = label[5] * height
xi = label[6] * width
yi = label[7] * height
gt = [x1, y1, x2, y2, xt, yt, xi, yi]
image_flip, gt_flip = flip_horizontal(image, np.asarray(gt))
image_flip = image_flip.copy()
"""
[x1, y1, x2, y2, xt, yt, xi, yi] = gt_flip
image_flip = cv2.rectangle(image_flip, (int(x1), int(y1)), (int(x2), int(y2)), (255, 0, 0), 4)
image_flip = cv2.circle(image_flip, (int(xt), int(yt)), 14, (0, 0, 255), -1)
image_flip = cv2.circle(image_flip, (int(xi), int(yi)), 14, (0, 255, 0), -1)
cv2.imshow('Image', image_flip)
cv2.waitKey(0)
"""
else:
label = []
for line in ego_lines:
line = line.strip().split()
name = line[0].split('/')[3]
if image_file == name:
label = line[1:]
break
label = [float(i) for i in label]
x1 = label[0] * width
y1 = label[1] * height
x2 = label[2] * width
y2 = label[3] * height
xt = label[4] * width
yt = label[5] * height
xi = label[8] * width
yi = label[9] * height
gt = [x1, y1, x2, y2, xt, yt, xi, yi]
image_flip, gt_flip = flip_horizontal(image, np.asarray(gt))
image_flip = image_flip.copy()
"""
image = cv2.rectangle(image, (x1, y1), (x2, y2), (255, 0, 0), 4)
image = cv2.circle(image, (xt, yt), 14, (0, 0, 255), -1)
image = cv2.circle(image, (xi, yi), 14, (0, 255, 0), -1)
cv2.imshow('', image)
cv2.waitKey(0)
"""
tic1 = time.time()
""" Predictions for the test images """
image = cv2.imread(image_directory + image_file)
image = cv2.resize(image, (width, height))
tic2 = time.time()
tl, br = hand.detect(image=image)
toc2 = time.time()
avg_hand_detect_time = avg_hand_detect_time + (toc2 - tic2)
    if tl is not None and br is not None:
# list to tuple
tl = (tl[0], tl[1])
br = (br[0], br[1])
""" Fingertip detection """
xmin = int(tl[0])
ymin = int(tl[1])
xmax = int(br[0])
ymax = int(br[1])
ymin = ymin if ymin > 0 else 0
xmin = xmin if xmin > 0 else 0
cropped_image = image[ymin:ymax, xmin:xmax]
        cols, rows, _ = cropped_image.shape  # note: cols holds the crop height, rows its width
cropped_image = cv2.resize(cropped_image, (99, 99))
tic3 = time.time()
position = classify(img=cropped_image)
toc3 = time.time()
avg_fingertip_detect_time = avg_fingertip_detect_time + (toc3 - tic3)
for i in range(0, len(position), 2):
position[i] = (position[i]) * rows
position[i + 1] = (position[i + 1]) * cols
for i in range(0, len(position), 2):
position[i] = (position[i] + tl[0])
position[i + 1] = (position[i + 1] + tl[1])
pr = [tl[0], tl[1], br[0], br[1], position[0], position[1], position[2], position[3]]
""" Drawing bounding box and fingertip """
image = cv2.rectangle(image, tl, br, (255, 0, 0), 4, 1)
image = cv2.circle(image, (int(position[0]), int(position[1])), 10, (0, 0, 255), -1)
image = cv2.circle(image, (int(position[2]), int(position[3])), 10, (0, 255, 0), -1)
cv2.imwrite('output/' + image_file, image)
# Calculating error for fingertips only
gt = | np.asarray(gt[4:]) | numpy.asarray |
# tests.test_bestfit
# Tests for the bestfit module.
#
# Author: <NAME> <<EMAIL>>
# Created: Sun Jun 26 19:27:39 2016 -0400
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: test_bestfit.py [56236f3] <EMAIL> $
"""
Tests for the bestfit module.
"""
##########################################################################
## Imports
##########################################################################
import pytest
import numpy as np
import matplotlib.pyplot as plt
from tests.base import VisualTestCase
from yellowbrick.bestfit import *
from yellowbrick.anscombe import ANSCOMBE
from yellowbrick.exceptions import YellowbrickValueError
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
##########################################################################
## Best fit tests
##########################################################################
class BestFitTests(VisualTestCase):
def test_bad_estimator(self):
"""
Test that a bad estimator name raises a value error.
"""
fig, axe = plt.subplots()
X, y = ANSCOMBE[1]
with self.assertRaises(YellowbrickValueError):
draw_best_fit(X, y, axe, 'pepper')
def test_ensure_same_length(self):
"""
Ensure that vectors of different lengths raise
"""
fig, axe = plt.subplots()
X = np.array([1, 2, 3, 5, 8, 10, 2])
y = np.array([1, 3, 6, 2])
with self.assertRaises(YellowbrickValueError):
draw_best_fit(X, y, axe, 'linear')
with self.assertRaises(YellowbrickValueError):
draw_best_fit(X[:,np.newaxis], y, axe, 'linear')
@pytest.mark.filterwarnings('ignore')
    def test_draw_best_fit(self):
"""
Test that drawing a best fit line works.
"""
fig, axe = plt.subplots()
X, y = ANSCOMBE[0]
self.assertEqual(axe, draw_best_fit(X, y, axe, 'linear'))
self.assertEqual(axe, draw_best_fit(X, y, axe, 'quadratic'))
##########################################################################
## Estimator tests
##########################################################################
class EstimatorTests(VisualTestCase):
"""
Test the estimator functions for best fit lines.
"""
def test_linear(self):
"""
Test the linear best fit estimator
"""
X, y = ANSCOMBE[0]
X = np.array(X)
y = np.array(y)
X = X[:,np.newaxis]
model = fit_linear(X, y)
self.assertIsNotNone(model)
self.assertIsInstance(model, LinearRegression)
def test_quadratic(self):
"""
Test the quadratic best fit estimator
"""
X, y = ANSCOMBE[1]
X = np.array(X)
y = np.array(y)
X = X[:,np.newaxis]
model = fit_quadratic(X, y)
self.assertIsNotNone(model)
self.assertIsInstance(model, Pipeline)
def test_select_best(self):
"""
Test the select best fit estimator
"""
X, y = ANSCOMBE[1]
X = | np.array(X) | numpy.array |
import numpy as np
import pytest
from autolens.data.array import mask
from autolens.data.array import interpolation
from autolens.model.galaxy import galaxy
from autolens.model.profiles import mass_profiles
@pytest.fixture(name='scheme')
def make_scheme():
return interpolation.InterpolationScheme(shape=(3, 3), image_coords=np.array([[1.0, 1.0]]), image_pixel_scale=1.0)
@pytest.fixture(name='geometry')
def make_geometry():
return interpolation.InterpolationGeometry(y_min=-1.0, y_max=1.0, x_min=-1.0, x_max=1.0,
y_pixel_scale=1.0, x_pixel_scale=1.0)
@pytest.fixture(name='galaxy_no_profiles', scope='function')
def make_galaxy_no_profiles():
return galaxy.Galaxy()
@pytest.fixture(name="galaxy_mass_sis")
def make_galaxy_mass_sis():
sis = mass_profiles.SphericalIsothermal(einstein_radius=1.0)
return galaxy.Galaxy(mass_profile=sis)
class TestInterpolationScheme(object):
class TestConstructor:
def test__sets_up_attributes_correctly(self):
image_coords = np.array([[-1.0, -6.0], [-1.0, 0.0], [-4.0, 2.0],
[-0.0, -1.0], [0.0, 0.0], [0.0, 1.0],
[3.0, -1.0], [1.0, 0.0], [1.0, 1.0]])
interp = interpolation.InterpolationScheme(shape=(3, 3), image_coords=image_coords, image_pixel_scale=1.0)
assert interp.shape == (3, 3)
assert interp.pixels == 9
assert (interp.image_coords == image_coords).all()
assert interp.geometry.y_min == -6.0
assert interp.geometry.y_max == 2.0
assert interp.geometry.x_min == -4.0
assert interp.geometry.x_max == 3.0
assert interp.geometry.y_pixel_scale == 1.0
assert interp.geometry.x_pixel_scale == 1.0
assert interp.geometry.x_size == 7.0
assert interp.geometry.y_size == 8.0
assert interp.geometry.x_start == -4.5
assert interp.geometry.y_start == -6.5
class TestNeighbors:
def test___3x3_grid_neighbors_all_correct(self):
# |0|1|2|
# |3|4|5|
# |6|7|8|
interp = interpolation.InterpolationScheme(shape=(3, 3), image_coords=np.array([[1.0, 1.0]]),
image_pixel_scale=1.0)
assert (interp.bottom_right_neighbors[0] == np.array([1, 3, 4])).all()
assert (interp.bottom_right_neighbors[1] == np.array([2, 4, 5])).all()
assert (interp.bottom_right_neighbors[2] == np.array([-1, 5, -1])).all()
assert (interp.bottom_right_neighbors[3] == np.array([4, 6, 7])).all()
assert (interp.bottom_right_neighbors[4] == np.array([5, 7, 8])).all()
assert (interp.bottom_right_neighbors[5] == np.array([-1, 8, -1])).all()
assert (interp.bottom_right_neighbors[6] == np.array([7, -1, -1])).all()
assert (interp.bottom_right_neighbors[7] == np.array([8, -1, -1])).all()
assert (interp.bottom_right_neighbors[8] == np.array([-1, -1, -1])).all()
assert (interp.bottom_left_neighbors[0] == np.array([-1, -1, 3])).all()
assert (interp.bottom_left_neighbors[1] == np.array([0, 3, 4])).all()
assert (interp.bottom_left_neighbors[2] == np.array([1, 4, 5])).all()
assert (interp.bottom_left_neighbors[3] == np.array([-1, -1, 6])).all()
assert (interp.bottom_left_neighbors[4] == np.array([3, 6, 7])).all()
assert (interp.bottom_left_neighbors[5] == np.array([4, 7, 8])).all()
assert (interp.bottom_left_neighbors[6] == np.array([-1, -1, -1])).all()
assert (interp.bottom_left_neighbors[7] == np.array([6, -1, -1])).all()
assert (interp.bottom_left_neighbors[8] == np.array([7, -1, -1])).all()
assert (interp.top_right_neighbors[0] == np.array([-1, -1, 1])).all()
assert (interp.top_right_neighbors[1] == np.array([-1, -1, 2])).all()
assert (interp.top_right_neighbors[2] == np.array([-1, -1, -1])).all()
assert (interp.top_right_neighbors[3] == np.array([0, 1, 4])).all()
assert (interp.top_right_neighbors[4] == np.array([1, 2, 5])).all()
assert (interp.top_right_neighbors[5] == np.array([2, -1, -1])).all()
assert (interp.top_right_neighbors[6] == np.array([3, 4, 7])).all()
assert (interp.top_right_neighbors[7] == np.array([4, 5, 8])).all()
assert (interp.top_right_neighbors[8] == np.array([5, -1, -1])).all()
assert (interp.top_left_neighbors[0] == np.array([-1, -1, -1])).all()
assert (interp.top_left_neighbors[1] == np.array([-1, -1, 0])).all()
assert (interp.top_left_neighbors[2] == np.array([-1, -1, 1])).all()
assert (interp.top_left_neighbors[3] == np.array([-1, 0, -1])).all()
assert (interp.top_left_neighbors[4] == np.array([0, 1, 3])).all()
assert (interp.top_left_neighbors[5] == np.array([1, 2, 4])).all()
assert (interp.top_left_neighbors[6] == np.array([-1, 3, -1])).all()
assert (interp.top_left_neighbors[7] == np.array([3, 4, 6])).all()
assert (interp.top_left_neighbors[8] == np.array([4, 5, 7])).all()
def test___3x4_grid_neighbors_all_correct(self):
# |0|1| 2| 3|
# |4|5| 6| 7|
# |8|9|10|11|
interp = interpolation.InterpolationScheme(shape=(3, 4), image_coords=np.array([[1.0, 1.0]]),
image_pixel_scale=1.0)
assert (interp.bottom_right_neighbors[0] == np.array([1, 4, 5])).all()
assert (interp.bottom_right_neighbors[1] == np.array([2, 5, 6])).all()
assert (interp.bottom_right_neighbors[2] == np.array([3, 6, 7])).all()
assert (interp.bottom_right_neighbors[3] == np.array([-1, 7, -1])).all()
assert (interp.bottom_right_neighbors[4] == np.array([5, 8, 9])).all()
assert (interp.bottom_right_neighbors[5] == np.array([6, 9, 10])).all()
assert (interp.bottom_right_neighbors[6] == np.array([7, 10, 11])).all()
assert (interp.bottom_right_neighbors[7] == np.array([-1, 11, -1])).all()
assert (interp.bottom_right_neighbors[8] == np.array([9, -1, -1])).all()
assert (interp.bottom_right_neighbors[9] == np.array([10, -1, -1])).all()
assert (interp.bottom_right_neighbors[10] == np.array([11, -1, -1])).all()
assert (interp.bottom_right_neighbors[11] == np.array([-1, -1, -1])).all()
assert (interp.bottom_left_neighbors[0] == np.array([-1, -1, 4])).all()
assert (interp.bottom_left_neighbors[1] == np.array([0, 4, 5])).all()
assert (interp.bottom_left_neighbors[2] == np.array([1, 5, 6])).all()
assert (interp.bottom_left_neighbors[3] == np.array([2, 6, 7])).all()
assert (interp.bottom_left_neighbors[4] == np.array([-1, -1, 8])).all()
assert (interp.bottom_left_neighbors[5] == np.array([4, 8, 9])).all()
assert (interp.bottom_left_neighbors[6] == np.array([5, 9, 10])).all()
assert (interp.bottom_left_neighbors[7] == np.array([6, 10, 11])).all()
assert (interp.bottom_left_neighbors[8] == np.array([-1, -1, -1])).all()
assert (interp.bottom_left_neighbors[9] == np.array([8, -1, -1])).all()
assert (interp.bottom_left_neighbors[10] == np.array([9, -1, -1])).all()
assert (interp.bottom_left_neighbors[11] == np.array([10, -1, -1])).all()
assert (interp.top_right_neighbors[0] == np.array([-1, -1, 1])).all()
assert (interp.top_right_neighbors[1] == np.array([-1, -1, 2])).all()
assert (interp.top_right_neighbors[2] == np.array([-1, -1, 3])).all()
assert (interp.top_right_neighbors[3] == np.array([-1, -1, -1])).all()
assert (interp.top_right_neighbors[4] == np.array([0, 1, 5])).all()
assert (interp.top_right_neighbors[5] == np.array([1, 2, 6])).all()
assert (interp.top_right_neighbors[6] == np.array([2, 3, 7])).all()
assert (interp.top_right_neighbors[7] == np.array([3, -1, -1])).all()
assert (interp.top_right_neighbors[8] == np.array([4, 5, 9])).all()
assert (interp.top_right_neighbors[9] == np.array([5, 6, 10])).all()
assert (interp.top_right_neighbors[10] == np.array([6, 7, 11])).all()
assert (interp.top_right_neighbors[11] == np.array([7, -1, -1])).all()
assert (interp.top_left_neighbors[0] == np.array([-1, -1, -1])).all()
assert (interp.top_left_neighbors[1] == np.array([-1, -1, 0])).all()
assert (interp.top_left_neighbors[2] == np.array([-1, -1, 1])).all()
assert (interp.top_left_neighbors[3] == np.array([-1, -1, 2])).all()
assert (interp.top_left_neighbors[4] == np.array([-1, 0, -1])).all()
assert (interp.top_left_neighbors[5] == np.array([0, 1, 4])).all()
assert (interp.top_left_neighbors[6] == np.array([1, 2, 5])).all()
assert (interp.top_left_neighbors[7] == np.array([2, 3, 6])).all()
assert (interp.top_left_neighbors[8] == np.array([-1, 4, -1])).all()
assert (interp.top_left_neighbors[9] == np.array([4, 5, 8])).all()
assert (interp.top_left_neighbors[10] == np.array([5, 6, 9])).all()
assert (interp.top_left_neighbors[11] == np.array([6, 7, 10])).all()
def test___4x3_grid_neighbors_all_correct(self):
# |0| 1| 2|
# |3| 4| 5|
# |6| 7| 8|
# |9|10|11|
interp = interpolation.InterpolationScheme(shape=(4, 3), image_coords=np.array([[1.0, 1.0]]),
image_pixel_scale=1.0)
assert (interp.bottom_right_neighbors[0] == np.array([1, 3, 4])).all()
assert (interp.bottom_right_neighbors[1] == np.array([2, 4, 5])).all()
assert (interp.bottom_right_neighbors[2] == np.array([-1, 5, -1])).all()
assert (interp.bottom_right_neighbors[3] == np.array([4, 6, 7])).all()
assert (interp.bottom_right_neighbors[4] == np.array([5, 7, 8])).all()
assert (interp.bottom_right_neighbors[5] == np.array([-1, 8, -1])).all()
assert (interp.bottom_right_neighbors[6] == np.array([7, 9, 10])).all()
assert (interp.bottom_right_neighbors[7] == np.array([8, 10, 11])).all()
assert (interp.bottom_right_neighbors[8] == np.array([-1, 11, -1])).all()
assert (interp.bottom_right_neighbors[9] == np.array([10, -1, -1])).all()
assert (interp.bottom_right_neighbors[10] == np.array([11, -1, -1])).all()
assert (interp.bottom_right_neighbors[11] == np.array([-1, -1, -1])).all()
assert (interp.bottom_left_neighbors[0] == np.array([-1, -1, 3])).all()
assert (interp.bottom_left_neighbors[1] == np.array([0, 3, 4])).all()
assert (interp.bottom_left_neighbors[2] == np.array([1, 4, 5])).all()
assert (interp.bottom_left_neighbors[3] == np.array([-1, -1, 6])).all()
assert (interp.bottom_left_neighbors[4] == np.array([3, 6, 7])).all()
assert (interp.bottom_left_neighbors[5] == np.array([4, 7, 8])).all()
assert (interp.bottom_left_neighbors[6] == np.array([-1, -1, 9])).all()
assert (interp.bottom_left_neighbors[7] == np.array([6, 9, 10])).all()
assert (interp.bottom_left_neighbors[8] == np.array([7, 10, 11])).all()
assert (interp.bottom_left_neighbors[9] == np.array([-1, -1, -1])).all()
assert (interp.bottom_left_neighbors[10] == np.array([9, -1, -1])).all()
assert (interp.bottom_left_neighbors[11] == np.array([10, -1, -1])).all()
assert (interp.top_right_neighbors[0] == np.array([-1, -1, 1])).all()
assert (interp.top_right_neighbors[1] == np.array([-1, -1, 2])).all()
assert (interp.top_right_neighbors[2] == np.array([-1, -1, -1])).all()
assert (interp.top_right_neighbors[3] == np.array([0, 1, 4])).all()
assert (interp.top_right_neighbors[4] == np.array([1, 2, 5])).all()
assert (interp.top_right_neighbors[5] == np.array([2, -1, -1])).all()
assert (interp.top_right_neighbors[6] == np.array([3, 4, 7])).all()
assert (interp.top_right_neighbors[7] == np.array([4, 5, 8])).all()
assert (interp.top_right_neighbors[8] == np.array([5, -1, -1])).all()
assert (interp.top_right_neighbors[9] == np.array([6, 7, 10])).all()
assert (interp.top_right_neighbors[10] == np.array([7, 8, 11])).all()
assert (interp.top_right_neighbors[11] == np.array([8, -1, -1])).all()
assert (interp.top_left_neighbors[0] == np.array([-1, -1, -1])).all()
assert (interp.top_left_neighbors[1] == np.array([-1, -1, 0])).all()
assert (interp.top_left_neighbors[2] == np.array([-1, -1, 1])).all()
assert (interp.top_left_neighbors[3] == np.array([-1, 0, -1])).all()
assert (interp.top_left_neighbors[4] == np.array([0, 1, 3])).all()
assert (interp.top_left_neighbors[5] == np.array([1, 2, 4])).all()
assert (interp.top_left_neighbors[6] == np.array([-1, 3, -1])).all()
assert (interp.top_left_neighbors[7] == np.array([3, 4, 6])).all()
assert (interp.top_left_neighbors[8] == np.array([4, 5, 7])).all()
assert (interp.top_left_neighbors[9] == np.array([-1, 6, -1])).all()
assert (interp.top_left_neighbors[10] == np.array([6, 7, 9])).all()
assert (interp.top_left_neighbors[11] == np.array([7, 8, 10])).all()
def test___4x4_grid_neighbors_all_correct(self):
# | 0| 1| 2| 3|
# | 4| 5| 6| 7|
# | 8| 9|10|11|
# |12|13|14|15|
interp = interpolation.InterpolationScheme(shape=(4, 4), image_coords=np.array([[1.0, 1.0]]),
image_pixel_scale=1.0)
assert (interp.bottom_right_neighbors[0] == np.array([1, 4, 5])).all()
assert (interp.bottom_right_neighbors[1] == np.array([2, 5, 6])).all()
assert (interp.bottom_right_neighbors[2] == np.array([3, 6, 7])).all()
assert (interp.bottom_right_neighbors[3] == np.array([-1, 7, -1])).all()
assert (interp.bottom_right_neighbors[4] == np.array([5, 8, 9])).all()
assert (interp.bottom_right_neighbors[5] == np.array([6, 9, 10])).all()
assert (interp.bottom_right_neighbors[6] == np.array([7, 10, 11])).all()
assert (interp.bottom_right_neighbors[7] == np.array([-1, 11, -1])).all()
assert (interp.bottom_right_neighbors[8] == np.array([9, 12, 13])).all()
assert (interp.bottom_right_neighbors[9] == np.array([10, 13, 14])).all()
assert (interp.bottom_right_neighbors[10] == np.array([11, 14, 15])).all()
assert (interp.bottom_right_neighbors[11] == np.array([-1, 15, -1])).all()
assert (interp.bottom_right_neighbors[12] == np.array([13, -1, -1])).all()
assert (interp.bottom_right_neighbors[13] == np.array([14, -1, -1])).all()
assert (interp.bottom_right_neighbors[14] == np.array([15, -1, -1])).all()
assert (interp.bottom_right_neighbors[15] == np.array([-1, -1, -1])).all()
assert (interp.bottom_left_neighbors[0] == np.array([-1, -1, 4])).all()
assert (interp.bottom_left_neighbors[1] == np.array([0, 4, 5])).all()
assert (interp.bottom_left_neighbors[2] == np.array([1, 5, 6])).all()
assert (interp.bottom_left_neighbors[3] == np.array([2, 6, 7])).all()
assert (interp.bottom_left_neighbors[4] == np.array([-1, -1, 8])).all()
assert (interp.bottom_left_neighbors[5] == np.array([4, 8, 9])).all()
assert (interp.bottom_left_neighbors[6] == np.array([5, 9, 10])).all()
assert (interp.bottom_left_neighbors[7] == np.array([6, 10, 11])).all()
assert (interp.bottom_left_neighbors[8] == np.array([-1, -1, 12])).all()
assert (interp.bottom_left_neighbors[9] == np.array([8, 12, 13])).all()
assert (interp.bottom_left_neighbors[10] == np.array([9, 13, 14])).all()
assert (interp.bottom_left_neighbors[11] == np.array([10, 14, 15])).all()
assert (interp.bottom_left_neighbors[12] == np.array([-1, -1, -1])).all()
assert (interp.bottom_left_neighbors[13] == np.array([12, -1, -1])).all()
assert (interp.bottom_left_neighbors[14] == np.array([13, -1, -1])).all()
assert (interp.bottom_left_neighbors[15] == np.array([14, -1, -1])).all()
assert (interp.top_right_neighbors[0] == np.array([-1, -1, 1])).all()
assert (interp.top_right_neighbors[1] == np.array([-1, -1, 2])).all()
assert (interp.top_right_neighbors[2] == np.array([-1, -1, 3])).all()
assert (interp.top_right_neighbors[3] == np.array([-1, -1, -1])).all()
assert (interp.top_right_neighbors[4] == np.array([0, 1, 5])).all()
assert (interp.top_right_neighbors[5] == np.array([1, 2, 6])).all()
assert (interp.top_right_neighbors[6] == np.array([2, 3, 7])).all()
assert (interp.top_right_neighbors[7] == np.array([3, -1, -1])).all()
assert (interp.top_right_neighbors[8] == np.array([4, 5, 9])).all()
assert (interp.top_right_neighbors[9] == np.array([5, 6, 10])).all()
assert (interp.top_right_neighbors[10] == np.array([6, 7, 11])).all()
assert (interp.top_right_neighbors[11] == np.array([7, -1, -1])).all()
assert (interp.top_right_neighbors[12] == np.array([8, 9, 13])).all()
assert (interp.top_right_neighbors[13] == np.array([9, 10, 14])).all()
assert (interp.top_right_neighbors[14] == np.array([10, 11, 15])).all()
assert (interp.top_right_neighbors[15] == np.array([11, -1, -1])).all()
assert (interp.top_left_neighbors[0] == np.array([-1, -1, -1])).all()
assert (interp.top_left_neighbors[1] == np.array([-1, -1, 0])).all()
assert (interp.top_left_neighbors[2] == np.array([-1, -1, 1])).all()
assert (interp.top_left_neighbors[3] == np.array([-1, -1, 2])).all()
assert (interp.top_left_neighbors[4] == np.array([-1, 0, -1])).all()
assert (interp.top_left_neighbors[5] == np.array([0, 1, 4])).all()
assert (interp.top_left_neighbors[6] == np.array([1, 2, 5])).all()
assert (interp.top_left_neighbors[7] == np.array([2, 3, 6])).all()
assert (interp.top_left_neighbors[8] == np.array([-1, 4, -1])).all()
assert (interp.top_left_neighbors[9] == np.array([4, 5, 8])).all()
assert (interp.top_left_neighbors[10] == np.array([5, 6, 9])).all()
assert (interp.top_left_neighbors[11] == np.array([6, 7, 10])).all()
assert (interp.top_left_neighbors[12] == np.array([-1, 8, -1])).all()
assert (interp.top_left_neighbors[13] == np.array([8, 9, 12])).all()
assert (interp.top_left_neighbors[14] == np.array([9, 10, 13])).all()
assert (interp.top_left_neighbors[15] == np.array([10, 11, 14])).all()
class TestFromMask:
def test__passes_mask_pixel_scale(self):
msk = np.array([[True, True, True],
[True, False, True],
[True, True, True]])
msk = mask.Mask(msk, pixel_scale=1.0)
interp = interpolation.InterpolationScheme.from_mask(mask=msk, shape=(3, 3))
assert interp.image_pixel_scale == msk.pixel_scale
def test__3x3_mask_with_1_pixel__3x3_interp_grid__image_coords_extend_beyond_mask(self):
msk = np.array([[True, True, True],
[True, False, True],
[True, True, True]])
msk = mask.Mask(msk, pixel_scale=1.0)
interp = interpolation.InterpolationScheme.from_mask(mask=msk, shape=(3, 3))
assert interp.image_coords[0] == pytest.approx(np.array([-1.0, -1.0]), 1e-4)
assert interp.image_coords[1] == pytest.approx(np.array([-1.0, 0.0]), 1e-4)
assert interp.image_coords[2] == pytest.approx(np.array([-1.0, 1.0]), 1e-4)
assert interp.image_coords[3] == pytest.approx(np.array([0.0, -1.0]), 1e-4)
assert interp.image_coords[4] == pytest.approx(np.array([0.0, 0.0]), 1e-4)
assert interp.image_coords[5] == pytest.approx(np.array([0.0, 1.0]), 1e-4)
assert interp.image_coords[6] == pytest.approx(np.array([1.0, -1.0]), 1e-4)
assert interp.image_coords[7] == pytest.approx(np.array([1.0, 0.0]), 1e-4)
assert interp.image_coords[8] == pytest.approx(np.array([1.0, 1.0]), 1e-4)
def test__same_as_above__change_pixel_scale(self):
msk = np.array([[True, True, True],
[True, False, True],
[True, True, True]])
msk = mask.Mask(msk, pixel_scale=2.0)
interp = interpolation.InterpolationScheme.from_mask(mask=msk, shape=(3, 3))
assert interp.image_coords[0] == pytest.approx(np.array([-2.0, -2.0]), 1e-4)
assert interp.image_coords[1] == pytest.approx(np.array([-2.0, 0.0]), 1e-4)
assert interp.image_coords[2] == pytest.approx(np.array([-2.0, 2.0]), 1e-4)
assert interp.image_coords[3] == pytest.approx(np.array([0.0, -2.0]), 1e-4)
assert interp.image_coords[4] == pytest.approx(np.array([0.0, 0.0]), 1e-4)
assert interp.image_coords[5] == pytest.approx(np.array([0.0, 2.0]), 1e-4)
assert interp.image_coords[6] == pytest.approx(np.array([2.0, -2.0]), 1e-4)
assert interp.image_coords[7] == pytest.approx(np.array([2.0, 0.0]), 1e-4)
assert interp.image_coords[8] == pytest.approx(np.array([2.0, 2.0]), 1e-4)
def test__3x3_mask_with_1_pixel__4x4_interp_grid__image_coords_extend_beyond_mask(self):
msk = np.array([[True, True, True],
[True, False, True],
[True, True, True]])
msk = mask.Mask(msk, pixel_scale=1.0)
interp = interpolation.InterpolationScheme.from_mask(mask=msk, shape=(4, 4))
assert interp.image_coords[0] == pytest.approx(np.array([-1.0, -1.0]), 1e-4)
assert interp.image_coords[1] == pytest.approx(np.array([-1.0, -(1. / 3.)]), 1e-4)
assert interp.image_coords[2] == pytest.approx(np.array([-1.0, (1. / 3.)]), 1e-4)
assert interp.image_coords[3] == pytest.approx(np.array([-1.0, 1.0]), 1e-4)
assert interp.image_coords[4] == pytest.approx(np.array([-(1. / 3.), -1.0]), 1e-4)
assert interp.image_coords[5] == pytest.approx(np.array([-(1. / 3.), -(1. / 3.)]), 1e-4)
assert interp.image_coords[6] == pytest.approx(np.array([-(1. / 3.), (1. / 3.)]), 1e-4)
assert interp.image_coords[7] == pytest.approx(np.array([-(1. / 3.), 1.0]), 1e-4)
assert interp.image_coords[8] == pytest.approx(np.array([(1. / 3.), -1.0]), 1e-4)
assert interp.image_coords[9] == pytest.approx(np.array([(1. / 3.), -(1. / 3.)]), 1e-4)
assert interp.image_coords[10] == pytest.approx(np.array([(1. / 3.), (1. / 3.)]), 1e-4)
assert interp.image_coords[11] == pytest.approx(np.array([(1. / 3.), 1.0]), 1e-4)
assert interp.image_coords[12] == pytest.approx(np.array([1.0, -1.0]), 1e-4)
assert interp.image_coords[13] == pytest.approx(np.array([1.0, -(1. / 3.)]), 1e-4)
assert interp.image_coords[14] == pytest.approx(np.array([1.0, (1. / 3.)]), 1e-4)
assert interp.image_coords[15] == pytest.approx(np.array([1.0, 1.0]), 1e-4)
def test__3x3_mask_with_1_pixel__3x4_interp_grid__image_coords_extend_beyond_mask(self):
msk = np.array([[True, True, True],
[True, False, True],
[True, True, True]])
msk = mask.Mask(msk, pixel_scale=1.0)
interp = interpolation.InterpolationScheme.from_mask(mask=msk, shape=(3, 4))
assert interp.image_coords[0] == pytest.approx(np.array([-1.0, -1.0]), 1e-4)
assert interp.image_coords[1] == pytest.approx(np.array([-1.0, -(1. / 3.)]), 1e-4)
assert interp.image_coords[2] == pytest.approx(np.array([-1.0, (1. / 3.)]), 1e-4)
assert interp.image_coords[3] == pytest.approx(np.array([-1.0, 1.0]), 1e-4)
assert interp.image_coords[4] == pytest.approx(np.array([0.0, -1.0]), 1e-4)
assert interp.image_coords[5] == pytest.approx(np.array([0.0, -(1. / 3.)]), 1e-4)
assert interp.image_coords[6] == pytest.approx(np.array([0.0, (1. / 3.)]), 1e-4)
assert interp.image_coords[7] == pytest.approx(np.array([0.0, 1.0]), 1e-4)
assert interp.image_coords[8] == pytest.approx(np.array([1.0, -1.0]), 1e-4)
assert interp.image_coords[9] == pytest.approx(np.array([1.0, -(1. / 3.)]), 1e-4)
assert interp.image_coords[10] == pytest.approx(np.array([1.0, (1. / 3.)]), 1e-4)
assert interp.image_coords[11] == pytest.approx(np.array([1.0, 1.0]), 1e-4)
def test__3x3_mask_with_1_pixel__4x3_interp_grid__image_coords_extend_beyond_mask(self):
msk = np.array([[True, True, True],
[True, False, True],
[True, True, True]])
msk = mask.Mask(msk, pixel_scale=1.0)
interp = interpolation.InterpolationScheme.from_mask(mask=msk, shape=(4, 3))
assert interp.image_coords[0] == pytest.approx(np.array([-1.0, -1.0]), 1e-4)
assert interp.image_coords[1] == pytest.approx(np.array([-1.0, 0.0]), 1e-4)
assert interp.image_coords[2] == pytest.approx(np.array([-1.0, 1.0]), 1e-4)
assert interp.image_coords[3] == pytest.approx(np.array([-(1. / 3.), -1.0]), 1e-4)
assert interp.image_coords[4] == pytest.approx(np.array([-(1. / 3.), 0.0]), 1e-4)
assert interp.image_coords[5] == pytest.approx(np.array([-(1. / 3.), 1.0]), 1e-4)
assert interp.image_coords[6] == pytest.approx(np.array([(1. / 3.), -1.0]), 1e-4)
assert interp.image_coords[7] == pytest.approx(np.array([(1. / 3.), 0.0]), 1e-4)
assert interp.image_coords[8] == pytest.approx(np.array([(1. / 3.), 1.0]), 1e-4)
assert interp.image_coords[9] == pytest.approx(np.array([1.0, -1.0]), 1e-4)
assert interp.image_coords[10] == pytest.approx(np.array([1.0, 0.0]), 1e-4)
assert interp.image_coords[11] == pytest.approx(np.array([1.0, 1.0]), 1e-4)
def test__4x4_mask_with_4_pixels__3x3_interp_grid__image_coords_extend_beyond_mask(self):
msk = np.array([[True, True, True, True],
[True, False, False, True],
[True, False, False, True],
[True, True, True, True]])
msk = mask.Mask(msk, pixel_scale=1.0)
interp = interpolation.InterpolationScheme.from_mask(mask=msk, shape=(3, 3))
assert interp.image_coords[0] == pytest.approx(np.array([-1.5, -1.5]), 1e-4)
assert interp.image_coords[1] == pytest.approx(np.array([-1.5, 0.0]), 1e-4)
assert interp.image_coords[2] == pytest.approx(np.array([-1.5, 1.5]), 1e-4)
assert interp.image_coords[3] == pytest.approx(np.array([0.0, -1.5]), 1e-4)
assert interp.image_coords[4] == pytest.approx(np.array([0.0, 0.0]), 1e-4)
assert interp.image_coords[5] == pytest.approx(np.array([0.0, 1.5]), 1e-4)
assert interp.image_coords[6] == pytest.approx(np.array([1.5, -1.5]), 1e-4)
assert interp.image_coords[7] == pytest.approx(np.array([1.5, 0.0]), 1e-4)
assert interp.image_coords[8] == pytest.approx(np.array([1.5, 1.5]), 1e-4)
def test__3x4_mask_with_2_pixels__3x3_interp_grid__image_coords_extend_beyond_mask(self):
msk = np.array([[True, True, True, True],
[True, False, False, True],
[True, True, True, True]])
msk = mask.Mask(msk, pixel_scale=1.0)
interp = interpolation.InterpolationScheme.from_mask(mask=msk, shape=(3, 3))
assert interp.image_coords[0] == pytest.approx(np.array([-1.5, -1.0]), 1e-4)
assert interp.image_coords[1] == pytest.approx(np.array([-1.5, 0.0]), 1e-4)
assert interp.image_coords[2] == pytest.approx(np.array([-1.5, 1.0]), 1e-4)
assert interp.image_coords[3] == pytest.approx(np.array([0.0, -1.0]), 1e-4)
assert interp.image_coords[4] == pytest.approx(np.array([0.0, 0.0]), 1e-4)
assert interp.image_coords[5] == pytest.approx(np.array([0.0, 1.0]), 1e-4)
assert interp.image_coords[6] == pytest.approx(np.array([1.5, -1.0]), 1e-4)
assert interp.image_coords[7] == pytest.approx(np.array([1.5, 0.0]), 1e-4)
assert interp.image_coords[8] == pytest.approx(np.array([1.5, 1.0]), 1e-4)
def test__4x3_mask_with_4_pixels__3x3_interp_grid__image_coords_extend_beyond_mask(self):
msk = np.array([[True, True, True],
[True, False, True],
[True, False, True],
[True, True, True]])
msk = mask.Mask(msk, pixel_scale=1.0)
interp = interpolation.InterpolationScheme.from_mask(mask=msk, shape=(3, 3))
assert interp.image_coords[0] == pytest.approx(np.array([-1.0, -1.5]), 1e-4)
assert interp.image_coords[1] == pytest.approx(np.array([-1.0, 0.0]), 1e-4)
assert interp.image_coords[2] == pytest.approx(np.array([-1.0, 1.5]), 1e-4)
assert interp.image_coords[3] == pytest.approx(np.array([0.0, -1.5]), 1e-4)
assert interp.image_coords[4] == pytest.approx(np.array([0.0, 0.0]), 1e-4)
assert interp.image_coords[5] == pytest.approx(np.array([0.0, 1.5]), 1e-4)
assert interp.image_coords[6] == pytest.approx(np.array([1.0, -1.5]), 1e-4)
assert interp.image_coords[7] == pytest.approx(np.array([1.0, 0.0]), 1e-4)
assert interp.image_coords[8] == pytest.approx(np.array([1.0, 1.5]), 1e-4)
def test__3x4_mask_with_2_pixels__3x4_interp_grid__image_coords_extend_beyond_mask(self):
msk = np.array([[True, True, True, True],
[True, False, False, True],
[True, True, True, True]])
msk = mask.Mask(msk, pixel_scale=1.0)
interp = interpolation.InterpolationScheme.from_mask(mask=msk, shape=(3, 4))
assert interp.image_coords[0] == pytest.approx(np.array([-1.5, -1.0]), 1e-4)
assert interp.image_coords[1] == pytest.approx(np.array([-1.5, -(1. / 3.)]), 1e-4)
assert interp.image_coords[2] == pytest.approx(np.array([-1.5, (1. / 3.)]), 1e-4)
assert interp.image_coords[3] == pytest.approx(np.array([-1.5, 1.0]), 1e-4)
assert interp.image_coords[4] == pytest.approx(np.array([0.0, -1.0]), 1e-4)
assert interp.image_coords[5] == pytest.approx(np.array([0.0, -(1. / 3.)]), 1e-4)
assert interp.image_coords[6] == pytest.approx(np.array([0.0, (1. / 3.)]), 1e-4)
assert interp.image_coords[7] == pytest.approx(np.array([0.0, 1.0]), 1e-4)
assert interp.image_coords[8] == pytest.approx(np.array([1.5, -1.0]), 1e-4)
assert interp.image_coords[9] == pytest.approx(np.array([1.5, -(1. / 3.)]), 1e-4)
assert interp.image_coords[10] == pytest.approx(np.array([1.5, (1. / 3.)]), 1e-4)
assert interp.image_coords[11] == pytest.approx( | np.array([1.5, 1.0]) | numpy.array |
#----------------------------------------------------------------------------------------------------
'''
skmm.py
This file contains the definitions of the functions used for kernel mean matching
Coded by <NAME>
Date: 2018-11-25
All Rights Reserved.
'''
#----------------------------------------------------------------------------------------------------
import numpy as np
import random
import scipy.linalg as la
from datetime import *
from cala import *
from kernel import *
from nmse import *
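# Minimal usage sketch (an assumption for illustration -- X, Y and cY are
# feature-by-sample arrays and the cala/kernel/nmse helpers are importable):
#   model = skmm(X, Y, cY, m=50, nSam=100)
#   nmse, cost = model.tglokmm()                        # global KMM, slice by slice
#   nmse, cost = model.itskmm(im=50, rn=200, times=5)   # stochastic variant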
class skmm(object):
def __init__(self, X, Y, cY, m, nSam, **kwargs):
self.__X = X
self.__Y = Y
self.__cY = cY
self.__m = m
self.__nSam = nSam
self.__mx = getMean(Y)
self.__xDim, self.__xSam = np.shape(X)
self.__yDim, self.__ySam = np.shape(Y)
self.__cDim, self.__cSam = np.shape(cY)
self.__xMean = getMean(X)
self.__xStd = getStd(X, self.__xMean)
self.__xBeta = getProb(X, self.__xMean, self.__xStd)
self.__kw = getKWidth(X)
self.__Kxx = xysK(X, X, 'Gaussian', self.__kw)
self.__Kxy = xysK(X, Y, 'Gaussian', self.__kw)
#self.__Kxx = xyK(X, X, 'Gaussian')
#self.__Kxy = xyK(X, Y, 'Gaussian')
#def updMean(self, X, mx, Y):
def updMean(self, X, Y):
xDim, xSam = np.shape(X)
yDim, ySam = np.shape(Y)
assert xDim == yDim, 'The dimensionality of X and Y are not identical !'
mx = self.__mx
n = xSam + ySam
for i in range(xDim):
mx[i] = mx[i] * xSam
for j in range(ySam):
mx[i] = mx[i] + Y[i][j]
mx[i] = mx[i] / n
self.__mx = mx
return mx
def updY(self, X, tX):
xDim, xSam = np.shape(X)
        tDim, tSam = np.shape(tX)
assert xDim == tDim, 'The dimensionality of X and tX are not identical !'
n = xSam + tSam
Y = np.column_stack((X, tX))
return Y
def getAind(self, X, n):
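        # Rank every sample of X by its total Gaussian-kernel affinity to the
        # other samples and return the indices of the n most representative ones.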
xDim, xSam = np.shape(X)
        tmk = xysK(X, X, 'Gaussian', self.__kw)  # cannot be replaced with self.__Kxy
tm = np.sum(tmk, axis=0)
assert len(tm) == xSam, 'The direction of operation may be incorrect !'
idx = np.argsort(- tm)
ix = idx[0:n]
return ix
def getBind(self, X, n, rn):
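        # Same idea as getAind, but affinity is measured against a random subset
        # of rn samples, which keeps the kernel evaluation cheap for large X.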
xDim, xSam = np.shape(X)
index = np.arange(xSam)
random.shuffle(index)
ind = index[0:rn]
tX = X[:, ind]
tmk = xysK(tX, X, 'Gaussian', self.__kw)
tm = np.sum(tmk, axis=0)
assert len(tm) == xSam, 'The direction of operation may be incorrect !'
idx = np.argsort(- tm)
ix = idx[0:n]
return ix
def getWeight(self, X, ind, mx):
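        # Gaussian weight of every sample of X around the running mean; the
        # return value is the fraction of total weight carried by the indices ind.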
xDim, xSam = np.shape(X)
#tDim, tSam = np.shape(tX)
#assert xDim == tDim, 'The dimensionality of X and tX are not identical !'
#mx = np.mean(X, axis=1)
mx = self.__mx
mw = np.zeros((xSam, 1))
for i in range(xSam):
tmp = X[:, i] - mx
tmp = tmp * tmp
tmp = np.sum(tmp)
tmp = np.exp(-tmp / self.__kw)
mw[i, 0] = tmp
tmw = mw[ind, 0]
sw = np.sum(mw)
stw = np.sum(tmw)
weight = float(stw) / sw
return weight
# +++++ The kmm functions +++++
def setLayer(self, b, P, k):
bDep, bRow, bCol = np.shape(b)
pRow, pCol = np.shape(P)
assert bRow == pRow, 'The dimensionality of b and P are not identical !'
assert bCol == pCol, 'The dimensionality of b and P are not identical !'
for i in range(pRow):
for j in range(pCol):
b[k, i, j] = P[i, j]
return b
def together(self, b):
bDep, bRow, bCol = np.shape(b)
assert bDep > 1, 'The depth of b is incorrect !'
m = np.zeros((bRow, bCol))
for i in range(bRow):
for j in range(bCol):
for k in range(bDep):
m[i, j] = m[i, j] + b[k, i, j]
return m
# +++++ global kmm +++++
def glokmm(self, X, Y, n):
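        # Global KMM: invert K(X, X) through its SVD pseudo-inverse, select the n
        # most representative columns of Y, and scale the coefficients by n / ySam
        # and by the density weight of the selected samples.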
xDim, xSam = np.shape(X)
yDim, ySam = np.shape(Y)
assert xDim == yDim, 'The dimensionality of X and Y are not identical !'
sKxx = xysK(X, X, 'Gaussian', self.__kw)
#sKxx = self.__Kxy
U, s, V = la.svd(sKxx)
V = np.transpose(V)
s, r = getRank(s)
minv = ginv(U, V, s, r)
minv = minv * 0.5
ind = self.getAind(Y, n)
tY = Y[:, ind]
tmk = xysK(X, tY, 'Gaussian', self.__kw)
P = np.dot(minv, tmk)
trs = float(n) / ySam
P = P * trs
weight = self.getWeight(Y, ind, self.__mx)
P = P * weight
return P, sKxx
def iglokmm(self, X, Y, n):
P, sKxx = self.glokmm(X, Y, n)
sKxy = xysK(X, Y, 'Gaussian', self.__kw)
#tmp = inmse(X, Y, P)
tmp = nmser(P, sKxx, sKxy)
return tmp
#def tglokmm(self, m, nSam):
def tglokmm(self):
X = self.__X
Y = self.__Y
cY = self.__cY
#yDim, ySam = np.shape(X)
#cDim, cSam = np.shape(cY)
#assert yDim == cDim, 'The dimensionality of Y and cY are not identical !'
ySam = self.__ySam
cSam = self.__cSam
m = self.__m
nSam = self.__nSam
n = int(np.floor(cSam / nSam))
nmse = np.zeros((n, 1))
cost = np.zeros((n, 1))
tmy = Y
for i in range(n):
tY = cY[:, i*nSam:(i+1)*nSam]
tmy = np.column_stack((tmy, tY))
oldtime = datetime.now()
tmp = self.iglokmm(X, tmy, m)
newtime = datetime.now()
tmq = (newtime - oldtime).microseconds
nmse[i] = tmp
cost[i] = tmq
ch = str(i) + '-th slice: ' + str(tmp)
th = str(i) + '-th cost time:' + str(tmq)
print(ch)
print(th)
print('-------------------------------------')
return nmse, cost
# +++++ skmm +++++
def skmm(self, X, Y, n, rn, mx): # skmm(X, Y, n, rn, k)
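        # One stochastic KMM draw: pick n reference columns of Y via the
        # random-subset ranking in getBind and solve against the cached K(X, X).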
xDim, xSam = np.shape(X)
yDim, ySam = np.shape(Y)
assert xDim == yDim, 'The dimensionality of X and Y are not identical !'
#Kxx = xysK(X, X, 'Gaussian', self.__kw)
#d = np.ones((xSam, 1)) * 0.0001
#d = np.diag(d[:, 0])
#tmp = self.__Kxx + d
#minv = la.inv(tmp)
U, s, V = la.svd(self.__Kxx)
V = np.transpose(V)
s, r = getRank(s)
minv = ginv(U, V, s, r)
minv = minv * 0.5
ind = self.getBind(Y, n, rn)
tY = Y[:, ind]
#tmk = xyK(X, tY, 'Gaussian')
tmk = xysK(X, tY, 'Gaussian', self.__kw)
P = np.dot(minv, tmk)
trs = float(n) / ySam
P = P * trs
weight = self.getWeight(Y, ind, mx)
P = P * weight
return P
def iskmm(self, X, Y, n, rn, times): # iskmm(X, Y, n, rn, k, times)
xDim, xSam = np.shape(X)
yDim, ySam = np.shape(Y)
assert xDim == yDim, 'The dimensionality of X and Y are not identical !'
b = np.zeros((times, xSam, n))
for i in range(times):
ch = str(i) + '-th running'
print(ch)
            P = self.skmm(X, Y, n, rn, self.__mx)
self.setLayer(b, P, i)
m = self.together(b)
tmp = inmse(X, Y, m)
return tmp
# +++++ Temporal skmm +++++
def tskmm(self, X, Y, tY, n, rn, times):
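        # Temporal update: append the new slice tY, refresh the running mean, and
        # average `times` independent skmm draws before scoring the NMSE.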
xDim, xSam = np.shape(X)
yDim, ySam = np.shape(Y)
assert xDim == yDim, 'The dimensionality of X and Y are not identical !'
Y = np.column_stack((Y, tY))
b = np.zeros((times, xSam, n))
mx = self.updMean(Y, tY)
for i in range(times):
#ch = str(i) + '-th running'
#print(ch)
P = self.skmm(X, Y, n, rn, mx)
self.setLayer(b, P, i)
Kxy = xysK(X, Y, 'Gaussian', self.__kw)
m = self.together(b)
m = m / times
tmp = nmser(m, self.__Kxx, Kxy)
return tmp, Y
def itskmm(self, im, rn, times):
X = self.__X
Y = self.__Y
cY = self.__cY
ySam = self.__ySam
cSam = self.__cSam
nSam = self.__nSam
#yDim, ySam = np.shape(X)
#cDim, cSam = np.shape(cY)
#assert yDim == cDim, 'The dimensionality of Y and cY are not identical !'
n = int(np.floor(cSam / nSam))
nmse = np.zeros((n, 1))
cost = np.zeros((n, 1))
for i in range(n):
tY = cY[:, i*nSam:(i+1)*nSam]
oldtime = datetime.now()
tmp, Y = self.tskmm(X, Y, tY, im, rn, times)
newtime = datetime.now()
tmq = (newtime - oldtime).microseconds
nmse[i] = tmp
cost[i] = tmq
ch = str(i) + '-th slice: ' + str(tmp)
th = str(i) + '-th cost time:' + str(tmq)
print(ch)
print(th)
return nmse, cost
# +++++ temporal enskmm +++++
def senkmm(self, X, Y, k):
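        # Ensemble KMM: split the columns of Y into k chunks, solve a regularised
        # system per chunk, and accumulate the chunk solutions weighted by 1 / k.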
xDim, xSam = np.shape(X)
yDim, ySam = np.shape(Y)
#U, s, V = la.svd(self.__Kxx)
#V = np.transpose(V)
#s, r = getRank(s)
#minv = ginv(U, V, s, r)
Kxx = xysK(X, X, 'Gaussian', self.__kw)
d = np.ones((xSam, 1)) * 0.0001
d = np.diag(d[:, 0])
tmp = Kxx + d
minv = la.inv(tmp)
#U, s, V = la.svd(Kxx)
#V = np.transpose(V)
#s, r = getRank(s)
#minv = ginv(U, V, s, r)
minv = minv * 0.5
#ran = list(range(self.__ySam))
#random.shuffle(ran)
#tY = Y[:, ran]
Kxy = xysK(X, Y, 'Gaussian', self.__kw)
num = int(np.floor(ySam / k))
P = np.zeros((self.__xSam, num))
for i in range(k):
if i != k-1:
start = i*num
end = (i+1)*num
else:
start = i*num
end = self.__ySam
tmk = Kxy[:, start:end]
tmp = np.dot(minv, tmk)
d = end - start
trs = float(d) / self.__ySam
tmp = tmp * trs
tmp = tmp * (float(1) / k)
for ii in range(self.__xSam):
for jj in range(d):
P[ii, jj] = P[ii, jj] + tmp[ii, jj]
return P, Kxx
def ienkmm(self, X, Y, k):
P, sKxx = self.senkmm(X, Y, k)
sKxy = xysK(X, Y, 'Gaussian', self.__kw)
#tmp = inmse(X, Y, P)
tmp = nmser(P, sKxx, sKxy)
return tmp
def tenkmm(self, k):
X = self.__X
Y = self.__Y
cY = self.__cY
xSam = self.__xSam
ySam = self.__ySam
cSam = self.__cSam
nSam = self.__nSam
#U, s, V = la.svd(self.__Kxx)
#V = np.transpose(V)
#s, r = getRank(s)
#minv = ginv(U, V, s, r)
#d = np.ones((xSam, 1)) * 0.0001
#d = np.diag(d[:, 0])
#tmp = self.__Kxx + d
#minv = la.inv(tmp)
#minv = minv * 0.5
n = int(np.floor(cSam / nSam))
nmse = np.zeros((n, 1))
cost = np.zeros((n, 1))
tmy = Y
for iter in range(n):
tY = cY[:, iter*nSam:(iter+1)*nSam]
tmy = | np.column_stack((tmy, tY)) | numpy.column_stack |
import os.path
import torch
import torchvision.transforms as transforms
from PIL import Image
import numpy as np
import csv
# from .face_landmark import getfeats_dlib_fromImg
from APDrawingGAN.data.face_landmark import getfeats_dlib_fromImg
from APDrawingGAN.data.base_dataset import BaseDataset, get_transform
from APDrawingGAN.data.image_folder import make_dataset
def getfeats(featpath):
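    # Read five facial landmarks as integer (x, y) pairs from a
    # whitespace-separated text file, one landmark per row.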
#print('getfeats0')
trans_points = np.empty([5,2],dtype=np.int64)
with open(featpath, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=' ')
for ind,row in enumerate(reader):
trans_points[ind,:] = row
return trans_points
def getSoft(size,xb,yb,boundwidth=5.0):
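    # Build a soft border mask: for every pixel take the smaller of its horizontal
    # and vertical distances to the nearest boundary sample in xb/yb, divide by
    # boundwidth and clamp at 1, so values grow from small at the boundary to 1
    # at boundwidth pixels away.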
xarray = np.tile(np.arange(0,size[1]),(size[0],1))
yarray = np.tile(np.arange(0,size[0]),(size[1],1)).transpose()
cxdists = []
cydists = []
for i in range(len(xb)):
xba = np.tile(xb[i],(size[1],1)).transpose()
yba = np.tile(yb[i],(size[0],1))
cxdists.append(np.abs(xarray-xba))
cydists.append(np.abs(yarray-yba))
xdist = np.minimum.reduce(cxdists)
ydist = np.minimum.reduce(cydists)
manhdist = np.minimum.reduce([xdist,ydist])
im = (manhdist+1) / (boundwidth+1) * 1.0
im[im>=1.0] = 1.0
return im
class SingleDataset(BaseDataset):
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def initialize(self, opt, img_background=None):
self.opt = opt
self.root = opt.dataroot
self.dir_A = os.path.join(opt.dataroot)
self.A_paths = make_dataset(self.dir_A)
self.A_paths = sorted(self.A_paths)
self.transform = get_transform(opt)
self.A_path = opt.im_p
self.img_background = opt.img_background
def __getitem__(self, index):
A_path = self.A_path#s[index]
A_img = Image.open(A_path).convert('RGB')
A = self.transform(A_img)
#print(self.transform)
#print('A0=',A.shape)
if self.opt.which_direction == 'BtoA':
input_nc = self.opt.output_nc
output_nc = self.opt.input_nc
else:
input_nc = self.opt.input_nc
output_nc = self.opt.output_nc
if input_nc == 1: # RGB to gray
tmp = A[0, ...] * 0.299 + A[1, ...] * 0.587 + A[2, ...] * 0.114
A = tmp.unsqueeze(0)
item = {'A': A, 'A_paths': A_path}
#print('A1=',A.shape)
if self.opt.use_local:
regions = ['eyel','eyer','nose','mouth']
basen = os.path.basename(A_path)[:-4]+'.txt'
featdir = self.opt.lm_dir
featpath = os.path.join(featdir,basen)
#feats = getfeats(featpath)
feats = getfeats_dlib_fromImg(A_path)
mouth_x = int((feats[3,0]+feats[4,0])/2.0)
mouth_y = int((feats[3,1]+feats[4,1])/2.0)
ratio = self.opt.fineSize / 256
EYE_H = self.opt.EYE_H * ratio
EYE_W = self.opt.EYE_W * ratio
NOSE_H = self.opt.NOSE_H * ratio
NOSE_W = self.opt.NOSE_W * ratio
MOUTH_H = self.opt.MOUTH_H * ratio
MOUTH_W = self.opt.MOUTH_W * ratio
center = torch.tensor([[feats[0,0],feats[0,1]-4*ratio],[feats[1,0],feats[1,1]-4*ratio],[feats[2,0],feats[2,1]-NOSE_H/2+16*ratio],[mouth_x,mouth_y]])
item['center'] = center
rhs = [EYE_H,EYE_H,NOSE_H,MOUTH_H]
rws = [EYE_W,EYE_W,NOSE_W,MOUTH_W]
if self.opt.soft_border:
soft_border_mask4 = []
for i in range(4):
xb = [np.zeros(rhs[i]), | np.ones(rhs[i]) | numpy.ones |
########################################################################
# Project Name: Decentralised Deep Learning without Forgetting
# Creators: <NAME> (<EMAIL>)
# <NAME> (<EMAIL>)
# <NAME> (<EMAIL>)
# <NAME> (<EMAIL>)
# Project Owners: <NAME> (<EMAIL>),
# <NAME> (<EMAIL>)
# December 2019
#########################################################################
import matplotlib.pyplot as plt
import sklearn as sk
import sklearn.linear_model
import scipy.io as sio
from PLN_Class import PLN
from Admm import optimize_admm
from LoadDataFromMat import importData
import numpy as np
from LwF_based_ADMM import LwF_based_ADMM_LS_Diff
import copy
# Compute the W_ls by solving a Least Squares Regularization problem
def compute_Wls(X,T,lam):
    # X is an n*p matrix (n samples, each with p dims); T is an n*Q matrix, one sample per row
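    # Closed-form regularised least squares: W_ls = T X^T (X X^T + lam * I)^(-1)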
inv_matrix = np.linalg.inv(np.dot(X, X.T)+lam*np.eye(X.shape[0]))
W_ls = np.dot(np.dot(T, X.T), inv_matrix).astype(np.float32)
return W_ls
def compute_ol(Y,T,mu, max_iterations):
# Computes the Output matrix by calling the ADMM Algorithm function with given parameters
ol = optimize_admm(T, Y, mu, max_iterations)
return ol
def compute_accuracy(predicted_lbl, true_lbl):
# Computes a Classification Accuracy between true label
acc = 100.*np.mean(np.argmax(predicted_lbl,axis=0)==np.argmax(true_lbl,axis=0))
return acc
def compute_test_outputs(PLN_object_array, W_ls, num_layers, X_test):
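    # Propagate the test data through the stacked PLN layers; the first layer's
    # input is built from V_Q * W_ls * X_test and the normalised random
    # projection R_l * X_test.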
# Computes the network output for the first layer
PLN_1 = PLN_object_array[0]
W_initial_top = np.dot(np.dot(PLN_1.V_Q, W_ls), X_test)
W_initial_bottom = PLN_1.normalization( | np.dot(PLN_1.R_l, X_test) | numpy.dot |
import sys
import warnings
import itertools
import platform
import pytest
from decimal import Decimal
import numpy as np
from numpy.core import umath
from numpy.random import rand, randint, randn
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
assert_warns, HAS_REFCOUNT
)
class TestResize(object):
def test_copies(self):
A = np.array([[1, 2], [3, 4]])
Ar1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
assert_equal(np.resize(A, (2, 4)), Ar1)
Ar2 = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
assert_equal(np.resize(A, (4, 2)), Ar2)
Ar3 = np.array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]])
assert_equal(np.resize(A, (4, 3)), Ar3)
def test_zeroresize(self):
A = np.array([[1, 2], [3, 4]])
Ar = np.resize(A, (0,))
assert_array_equal(Ar, np.array([]))
assert_equal(A.dtype, Ar.dtype)
Ar = np.resize(A, (0, 2))
assert_equal(Ar.shape, (0, 2))
Ar = np.resize(A, (2, 0))
assert_equal(Ar.shape, (2, 0))
def test_reshape_from_zero(self):
# See also gh-6740
A = np.zeros(0, dtype=[('a', np.float32)])
Ar = np.resize(A, (2, 1))
assert_array_equal(Ar, np.zeros((2, 1), Ar.dtype))
assert_equal(A.dtype, Ar.dtype)
class TestNonarrayArgs(object):
# check that non-array arguments to functions wrap them in arrays
def test_choose(self):
choices = [[0, 1, 2],
[3, 4, 5],
[5, 6, 7]]
tgt = [5, 1, 5]
a = [2, 0, 1]
out = np.choose(a, choices)
assert_equal(out, tgt)
def test_clip(self):
arr = [-1, 5, 2, 3, 10, -4, -9]
out = np.clip(arr, 2, 7)
tgt = [2, 5, 2, 3, 7, 2, 2]
assert_equal(out, tgt)
def test_compress(self):
arr = [[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]]
tgt = [[5, 6, 7, 8, 9]]
out = np.compress([0, 1], arr, axis=0)
assert_equal(out, tgt)
def test_count_nonzero(self):
arr = [[0, 1, 7, 0, 0],
[3, 0, 0, 2, 19]]
tgt = np.array([2, 3])
out = np.count_nonzero(arr, axis=1)
assert_equal(out, tgt)
def test_cumproduct(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.all(np.cumproduct(A) == np.array([1, 2, 6, 24, 120, 720])))
def test_diagonal(self):
a = [[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11]]
out = np.diagonal(a)
tgt = [0, 5, 10]
assert_equal(out, tgt)
def test_mean(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.mean(A) == 3.5)
assert_(np.all(np.mean(A, 0) == np.array([2.5, 3.5, 4.5])))
assert_(np.all(np.mean(A, 1) == np.array([2., 5.])))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.mean([])))
assert_(w[0].category is RuntimeWarning)
def test_ptp(self):
a = [3, 4, 5, 10, -3, -5, 6.0]
assert_equal(np.ptp(a, axis=0), 15.0)
def test_prod(self):
arr = [[1, 2, 3, 4],
[5, 6, 7, 9],
[10, 3, 4, 5]]
tgt = [24, 1890, 600]
assert_equal(np.prod(arr, axis=-1), tgt)
def test_ravel(self):
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tgt = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
assert_equal(np.ravel(a), tgt)
def test_repeat(self):
a = [1, 2, 3]
tgt = [1, 1, 2, 2, 3, 3]
out = np.repeat(a, 2)
assert_equal(out, tgt)
def test_reshape(self):
arr = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
assert_equal(np.reshape(arr, (2, 6)), tgt)
def test_round(self):
arr = [1.56, 72.54, 6.35, 3.25]
tgt = [1.6, 72.5, 6.4, 3.2]
assert_equal(np.around(arr, decimals=1), tgt)
def test_searchsorted(self):
arr = [-8, -5, -1, 3, 6, 10]
out = np.searchsorted(arr, 0)
assert_equal(out, 3)
def test_size(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.size(A) == 6)
assert_(np.size(A, 0) == 2)
assert_(np.size(A, 1) == 3)
def test_squeeze(self):
A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]]
assert_equal(np.squeeze(A).shape, (3, 3))
assert_equal(np.squeeze(np.zeros((1, 3, 1))).shape, (3,))
assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=0).shape, (3, 1))
assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=-1).shape, (1, 3))
assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=2).shape, (1, 3))
assert_equal(np.squeeze([np.zeros((3, 1))]).shape, (3,))
assert_equal(np.squeeze([np.zeros((3, 1))], axis=0).shape, (3, 1))
assert_equal(np.squeeze([np.zeros((3, 1))], axis=2).shape, (1, 3))
assert_equal(np.squeeze([np.zeros((3, 1))], axis=-1).shape, (1, 3))
def test_std(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_almost_equal(np.std(A), 1.707825127659933)
assert_almost_equal(np.std(A, 0), np.array([1.5, 1.5, 1.5]))
assert_almost_equal(np.std(A, 1), np.array([0.81649658, 0.81649658]))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.std([])))
assert_(w[0].category is RuntimeWarning)
def test_swapaxes(self):
tgt = [[[0, 4], [2, 6]], [[1, 5], [3, 7]]]
a = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
out = np.swapaxes(a, 0, 2)
assert_equal(out, tgt)
def test_sum(self):
m = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
tgt = [[6], [15], [24]]
out = np.sum(m, axis=1, keepdims=True)
assert_equal(tgt, out)
def test_take(self):
tgt = [2, 3, 5]
indices = [1, 2, 4]
a = [1, 2, 3, 4, 5]
out = np.take(a, indices)
assert_equal(out, tgt)
def test_trace(self):
c = [[1, 2], [3, 4], [5, 6]]
assert_equal(np.trace(c), 5)
def test_transpose(self):
arr = [[1, 2], [3, 4], [5, 6]]
tgt = [[1, 3, 5], [2, 4, 6]]
assert_equal(np.transpose(arr, (1, 0)), tgt)
def test_var(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_almost_equal(np.var(A), 2.9166666666666665)
assert_almost_equal(np.var(A, 0), np.array([2.25, 2.25, 2.25]))
assert_almost_equal(np.var(A, 1), np.array([0.66666667, 0.66666667]))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.var([])))
assert_(w[0].category is RuntimeWarning)
B = np.array([None, 0])
B[0] = 1j
assert_almost_equal(np.var(B), 0.25)
class TestIsscalar(object):
def test_isscalar(self):
assert_(np.isscalar(3.1))
assert_(np.isscalar(np.int16(12345)))
assert_(np.isscalar(False))
assert_(np.isscalar('numpy'))
assert_(not np.isscalar([3.1]))
assert_(not np.isscalar(None))
# PEP 3141
from fractions import Fraction
assert_(np.isscalar(Fraction(5, 17)))
from numbers import Number
assert_(np.isscalar(Number()))
class TestBoolScalar(object):
def test_logical(self):
f = np.False_
t = np.True_
s = "xyz"
assert_((t and s) is s)
assert_((f and s) is f)
def test_bitwise_or(self):
f = np.False_
t = np.True_
assert_((t | t) is t)
assert_((f | t) is t)
assert_((t | f) is t)
assert_((f | f) is f)
def test_bitwise_and(self):
f = np.False_
t = np.True_
assert_((t & t) is t)
assert_((f & t) is f)
assert_((t & f) is f)
assert_((f & f) is f)
def test_bitwise_xor(self):
f = np.False_
t = np.True_
assert_((t ^ t) is f)
assert_((f ^ t) is t)
assert_((t ^ f) is t)
assert_((f ^ f) is f)
class TestBoolArray(object):
def setup(self):
# offset for simd tests
self.t = np.array([True] * 41, dtype=bool)[1::]
self.f = np.array([False] * 41, dtype=bool)[1::]
self.o = np.array([False] * 42, dtype=bool)[2::]
self.nm = self.f.copy()
self.im = self.t.copy()
self.nm[3] = True
self.nm[-2] = True
self.im[3] = False
self.im[-2] = False
def test_all_any(self):
assert_(self.t.all())
assert_(self.t.any())
assert_(not self.f.all())
assert_(not self.f.any())
assert_(self.nm.any())
assert_(self.im.any())
assert_(not self.nm.all())
assert_(not self.im.all())
# check bad element in all positions
for i in range(256 - 7):
d = np.array([False] * 256, dtype=bool)[7::]
d[i] = True
assert_(np.any(d))
e = np.array([True] * 256, dtype=bool)[7::]
e[i] = False
assert_(not np.all(e))
assert_array_equal(e, ~d)
# big array test for blocked libc loops
for i in list(range(9, 6000, 507)) + [7764, 90021, -10]:
d = np.array([False] * 100043, dtype=bool)
d[i] = True
assert_(np.any(d), msg="%r" % i)
e = np.array([True] * 100043, dtype=bool)
e[i] = False
assert_(not np.all(e), msg="%r" % i)
def test_logical_not_abs(self):
assert_array_equal(~self.t, self.f)
assert_array_equal(np.abs(~self.t), self.f)
assert_array_equal(np.abs(~self.f), self.t)
assert_array_equal(np.abs(self.f), self.f)
assert_array_equal(~np.abs(self.f), self.t)
assert_array_equal(~np.abs(self.t), self.f)
assert_array_equal(np.abs(~self.nm), self.im)
np.logical_not(self.t, out=self.o)
assert_array_equal(self.o, self.f)
np.abs(self.t, out=self.o)
assert_array_equal(self.o, self.t)
def test_logical_and_or_xor(self):
assert_array_equal(self.t | self.t, self.t)
assert_array_equal(self.f | self.f, self.f)
assert_array_equal(self.t | self.f, self.t)
assert_array_equal(self.f | self.t, self.t)
np.logical_or(self.t, self.t, out=self.o)
assert_array_equal(self.o, self.t)
assert_array_equal(self.t & self.t, self.t)
assert_array_equal(self.f & self.f, self.f)
assert_array_equal(self.t & self.f, self.f)
assert_array_equal(self.f & self.t, self.f)
np.logical_and(self.t, self.t, out=self.o)
assert_array_equal(self.o, self.t)
assert_array_equal(self.t ^ self.t, self.f)
assert_array_equal(self.f ^ self.f, self.f)
assert_array_equal(self.t ^ self.f, self.t)
assert_array_equal(self.f ^ self.t, self.t)
np.logical_xor(self.t, self.t, out=self.o)
assert_array_equal(self.o, self.f)
assert_array_equal(self.nm & self.t, self.nm)
assert_array_equal(self.im & self.f, False)
assert_array_equal(self.nm & True, self.nm)
assert_array_equal(self.im & False, self.f)
assert_array_equal(self.nm | self.t, self.t)
assert_array_equal(self.im | self.f, self.im)
assert_array_equal(self.nm | True, self.t)
assert_array_equal(self.im | False, self.im)
assert_array_equal(self.nm ^ self.t, self.im)
assert_array_equal(self.im ^ self.f, self.im)
assert_array_equal(self.nm ^ True, self.im)
assert_array_equal(self.im ^ False, self.im)
class TestBoolCmp(object):
def setup(self):
self.f = np.ones(256, dtype=np.float32)
self.ef = np.ones(self.f.size, dtype=bool)
self.d = np.ones(128, dtype=np.float64)
self.ed = np.ones(self.d.size, dtype=bool)
# generate values for all permutation of 256bit simd vectors
s = 0
for i in range(32):
self.f[s:s+8] = [i & 2**x for x in range(8)]
self.ef[s:s+8] = [(i & 2**x) != 0 for x in range(8)]
s += 8
s = 0
for i in range(16):
self.d[s:s+4] = [i & 2**x for x in range(4)]
self.ed[s:s+4] = [(i & 2**x) != 0 for x in range(4)]
s += 4
self.nf = self.f.copy()
self.nd = self.d.copy()
self.nf[self.ef] = np.nan
self.nd[self.ed] = np.nan
self.inff = self.f.copy()
self.infd = self.d.copy()
self.inff[::3][self.ef[::3]] = np.inf
self.infd[::3][self.ed[::3]] = np.inf
self.inff[1::3][self.ef[1::3]] = -np.inf
self.infd[1::3][self.ed[1::3]] = -np.inf
self.inff[2::3][self.ef[2::3]] = np.nan
self.infd[2::3][self.ed[2::3]] = np.nan
self.efnonan = self.ef.copy()
self.efnonan[2::3] = False
self.ednonan = self.ed.copy()
self.ednonan[2::3] = False
self.signf = self.f.copy()
self.signd = self.d.copy()
self.signf[self.ef] *= -1.
self.signd[self.ed] *= -1.
self.signf[1::6][self.ef[1::6]] = -np.inf
self.signd[1::6][self.ed[1::6]] = -np.inf
self.signf[3::6][self.ef[3::6]] = -np.nan
self.signd[3::6][self.ed[3::6]] = -np.nan
self.signf[4::6][self.ef[4::6]] = -0.
self.signd[4::6][self.ed[4::6]] = -0.
def test_float(self):
# offset for alignment test
for i in range(4):
assert_array_equal(self.f[i:] > 0, self.ef[i:])
assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:])
assert_array_equal(self.f[i:] == 0, ~self.ef[i:])
assert_array_equal(-self.f[i:] < 0, self.ef[i:])
assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:])
r = self.f[i:] != 0
assert_array_equal(r, self.ef[i:])
r2 = self.f[i:] != np.zeros_like(self.f[i:])
r3 = 0 != self.f[i:]
assert_array_equal(r, r2)
assert_array_equal(r, r3)
# check bool == 0x1
assert_array_equal(r.view(np.int8), r.astype(np.int8))
assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
# isnan on amd64 takes the same code path
assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:])
assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:])
assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:])
assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:])
assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:])
def test_double(self):
# offset for alignment test
for i in range(2):
assert_array_equal(self.d[i:] > 0, self.ed[i:])
assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:])
assert_array_equal(self.d[i:] == 0, ~self.ed[i:])
assert_array_equal(-self.d[i:] < 0, self.ed[i:])
assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:])
r = self.d[i:] != 0
assert_array_equal(r, self.ed[i:])
r2 = self.d[i:] != np.zeros_like(self.d[i:])
r3 = 0 != self.d[i:]
assert_array_equal(r, r2)
assert_array_equal(r, r3)
# check bool == 0x1
assert_array_equal(r.view(np.int8), r.astype(np.int8))
assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
# isnan on amd64 takes the same code path
assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:])
assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:])
assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:])
assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:])
assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:])
class TestSeterr(object):
def test_default(self):
err = np.geterr()
assert_equal(err,
dict(divide='warn',
invalid='warn',
over='warn',
under='ignore')
)
def test_set(self):
with np.errstate():
err = np.seterr()
old = np.seterr(divide='print')
assert_(err == old)
new = np.seterr()
assert_(new['divide'] == 'print')
np.seterr(over='raise')
assert_(np.geterr()['over'] == 'raise')
assert_(new['divide'] == 'print')
np.seterr(**old)
assert_(np.geterr() == old)
@pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
def test_divide_err(self):
with np.errstate(divide='raise'):
with assert_raises(FloatingPointError):
np.array([1.]) / np.array([0.])
np.seterr(divide='ignore')
np.array([1.]) / np.array([0.])
def test_errobj(self):
olderrobj = np.geterrobj()
self.called = 0
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with np.errstate(divide='warn'):
np.seterrobj([20000, 1, None])
np.array([1.]) / np.array([0.])
assert_equal(len(w), 1)
def log_err(*args):
self.called += 1
extobj_err = args
assert_(len(extobj_err) == 2)
assert_("divide" in extobj_err[0])
with np.errstate(divide='ignore'):
np.seterrobj([20000, 3, log_err])
np.array([1.]) / np.array([0.])
assert_equal(self.called, 1)
np.seterrobj(olderrobj)
with np.errstate(divide='ignore'):
np.divide(1., 0., extobj=[20000, 3, log_err])
assert_equal(self.called, 2)
finally:
np.seterrobj(olderrobj)
del self.called
def test_errobj_noerrmask(self):
# errmask = 0 has a special code path for the default
olderrobj = np.geterrobj()
try:
# set errobj to something non default
np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT,
umath.ERR_DEFAULT + 1, None])
# call a ufunc
np.isnan(np.array([6]))
# same with the default, lots of times to get rid of possible
# pre-existing stack in the code
for i in range(10000):
np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, umath.ERR_DEFAULT,
None])
np.isnan(np.array([6]))
finally:
np.seterrobj(olderrobj)
class TestFloatExceptions(object):
def assert_raises_fpe(self, fpeerr, flop, x, y):
ftype = type(x)
try:
flop(x, y)
assert_(False,
"Type %s did not raise fpe error '%s'." % (ftype, fpeerr))
except FloatingPointError as exc:
assert_(str(exc).find(fpeerr) >= 0,
"Type %s raised wrong fpe error '%s'." % (ftype, exc))
def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2):
# Check that fpe exception is raised.
#
# Given a floating operation `flop` and two scalar values, check that
# the operation raises the floating point exception specified by
# `fpeerr`. Tests all variants with 0-d array scalars as well.
self.assert_raises_fpe(fpeerr, flop, sc1, sc2)
self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2)
self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()])
self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()])
def test_floating_exceptions(self):
# Test basic arithmetic function errors
with np.errstate(all='raise'):
# Test for all real and complex float types
for typecode in np.typecodes['AllFloat']:
ftype = np.obj2sctype(typecode)
if np.dtype(ftype).kind == 'f':
# Get some extreme values for the type
fi = np.finfo(ftype)
ft_tiny = fi.tiny
ft_max = fi.max
ft_eps = fi.eps
underflow = 'underflow'
divbyzero = 'divide by zero'
else:
# 'c', complex, corresponding real dtype
rtype = type(ftype(0).real)
fi = np.finfo(rtype)
ft_tiny = ftype(fi.tiny)
ft_max = ftype(fi.max)
ft_eps = ftype(fi.eps)
# The complex types raise different exceptions
underflow = ''
divbyzero = ''
overflow = 'overflow'
invalid = 'invalid'
self.assert_raises_fpe(underflow,
lambda a, b: a/b, ft_tiny, ft_max)
self.assert_raises_fpe(underflow,
lambda a, b: a*b, ft_tiny, ft_tiny)
self.assert_raises_fpe(overflow,
lambda a, b: a*b, ft_max, ftype(2))
self.assert_raises_fpe(overflow,
lambda a, b: a/b, ft_max, ftype(0.5))
self.assert_raises_fpe(overflow,
lambda a, b: a+b, ft_max, ft_max*ft_eps)
self.assert_raises_fpe(overflow,
lambda a, b: a-b, -ft_max, ft_max*ft_eps)
self.assert_raises_fpe(overflow,
np.power, ftype(2), ftype(2**fi.nexp))
self.assert_raises_fpe(divbyzero,
lambda a, b: a/b, ftype(1), ftype(0))
self.assert_raises_fpe(invalid,
lambda a, b: a/b, ftype(np.inf), ftype(np.inf))
self.assert_raises_fpe(invalid,
lambda a, b: a/b, ftype(0), ftype(0))
self.assert_raises_fpe(invalid,
lambda a, b: a-b, ftype(np.inf), ftype(np.inf))
self.assert_raises_fpe(invalid,
lambda a, b: a+b, ftype(np.inf), ftype(-np.inf))
self.assert_raises_fpe(invalid,
lambda a, b: a*b, ftype(0), ftype(np.inf))
def test_warnings(self):
# test warning code path
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with np.errstate(all="warn"):
np.divide(1, 0.)
assert_equal(len(w), 1)
assert_("divide by zero" in str(w[0].message))
np.array(1e300) * np.array(1e300)
assert_equal(len(w), 2)
assert_("overflow" in str(w[-1].message))
np.array(np.inf) - np.array(np.inf)
assert_equal(len(w), 3)
assert_("invalid value" in str(w[-1].message))
np.array(1e-300) * np.array(1e-300)
assert_equal(len(w), 4)
assert_("underflow" in str(w[-1].message))
class TestTypes(object):
def check_promotion_cases(self, promote_func):
# tests that the scalars get coerced correctly.
b = np.bool_(0)
i8, i16, i32, i64 = np.int8(0), np.int16(0), np.int32(0), np.int64(0)
u8, u16, u32, u64 = np.uint8(0), np.uint16(0), np.uint32(0), np.uint64(0)
f32, f64, fld = np.float32(0), np.float64(0), np.longdouble(0)
c64, c128, cld = np.complex64(0), np.complex128(0), np.clongdouble(0)
# coercion within the same kind
assert_equal(promote_func(i8, i16), np.dtype(np.int16))
assert_equal(promote_func(i32, i8), np.dtype(np.int32))
assert_equal(promote_func(i16, i64), np.dtype(np.int64))
assert_equal(promote_func(u8, u32), np.dtype(np.uint32))
assert_equal(promote_func(f32, f64), np.dtype(np.float64))
assert_equal(promote_func(fld, f32), np.dtype(np.longdouble))
assert_equal(promote_func(f64, fld), np.dtype(np.longdouble))
assert_equal(promote_func(c128, c64), np.dtype(np.complex128))
assert_equal(promote_func(cld, c128), np.dtype(np.clongdouble))
assert_equal(promote_func(c64, fld), np.dtype(np.clongdouble))
# coercion between kinds
assert_equal(promote_func(b, i32), np.dtype(np.int32))
assert_equal(promote_func(b, u8), np.dtype(np.uint8))
assert_equal(promote_func(i8, u8), np.dtype(np.int16))
assert_equal(promote_func(u8, i32), np.dtype(np.int32))
assert_equal(promote_func(i64, u32), np.dtype(np.int64))
assert_equal(promote_func(u64, i32), np.dtype(np.float64))
assert_equal(promote_func(i32, f32), np.dtype(np.float64))
assert_equal(promote_func(i64, f32), np.dtype(np.float64))
assert_equal(promote_func(f32, i16), np.dtype(np.float32))
assert_equal(promote_func(f32, u32), np.dtype(np.float64))
assert_equal(promote_func(f32, c64), np.dtype(np.complex64))
assert_equal(promote_func(c128, f32), np.dtype(np.complex128))
assert_equal(promote_func(cld, f64), np.dtype(np.clongdouble))
# coercion between scalars and 1-D arrays
assert_equal(promote_func(np.array([b]), i8), np.dtype(np.int8))
assert_equal(promote_func(np.array([b]), u8), np.dtype(np.uint8))
assert_equal(promote_func(np.array([b]), i32), np.dtype(np.int32))
assert_equal(promote_func(np.array([b]), u32), np.dtype(np.uint32))
assert_equal(promote_func(np.array([i8]), i64), np.dtype(np.int8))
assert_equal(promote_func(u64, np.array([i32])), np.dtype(np.int32))
assert_equal(promote_func(i64, np.array([u32])), np.dtype(np.uint32))
assert_equal(promote_func(np.int32(-1), np.array([u64])),
np.dtype(np.float64))
assert_equal(promote_func(f64, np.array([f32])), np.dtype(np.float32))
assert_equal(promote_func(fld, np.array([f32])), np.dtype(np.float32))
assert_equal(promote_func(np.array([f64]), fld), np.dtype(np.float64))
assert_equal(promote_func(fld, np.array([c64])),
np.dtype(np.complex64))
assert_equal(promote_func(c64, np.array([f64])),
np.dtype(np.complex128))
assert_equal(promote_func(np.complex64(3j), np.array([f64])),
np.dtype(np.complex128))
# coercion between scalars and 1-D arrays, where
# the scalar has greater kind than the array
assert_equal(promote_func(np.array([b]), f64), np.dtype(np.float64))
assert_equal(promote_func(np.array([b]), i64), np.dtype(np.int64))
assert_equal(promote_func(np.array([b]), u64), np.dtype(np.uint64))
assert_equal(promote_func(np.array([i8]), f64), np.dtype(np.float64))
assert_equal(promote_func(np.array([u16]), f64), np.dtype(np.float64))
# uint and int are treated as the same "kind" for
# the purposes of array-scalar promotion.
assert_equal(promote_func(np.array([u16]), i32), np.dtype(np.uint16))
# float and complex are treated as the same "kind" for
# the purposes of array-scalar promotion, so that you can do
# (0j + float32array) to get a complex64 array instead of
# a complex128 array.
assert_equal(promote_func(np.array([f32]), c128),
np.dtype(np.complex64))
def test_coercion(self):
def res_type(a, b):
return np.add(a, b).dtype
self.check_promotion_cases(res_type)
# Use-case: float/complex scalar * bool/int8 array
# shouldn't narrow the float/complex type
for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]:
b = 1.234 * a
assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
b = np.longdouble(1.234) * a
assert_equal(b.dtype, np.dtype(np.longdouble),
"array type %s" % a.dtype)
b = np.float64(1.234) * a
assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
b = np.float32(1.234) * a
assert_equal(b.dtype, np.dtype('f4'), "array type %s" % a.dtype)
b = np.float16(1.234) * a
assert_equal(b.dtype, np.dtype('f2'), "array type %s" % a.dtype)
b = 1.234j * a
assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
b = np.clongdouble(1.234j) * a
assert_equal(b.dtype, np.dtype(np.clongdouble),
"array type %s" % a.dtype)
b = np.complex128(1.234j) * a
assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
b = np.complex64(1.234j) * a
assert_equal(b.dtype, np.dtype('c8'), "array type %s" % a.dtype)
# The following use-case is problematic, and to resolve its
# tricky side-effects requires more changes.
#
# Use-case: (1-t)*a, where 't' is a boolean array and 'a' is
# a float32, shouldn't promote to float64
#
# a = np.array([1.0, 1.5], dtype=np.float32)
# t = np.array([True, False])
# b = t*a
# assert_equal(b, [1.0, 0.0])
# assert_equal(b.dtype, np.dtype('f4'))
# b = (1-t)*a
# assert_equal(b, [0.0, 1.5])
# assert_equal(b.dtype, np.dtype('f4'))
#
# Probably ~t (bitwise negation) is more proper to use here,
# but this is arguably less intuitive to understand at a glance, and
# would fail if 't' is actually an integer array instead of boolean:
#
# b = (~t)*a
# assert_equal(b, [0.0, 1.5])
# assert_equal(b.dtype, np.dtype('f4'))
def test_result_type(self):
self.check_promotion_cases(np.result_type)
assert_(np.result_type(None) == np.dtype(None))
def test_promote_types_endian(self):
# promote_types should always return native-endian types
assert_equal(np.promote_types('<i8', '<i8'), np.dtype('i8'))
assert_equal(np.promote_types('>i8', '>i8'), np.dtype('i8'))
assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21'))
assert_equal(np.promote_types('<i8', '<U16'), np.dtype('U21'))
assert_equal(np.promote_types('>U16', '>i8'), np.dtype('U21'))
assert_equal(np.promote_types('<U16', '<i8'), np.dtype('U21'))
assert_equal(np.promote_types('<S5', '<U8'), np.dtype('U8'))
assert_equal(np.promote_types('>S5', '>U8'), np.dtype('U8'))
assert_equal(np.promote_types('<U8', '<S5'), np.dtype('U8'))
assert_equal(np.promote_types('>U8', '>S5'), np.dtype('U8'))
assert_equal(np.promote_types('<U5', '<U8'), np.dtype('U8'))
assert_equal(np.promote_types('>U8', '>U5'), np.dtype('U8'))
assert_equal(np.promote_types('<M8', '<M8'), np.dtype('M8'))
assert_equal(np.promote_types('>M8', '>M8'), np.dtype('M8'))
assert_equal(np.promote_types('<m8', '<m8'), np.dtype('m8'))
assert_equal(np.promote_types('>m8', '>m8'), np.dtype('m8'))
def test_promote_types_strings(self):
assert_equal(np.promote_types('bool', 'S'), np.dtype('S5'))
assert_equal(np.promote_types('b', 'S'), np.dtype('S4'))
assert_equal(np.promote_types('u1', 'S'), np.dtype('S3'))
assert_equal(np.promote_types('u2', 'S'), np.dtype('S5'))
assert_equal(np.promote_types('u4', 'S'), np.dtype('S10'))
assert_equal(np.promote_types('u8', 'S'), np.dtype('S20'))
assert_equal(np.promote_types('i1', 'S'), np.dtype('S4'))
assert_equal(np.promote_types('i2', 'S'), np.dtype('S6'))
assert_equal(np.promote_types('i4', 'S'), np.dtype('S11'))
assert_equal(np.promote_types('i8', 'S'), np.dtype('S21'))
assert_equal(np.promote_types('bool', 'U'), np.dtype('U5'))
assert_equal(np.promote_types('b', 'U'), np.dtype('U4'))
assert_equal(np.promote_types('u1', 'U'), np.dtype('U3'))
assert_equal(np.promote_types('u2', 'U'), np.dtype('U5'))
assert_equal(np.promote_types('u4', 'U'), np.dtype('U10'))
assert_equal(np.promote_types('u8', 'U'), np.dtype('U20'))
assert_equal(np.promote_types('i1', 'U'), np.dtype('U4'))
assert_equal(np.promote_types('i2', 'U'), np.dtype('U6'))
assert_equal(np.promote_types('i4', 'U'), np.dtype('U11'))
assert_equal(np.promote_types('i8', 'U'), np.dtype('U21'))
assert_equal(np.promote_types('bool', 'S1'), np.dtype('S5'))
assert_equal(np.promote_types('bool', 'S30'), np.dtype('S30'))
assert_equal(np.promote_types('b', 'S1'), np.dtype('S4'))
assert_equal(np.promote_types('b', 'S30'), np.dtype('S30'))
#!/usr/bin/env python
# encoding: utf-8
#
# test_map.py
#
# @Author: <NAME> <andrews>
# @Date: 2017-07-02 13:08:00
# @Last modified by: andrews
# @Last modified time: 2018-03-01 11:03:79
from copy import deepcopy
import numpy as np
from astropy import units as u
import matplotlib
import pytest
from marvin import config
from marvin.core.exceptions import MarvinError
from marvin.tools.maps import Maps
from marvin.tools.quantities import Map, EnhancedMap
from marvin.tests import marvin_test_if
from marvin.utils.datamodel.dap import datamodel
from marvin.utils.datamodel.dap.plotting import get_default_plot_params
from marvin.utils.general.maskbit import Maskbit
value1 = np.array([[16.35, 0.8],
[0, -10.]])
value2 = np.array([[591., 1e-8],
[4., 10]])
value_prod12 = np.array([[9.66285000e+03, 8e-9],
[0, -100]])
ivar1 = np.array([[4, 1],
[6.97789734e+36, 1e8]])
ivar2 = np.array([[10, 1e-8],
[5.76744385e+36, 0]])
ivar_sum12 = np.array([[2.85714286e+00, 9.99999990e-09],
[3.15759543e+36, 0]])
ivar_prod12 = np.array([[1.10616234e-05, 1.56250000e-08],
[0, 0.]])
ivar_pow_2 = np.array([[5.23472002e-08, 9.53674316e-01],
[0, 25]])
ivar_pow_05 = np.array([[3.66072168e-03, 7.81250000e+00],
[0, 0]])
ivar_pow_0 = np.array([[0, 0],
[0, 0]])
ivar_pow_m1 = np.array([[4, 1.],
[0, 1e+08]])
ivar_pow_m2 = np.array([[2.67322500e+02, 1.6e-01],
[0, 2.5e+09]])
ivar_pow_m05 = np.array([[0.97859327, 5],
[0, 0]])
u_flux = u.erg / u.cm**2 / u.s / u.def_unit('spaxel')
u_flux2 = u_flux * u_flux
def _get_maps_kwargs(galaxy, data_origin):
if data_origin == 'file':
maps_kwargs = dict(filename=galaxy.mapspath)
else:
maps_kwargs = dict(plateifu=galaxy.plateifu, release=galaxy.release,
bintype=galaxy.bintype, template_kin=galaxy.template,
mode='local' if data_origin == 'db' else 'remote')
return maps_kwargs
@pytest.fixture(scope='function', params=[('emline_gflux', 'ha_6564'),
('emline_gvel', 'oiii_5008'),
('stellar_vel', None),
('stellar_sigma', None)])
def map_(request, galaxy, data_origin):
maps = Maps(**_get_maps_kwargs(galaxy, data_origin))
map_ = maps.getMap(property_name=request.param[0], channel=request.param[1])
map_.data_origin = data_origin
return map_
class TestMap(object):
def test_map(self, map_, galaxy):
assert map_.getMaps().release == galaxy.release
assert tuple(map_.shape) == tuple(galaxy.shape)
assert map_.value.shape == tuple(galaxy.shape)
assert map_.ivar.shape == tuple(galaxy.shape)
assert map_.mask.shape == tuple(galaxy.shape)
assert (map_.masked.data == map_.value).all()
assert (map_.masked.mask == map_.mask.astype(bool)).all()
assert map_.snr == pytest.approx(np.abs(map_.value * np.sqrt(map_.ivar)))
assert datamodel[map_.getMaps()._dapver][map_.datamodel.full()].unit == map_.unit
def test_plot(self, map_):
fig, ax = map_.plot()
assert isinstance(fig, matplotlib.figure.Figure)
assert isinstance(ax, matplotlib.axes._subplots.Subplot)
assert 'Make single panel map or one panel of multi-panel map plot.' in map_.plot.__doc__
@marvin_test_if(mark='skip', map_={'data_origin': ['db']})
def test_save_and_restore(self, temp_scratch, map_):
fout = temp_scratch.join('test_map.mpf')
map_.save(str(fout))
assert fout.check() is True
map_restored = Map.restore(str(fout), delete=True)
assert tuple(map_.shape) == tuple(map_restored.shape)
@pytest.mark.parametrize('property_name, channel',
[('emline_gflux', 'ha_6564'),
('stellar_vel', None)])
def test_deepcopy(self, galaxy, property_name, channel):
maps = Maps(plateifu=galaxy.plateifu)
map1 = maps.getMap(property_name=property_name, channel=channel)
map2 = deepcopy(map1)
for attr in vars(map1):
if not attr.startswith('_'):
value = getattr(map1, attr)
value2 = getattr(map2, attr)
if isinstance(value, np.ndarray):
assert np.isclose(value, value2).all()
elif isinstance(value, np.ma.core.MaskedArray):
assert (np.isclose(value.data, value2.data).all() and
(value.mask == value2.mask).all())
elif isinstance(value, Maskbit) or isinstance(value[0], Maskbit):
if isinstance(value, Maskbit):
value = [value]
value2 = [value2]
for mb, mb2 in zip(value, value2):
for it in ['bits', 'description', 'labels', 'mask', 'name']:
assert getattr(mb, it) == getattr(mb2, it)
assert (mb.schema == mb2.schema).all().all()
elif isinstance(value, Maps):
pass
else:
assert value == value2, attr
def test_getMap_invalid_property(self, galaxy):
maps = Maps(plateifu=galaxy.plateifu)
with pytest.raises(ValueError) as ee:
maps.getMap(property_name='mythical_property')
assert 'Your input value is too ambiguous.' in str(ee.value)
def test_getMap_invalid_channel(self, galaxy):
maps = Maps(plateifu=galaxy.plateifu)
with pytest.raises(ValueError) as ee:
maps.getMap(property_name='emline_gflux', channel='mythical_channel')
assert 'Your input value is too ambiguous.' in str(ee.value)
@marvin_test_if(mark='include', maps={'plateifu': '8485-1901',
'release': 'MPL-6',
'mode': 'local',
'data_origin': 'file'})
def test_quatities_reorder(self, maps):
"""Asserts the unit survives a quantity reorder (issue #374)."""
ha = maps['emline_gflux_ha']
assert ha is not None
assert ha.unit is not None
reordered_ha = np.moveaxis(ha, 0, -1)
assert reordered_ha.unit is not None
class TestMapArith(object):
def test_add_constant(self, galaxy):
maps = Maps(plateifu=galaxy.plateifu)
ha = maps['emline_gflux_ha_6564']
ha10 = ha + 10.
assert ha10.value == pytest.approx(ha.value + 10.)
assert ha10.ivar == pytest.approx(ha.ivar)
assert ha10.mask == pytest.approx(ha.mask)
def test_subtract_constant(self, galaxy):
maps = Maps(plateifu=galaxy.plateifu)
ha = maps['emline_gflux_ha_6564']
ha10 = ha - 10.
assert ha10.value == pytest.approx(ha.value - 10.)
assert ha10.ivar == pytest.approx(ha.ivar)
assert ha10.mask == pytest.approx(ha.mask)
def test_multiply_constant(self, galaxy):
maps = Maps(plateifu=galaxy.plateifu)
ha = maps['emline_gflux_ha_6564']
ha10 = ha * 10.
assert ha10.value == pytest.approx(ha.value * 10.)
assert ha10.ivar == pytest.approx(ha.ivar / 10.**2)
assert ha10.mask == pytest.approx(ha.mask)
def test_divide_constant(self, galaxy):
maps = Maps(plateifu=galaxy.plateifu)
ha = maps['emline_gflux_ha_6564']
ha10 = ha / 10.
assert ha10.value == pytest.approx(ha.value / 10.)
assert ha10.ivar == pytest.approx(ha.ivar * 10.**2)
assert ha10.mask == pytest.approx(ha.mask)
@pytest.mark.parametrize('ivar1, ivar2, expected',
[(ivar1, ivar2, ivar_sum12)])
def test_add_ivar(self, ivar1, ivar2, expected):
assert Map._add_ivar(ivar1, ivar2) == pytest.approx(expected)
@pytest.mark.parametrize('ivar1, ivar2, value1, value2, value_prod12, expected',
[(ivar1, ivar2, value1, value2, value_prod12, ivar_prod12)])
def test_mul_ivar(self, ivar1, ivar2, value1, value2, value_prod12, expected):
ivar = Map._mul_ivar(ivar1, ivar2, value1, value2, value_prod12)
ivar[np.isnan(ivar)] = 0
#!/usr/bin/env python2
from logging import raiseExceptions
from sys import path
import os
import rospy
import cv2
import numpy as np
import imutils
import argparse
import itertools
import tf2_ros as tf2
import tf2_geometry_msgs
import tf
from collections import defaultdict
from std_msgs.msg import String
from sensor_msgs.msg import Image
from sensor_msgs.msg import CameraInfo
from geometry_msgs.msg import Pose, PoseArray, TransformStamped, PoseStamped
from cv_bridge import CvBridge, CvBridgeError
import cv2.aruco as aruco
import utils
# Names of each possible ArUco tag OpenCV supports
ARUCO_DICT = {
"DICT_4X4_50": aruco.DICT_4X4_50,
"DICT_4X4_100": aruco.DICT_4X4_100,
"DICT_4X4_250": aruco.DICT_4X4_250,
"DICT_4X4_1000": aruco.DICT_4X4_1000,
"DICT_5X5_50": aruco.DICT_5X5_50,
"DICT_5X5_100": aruco.DICT_5X5_100,
"DICT_5X5_250": aruco.DICT_5X5_250,
"DICT_5X5_1000": aruco.DICT_5X5_1000,
"DICT_6X6_50": aruco.DICT_6X6_50,
"DICT_6X6_100": aruco.DICT_6X6_100,
"DICT_6X6_250": aruco.DICT_6X6_250,
"DICT_6X6_1000": aruco.DICT_6X6_1000,
"DICT_7X7_50": aruco.DICT_7X7_50,
"DICT_7X7_100": aruco.DICT_7X7_100,
"DICT_7X7_250": aruco.DICT_7X7_250,
"DICT_7X7_1000": aruco.DICT_7X7_1000,
"DICT_ARUCO_ORIGINAL": aruco.DICT_ARUCO_ORIGINAL,
"DICT_APRILTAG_16h5": aruco.DICT_APRILTAG_16h5,
"DICT_APRILTAG_25h9": aruco.DICT_APRILTAG_25h9,
"DICT_APRILTAG_36h10": aruco.DICT_APRILTAG_36h10,
"DICT_APRILTAG_36h11": aruco.DICT_APRILTAG_36h11 }
class ImageConverter(object):
def __init__(self, **kwargs):
"""
Object for detecting and tracking ArUco markers.
----------
Args:
Keyword Args:
aruco_type {string}: The type of ArUco marker to detect (a key of ARUCO_DICT).
aruco_length {float}: The side length of the ArUco marker in m.
aruco_transforms {string}: The file containing the transformation matrices between markers and the desired pose.
aruco_update_rate {float}: The rate at which the ArUco markers are updated.
aruco_obj_id {string}: The name of the object. A TF frame with that name will be broadcast.
save_dir {string}: The directory where the marker transform file will be saved after calibration.
camera_img_topic {string}: The topic where the camera image is published.
camera_info_topic {string}: The topic where the camera info is published.
camera_frame_id {string}: The frame id of the camera.
"""
self.bridge = CvBridge()
# Settings
self.marker_type = kwargs["aruco_type"]
self.marker_size = kwargs["aruco_length"]
self.marker_transform_file = kwargs["aruco_transforms"]
self.aruco_update_rate = kwargs["aruco_update_rate"]
self.aruco_object_id = kwargs["aruco_obj_id"]
self.camera_img_topic = kwargs["camera_img_topic"]
self.camera_info_topic = kwargs["camera_info_topic"]
self.camera_frame_id = kwargs["camera_frame_id"]
#--- Used when finding transforms between markers ----#
self.marker_transforms_list = [] # Transformations between markers
self.marker_id_list = [] # Ids of markers
self.marker_updates_list = []
#-----------------------------------------------------#
#---- Markers detected at each camera frame ----#
self.marker_pose_list = PoseArray() # Poses of markers in camera frame
self.detected_ids = [] # Coresponding detected ids
#----------------------------------------------#
#---- Used at prediction time ----#
self.obj_transform = Pose()
if not self.marker_transform_file is None:
try:
self.marker_transforms = self.load_marker_transform(self.marker_transform_file)
except:
raise ValueError("Invalid marker transform file")
#--------------------------------#
# ROS Publisher
self.aruco_pub = rospy.Publisher("aruco_img", Image, queue_size=10)
self.tf_brodcaster = tf2.TransformBroadcaster()
self.tf_static_brodcaster = tf2.StaticTransformBroadcaster()
self.tf_buffer = tf2.Buffer()
self.tf_listener = tf2.TransformListener(self.tf_buffer)
# ROS Subscriber
self.image_sub = rospy.Subscriber(
self.camera_img_topic, Image, self.img_cb)
self.info_sub = rospy.Subscriber(
self.camera_info_topic, CameraInfo, self.info_cb)
def load_marker_transform(self, marker_transform_file):
"""
Loads the marker transforms from a file.
----------
Args:
marker_transform_file {string}: The file containing the marker transforms.
----------
Returns:
dict: A dictionary containing the marker transforms.
"""
load_unformated = np.load(marker_transform_file, allow_pickle=True)
mk_transform = load_unformated['mk_tf_dict'][()]
rospy.loginfo(" TF between markers successfully loaded from file.")
return mk_transform
def camera_base_tf(self):
camera_mtx = np.array([[ 0.02158261, -0.99200359, -0.12435061, 0.08366512],
[-0.99954531, -0.01879069, -0.0235814, 0.53404021],
[ 0.0210562, 0.12480302, -0.99195809, 1.27082266],
[ 0., 0., 0., 1.]])
camera_tf = TransformStamped()
assert (isinstance(camera_tf, TransformStamped))
camera_tf.header.stamp = rospy.Time.now()
camera_tf.header.frame_id = "irb120_base"
camera_tf.child_frame_id = self.camera_frame_id
camera_pose = utils.matrix_to_pose(camera_mtx)
camera_tf.transform.translation = camera_pose.position
camera_tf.transform.rotation = camera_pose.orientation
self.tf_static_brodcaster.sendTransform(camera_tf)
def img_cb(self, msg): # Callback function for image msg
"""
Callback when a new image is received.
----------
Args:
msg {Image}: The image message.
----------
self.markers_img: An image with drawn markers.
self.marker_pose_list {PoseArray}: A list of poses of the markers in the camera frame.
self.detected_ids {list}: A corresponding list to self.marker_pose_list, containing the detected ids.
"""
try:
self.color_msg = msg
self.color_img = self.bridge.imgmsg_to_cv2(self.color_msg,"bgr8")
except CvBridgeError as e:
print(e)
markers_img, marker_pose_list, id_list = self.detect_aruco(self.color_img)
self.markers_img = markers_img
self.marker_pose_list = marker_pose_list
self.detected_ids = id_list
def info_cb(self, msg):
"""
Callback for the camera information.
----------
Args:
msg {CameraInfo}: The camera information message.
----------
self.K {numpy.array}: The camera matrix.
self.D {numpy.array}: The distortion coefficients.
"""
self.K = np.reshape(msg.K,(3,3)) # Camera matrix
self.D = np.array(msg.D) # Distortion matrix. 5 for IntelRealsense, 8 for AzureKinect
def detect_aruco(self, img, broadcast_markers_tf=False):
"""
Given an RGB image, detect ArUco markers.
----------
Args:
img -- RGB image
----------
Returns:
image_with_aruco -- image with aruco markers
marker_pose_list {PoseArray} -- list of poses of the detected markers
id_list {list} -- list of detected ids
"""
# Create parameters for marker detection
aruco_dict = aruco.Dictionary_get(ARUCO_DICT[self.marker_type])
parameters = aruco.DetectorParameters_create()
parameters.minCornerDistanceRate = 0.02
parameters.minMarkerDistanceRate = 0.05
parameters.adaptiveThreshWinSizeMin = 2
parameters.adaptiveThreshWinSizeMax = 30
parameters.perspectiveRemovePixelPerCell = 4
parameters.perspectiveRemoveIgnoredMarginPerCell = 0.15
parameters.maxErroneousBitsInBorderRate = 0.4
parameters.errorCorrectionRate = 1.
parameters.cornerRefinementMethod = aruco.CORNER_REFINE_CONTOUR
# Detect aruco markers
corners, ids, rejected = aruco.detectMarkers(img, aruco_dict, parameters = parameters)
output_img = img.copy()
output_img = aruco.drawDetectedMarkers(
img, rejected, borderColor=(100, 0, 240))
marker_pose_list = PoseArray()
id_list = []
if len(corners) > 0:
markerLength = self.marker_size
cameraMatrix = self.K
distCoeffs = self.D
output_img = img.copy()
# For numerous markers:
for i, marker_id in enumerate(ids):
# Draw bounding box on the marker
img = aruco.drawDetectedMarkers(img, [corners[i]], marker_id)
rvec,tvec,_ = aruco.estimatePoseSingleMarkers([corners[i]],markerLength, cameraMatrix, distCoeffs)
output_img = aruco.drawAxis(img, cameraMatrix, distCoeffs, rvec, tvec, 0.05)
# Convert its pose to Pose.msg format in order to publish
marker_pose = self.make_pose(rvec, tvec)
if broadcast_markers_tf == True:
tf_marker = TransformStamped()
tf_marker.header.stamp = rospy.Time.now()
tf_marker.header.frame_id = self.camera_frame_id
tf_marker.child_frame_id = "marker_{}".format(marker_id)
tf_marker.transform.translation = marker_pose.position
tf_marker.transform.rotation = marker_pose.orientation
self.tf_brodcaster.sendTransform(tf_marker)
marker_pose_list.poses.append(marker_pose)
id_list.append(int(marker_id))
else:
output_img = img
out_img = Image()
out_img = self.bridge.cv2_to_imgmsg(output_img, "bgr8")
self.aruco_pub.publish(out_img)
return output_img, marker_pose_list, id_list
def make_pose(self, rvec, tvec):
"""
Given a rotation vector and a translation vector, returns a Pose.
----------
Args:
rvec {np.array} -- rotation vector of the marker
tvec {np.array} -- translation vector of the marker
----------
Returns:
Pose -- Pose of the marker
"""
marker_pose = Pose()
tvec = np.squeeze(tvec)
rvec = np.squeeze(rvec)
r_mat = np.eye(3)
import re
import pickle as pickle
from collections import namedtuple
import numpy as np
import skimage.transform
from scipy.fftpack import fftn, ifftn
from skimage.feature import peak_local_max, canny
from skimage.transform import hough_circle
import skimage.draw
from configuration import config
import utils
import skimage.exposure, skimage.filters
def read_labels(file_path):
id2labels = {}
train_csv = open(file_path)
lines = train_csv.readlines()
i = 0
for item in lines:
if i == 0:
i = 1
continue
id, systole, diastole = item.replace('\n', '').split(',')
id2labels[int(id)] = [float(systole), float(diastole)]
return id2labels
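# Usage sketch (hypothetical file name): maps each patient id to its
# [systole, diastole] label pair read from the training CSV.
#   id2labels = read_labels('train.csv')
#   systole, diastole = id2labels[1]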
def read_slice(path):
return pickle.load(open(path))['data']
def read_fft_slice(path):
d = pickle.load(open(path))['data']
ff1 = fftn(d)
fh = np.absolute(ifftn(ff1[1, :, :]))
fh[fh < 0.1 * np.max(fh)] = 0.0
d = 1. * fh / np.max(fh)
d = np.expand_dims(d, axis=0)
return d
def read_metadata(path):
d = pickle.load(open(path))['metadata'][0]
metadata = {k: d[k] for k in ['PixelSpacing', 'ImageOrientationPatient', 'ImagePositionPatient', 'SliceLocation',
'PatientSex', 'PatientAge', 'Rows', 'Columns']}
metadata['PixelSpacing'] = np.float32(metadata['PixelSpacing'])
metadata['ImageOrientationPatient'] = np.float32(metadata['ImageOrientationPatient'])
metadata['SliceLocation'] = np.float32(metadata['SliceLocation'])
metadata['ImagePositionPatient'] = np.float32(metadata['ImagePositionPatient'])
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from joblib import dump, load
from sklearn import metrics
import time
def model_save (model, model_name):
file_name = model_name+'.joblib'
dump(model, file_name)
def model_load (model_name):
file_name = model_name+'.joblib'
return load(file_name)
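# Usage sketch (hypothetical names): the '.joblib' suffix is appended by the
# helpers, so 'rf_model' is stored as and loaded from 'rf_model.joblib'.
#   model_save(fitted_model, 'rf_model')
#   model = model_load('rf_model')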
def plot_confidence_interval_for_data (model, X):
"""
Pass 10 - 15 datapoints for better visualization.
This function plots the confidence interval of predictive value for the provided datapoints
Parameters:
-----------
model : model that is built
X : datapoints for evaluation
Returns:
--------
Plot
"""
preds = np.stack([t.predict(X) for t in model.estimators_], axis=1)
preds_ds = pd.DataFrame()
preds_ds['mean'] = preds.mean(axis=1)
preds_ds['std'] = preds.std(axis=1)
fig = plt.figure(figsize=(15,6))
my_xticks = ['datapoint ' + str(i+1) for i in list(preds_ds.index)]
plt.errorbar(x = preds_ds.index, y=preds_ds['mean'], yerr=preds_ds['std'],
fmt='o', color='blue', ecolor='lightblue', capsize=3)
plt.title('Confidence Interval for the predicted value')
plt.xticks(preds_ds.index, my_xticks)
for i in list(preds_ds.index):
m, std = round(preds_ds['mean'][i],1), round(preds_ds['std'][i],2)
s=f' pred={m} \n std dev= {std}'
plt.text(x = i, y=preds_ds['mean'][i], s=s )
plt.show()
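# Usage sketch (hypothetical names): pass a trained ensemble (e.g. a
# RandomForestRegressor) and a small slice of the data so the error bars stay
# readable, as suggested in the docstring above.
#   plot_confidence_interval_for_data(model, X_test[:12])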
def plot_confidence_interval_for_variable (model, X, y, variable):
"""
This function plots the confidence interval of predictive value for the provided variable
Parameters:
-----------
model : model that is built
X : datapoints
y : actual value
variable : variable for evaluation
Returns:
--------
Plot
"""
preds = np.stack([t.predict(X) for t in model.estimators_], axis=1)
X_ds_new = X.copy()
X_ds_new['actual'] = y
X_ds_new['pred'] = np.mean(preds, axis=1)
X_ds_new['pred_std'] = np.std(preds, axis=1)
# Copyright (c) 2019 Horizon Robotics. All Rights Reserved.
"""
A demonstration of Q learning for the simple_navigation environment.
It is the same as train_simple_navigation.py but implemented in TensorFlow 2.0.
"""
import gym
import os
import random
import social_bot
import logging
import matplotlib.pyplot as plt
import numpy as np
import psutil
import PIL
from social_bot.util.replay_buffer import PrioritizedReplayBuffer, ReplayBuffer
from collections import deque, namedtuple
import time
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.layers as layers
import tensorflow.keras.models as models
class Options(object):
"""
The class for all the settings
"""
max_steps = int(1e8)
learning_rate = 5e-4
history_length = 2
replay_buffer_size = 500000
discount_factor = 0.99
resized_image_size = (84, 84)
# use nstep reward for updating Q values
nstep_reward = 10
# update model every so many steps
learn_freq = 4
# starts to update model after so many steps
learn_start = 8000
batch_size = 64
# update Q value target net every so many steps
target_net_update_freq = 40000
# exploration linearly decreases from exploration_start to exploration_end
# in the first exploration_steps steps
exploration_steps = 500000
exploration_start = 0.9
exploration_end = 0.01
# function for converting action to feature
# argument _ is for self, because f_action_feature is treated as a class method
f_action_feature = lambda _, action: (0.5 * (action // 5) - 1, 0.5 * (action % 5) - 1)
f_action_to_control = lambda _, action: (0.05 * (action // 5) - 0.1, 0.05 * (action % 5) - 0.1)
action_stand_still = 12
action_discretize_levels = 5
# If greater than 0, we calculate the exponential moving average of discounted reward.
# And use it as baseline for q values.
ema_reward_alpha = 1. - 1e-5
# f_action_feature = lambda _, action: (0.4 * (action // 6) - 1, 0.4 * (action % 6) - 1)
# f_action_to_control = lambda _, action: (0.04 * (action // 6), 0.04 * (action % 6))
# action_stand_still = 0
# action_discretize_levels = 6
# Prioritized Experience Replay: https://arxiv.org/pdf/1511.05952.pdf
use_prioritized_replay = False
prioritized_replay_eps = 1e-6
prioritized_replay_alpha = 0.5
prioritized_replay_beta0 = 0.3
# Gamma is for a new term which gives higher priority to experiences near reward.
# It scales the priority from above by (1+d)**(-gamma), where d is how many steps in
# the future a non-zero rewad will be encountered. It is gamma linearly decreases
# from gamma0 to 0 towards the end of the training.
prioritized_replay_gamma0 = 0.3
log_freq = 1000
save_freq = 100000
model_dir = '/tmp/train_simple_navigation/ema_r_10step'
show_param_stats_freq = 10000
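# Note on the action encoding above (derived from the lambdas): with
# action_discretize_levels = 5 there are 25 discrete actions, and action a maps
# to the control pair (0.05*(a // 5) - 0.1, 0.05*(a % 5) - 0.1); for example
# action_stand_still = 12 maps to (0.0, 0.0).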
def main(options):
"""
The entrance of the program
Args:
options (Options): options
"""
for attr in dir(options):
if not attr.startswith('__'):
logging.info(" %s=%s" % (attr, options.__getattribute__(attr)))
env = gym.make("SocialBot-SimpleNavigation-v0")
assert isinstance(env.action_space, gym.spaces.Box)
assert isinstance(env.observation_space, gym.spaces.Box)
image_shape = env.observation_space.shape
agent = QAgent(
image_shape=(image_shape[2], ) + options.resized_image_size,
num_actions=options.action_discretize_levels**2,
options=options)
episode_rewards = deque(maxlen=options.log_freq)
steps = deque(maxlen=options.log_freq)
end_q_values = deque(maxlen=options.log_freq)
total_steps = 0
episodes = 0
t0 = time.time()
proc = psutil.Process(os.getpid())
obs = env.reset()
agent.start_new_episode()
episode_reward = 0.
episode_steps = 0
reward = 0
period_reward = 0
logging.info(" mem=%dM" % (proc.memory_info().rss // 1e6))
while total_steps < options.max_steps:
obs = PIL.Image.fromarray(obs).resize(options.resized_image_size,
PIL.Image.ANTIALIAS)
obs = np.transpose(obs, [2, 0, 1])
action, q = agent.act(obs, reward)
control = options.f_action_to_control(action)
new_obs, reward, done, _ = env.step(control)
agent.learn(obs, action, reward, done)
obs = new_obs
episode_reward += reward
period_reward += reward
episode_steps += 1
total_steps += 1
if done:
episodes += 1
episode_rewards.append(episode_reward)
steps.append(episode_steps)
end_q_values.append(q)
reward = 0
episode_reward = 0.
episode_steps = 0
obs = env.reset()
agent.start_new_episode()
if total_steps % options.log_freq == 0 and len(steps) > 0 and len(
episode_rewards) > 0:
logging.info(
" episodes=%s" % episodes + " total_steps=%s" % total_steps +
" fps=%.2f" % (options.log_freq / (time.time() - t0)) +
" mem=%dM" % (proc.memory_info().rss // 1e6) +
" r_per_step=%.3g" % (period_reward / options.log_freq) +
" r_per_episode=%.3g" %
(sum(episode_rewards) / len(episode_rewards)) +
" avg_steps=%.3g" % (sum(steps) / len(steps)) +
" avg_end_q=%.3g" % (sum(end_q_values) / len(steps)) +
" max_end_q=%.3g" % max(end_q_values) +
" min_end_q=%.3g" % min(end_q_values) + agent.get_stats())
period_reward = 0
agent.reset_stats()
steps.clear()
episode_rewards.clear()
end_q_values.clear()
t0 = time.time()
if episodes % options.save_freq == 0:
agent.save_model(options.model_dir + '/agent.model')
Experience = namedtuple(
"Experience",
field_names=["state", "action", "reward", "done", "reward_dist"])
def select(mat, indices):
sel = tf.concat([
tf.reshape(tf.range(mat.shape[0], dtype=tf.int64), (-1, 1)),
tf.reshape(tf.cast(indices, dtype=tf.int64), (-1, 1))
],
axis=1)
return tf.gather_nd(mat, sel)
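# select() picks one column per row: given mat of shape (batch, num_actions) and
# a vector of per-row indices, it returns mat[i, indices[i]] for every row i.
# Sketch (shapes only, not from the source):
#   q_taken = select(q_values, actions)  # shape (batch,), Q-value of each chosen action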
class QAgent(object):
"""
A simple Q learning agent for discrete action space
"""
def __init__(self, image_shape, num_actions, options):
num_image_channels = image_shape[0]
num_input_channels = num_image_channels * (options.history_length + 1)
if options.f_action_feature is not None:
num_input_channels += len(
(options.f_action_feature)(0)) * options.history_length
self._num_actions = num_actions
self._options = options
input_shape = image_shape[1:] + (num_input_channels, )
self._acting_net = Network("acting_net", input_shape, num_actions)
self._target_net = Network("target_net", input_shape, num_actions)
self._optimizer = tf.keras.optimizers.Adam(lr=options.learning_rate)
self._huber_loss = tf.keras.losses.Huber()
self._episode_steps = 0
self._total_steps = 0
C = PrioritizedReplayBuffer if options.use_prioritized_replay else ReplayBuffer
self._replay_buffer = C(
options.replay_buffer_size,
options.history_length,
future_length=options.nstep_reward)
self._history = deque(maxlen=options.history_length)
self.reset_stats()
self._ema_r = 0.
self._ema_c = 0.
def calc_ema_reward(self):
r = self._ema_r
f = 1.
# factor for correcting uncounted future reward
f -= self._options.discount_factor * self._ema_c
# factor for correcting limitted steps
f -= self._options.ema_reward_alpha**self._total_steps
return r / f
def act(self, obs, reward):
"""
Calcuate the action for the current step
Args:
obs (np.array): observation for the current step
reward (float): reward received for the previous step
Returns:
int: action id
"""
eps = self.get_exploration_rate()
if len(self._history) > 0:
self._history[-1] = self._history[-1]._replace(reward=reward)
if self._episode_steps < self._options.history_length:
action = self._options.action_stand_still
q = 0
else:
input = self._make_input(obs, self._history)
q_values = self._acting_net.calc_q_values(input)
q_values = q_values.numpy().reshape(-1)
if random.random() < eps:
action = random.randint(0, self._num_actions - 1)
else:
action = np.argmax(q_values)
q = q_values[action]
if self._options.ema_reward_alpha > 0:
q += self.calc_ema_reward()
self._sum_act_q += q
self._num_act_q += 1
self._total_steps += 1
self._episode_steps += 1
self._history.append(
Experience(obs, action, reward=0, done=False, reward_dist=0))
return action, q
def get_exploration_rate(self):
p = min(1., float(self._total_steps) / self._options.exploration_steps)
eps = (1 - p) * self._options.exploration_start \
+ p * self._options.exploration_end
return eps
def start_new_episode(self):
self._episode_steps = 0
self._history.clear()
self._ema_c = 0.
def save_model(self, path):
self._acting_net.save_weights(path)
def _get_prioritized_replay_beta(self):
p = min(1., float(self._total_steps) / self._options.max_steps)
return (1 - p) * self._options.prioritized_replay_beta0 + p
def _get_prioritized_replay_gamma(self):
p = min(1., float(self._total_steps) / self._options.max_steps)
return (1 - p) * self._options.prioritized_replay_gamma0
def _update_reward_dist(self):
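# Walks backwards from the newest experience, stamping each zero-reward step
# with its distance d to the next non-zero reward and scaling its replay
# priority by (1 + d)**(-gamma), so experiences far from any reward are
# sampled less often (see prioritized_replay_gamma0 in Options).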
i = len(self._replay_buffer) - 2
d = 1
indices = []
priorities = []
initial_priority = self._replay_buffer.initial_priority
gamma = self._get_prioritized_replay_gamma()
while i >= 0:
e = self._replay_buffer[i]
if e.reward != 0:
break
self._replay_buffer[i] = e._replace(reward_dist=d)
indices.append(i)
priorities.append(initial_priority * (1 + d)**(-gamma))
d += 1
i -= 1
self._replay_buffer.update_priority(indices, priorities)
@tf.function
def _tf_learn(self, inputs, actions, rewards, next_inputs, dones,
is_weights, ema_reward):
# Double Q Learning: https://arxiv.org/pdf/1509.06461.pdf
qs_next = self._acting_net.calc_q_values(next_inputs)
qs_target = self._target_net.calc_q_values(next_inputs)
a = tf.argmax(qs_next, axis=1)
q_target = select(qs_target, a)
q_target = tf.reshape(q_target, (-1, 1)) + ema_reward
q_target = rewards + (self._options.discount_factor**self._options.
nstep_reward) * q_target * (1 - dones)
with tf.GradientTape() as tape:
qs = self._acting_net.calc_q_values(inputs)
q = tf.reshape(select(qs, actions), (-1, 1)) + ema_reward
td_error = q - q_target
loss = self._huber_loss(q, q_target, sample_weight=is_weights)
grads = tape.gradient(loss, self._acting_net.trainable_variables)
self._optimizer.apply_gradients(
zip(grads, self._acting_net.trainable_variables))
return td_error, q, q_target, loss
def learn(self, obs, action, reward, done):
"""
Perform one stap of learning
Args:
obs (np.array): The observation
action (int): Action taken at this step
reward (float): Reward received for this step
done (bool): Whether reached the end of an episode
"""
self._ema_c = self._options.ema_reward_alpha * (
self._options.discount_factor * self._ema_c - 1) + 1
self._ema_r = self._options.ema_reward_alpha * self._ema_r + self._ema_c * reward
e = Experience(obs, action, reward, done, reward_dist=0)
self._replay_buffer.add_experience(e)
if reward != 0:
self._update_reward_dist()
options = self._options
if self._total_steps <= options.learn_start:
return
if self._total_steps % options.learn_freq != 0:
return
data = self._get_samples(options.batch_size)
with tf.device('/device:GPU:0'):
inputs, actions, rewards, next_inputs, dones, reward_dist, is_weights = map(
tf.convert_to_tensor, data[:-1])
indices = data[-1]
ema_reward = 0.0
if options.ema_reward_alpha > 0:
ema_reward = self.calc_ema_reward()
ema_reward = tf.constant(ema_reward)
is_weights = is_weights**self._get_prioritized_replay_beta()
batch_size = options.batch_size
td_error, q, q_target, loss = self._tf_learn(inputs, actions, rewards,
next_inputs, dones,
is_weights, ema_reward)
# minimize the loss
priorities = abs(td_error.numpy()).reshape(-1)
priorities = (priorities + options.prioritized_replay_eps
)**options.prioritized_replay_alpha
gamma = self._get_prioritized_replay_gamma()
reward_dist = np.reshape(reward_dist, -1)
priorities = priorities * (1 + reward_dist)**(-gamma)
self._replay_buffer.update_priority(indices, priorities)
self._sum_is_weights += np.sum(is_weights)
self._sum_loss += loss
self._sum_q += np.mean(q)
self._sum_q_weighted += np.sum(q * is_weights)
self._sum_q_target += np.mean(q_target)
self._sum_q_target_weighted += np.sum(q_target * is_weights)
self._sum_r += np.mean(rewards)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
#import moorpy.MoorSolve as msolve
from moorpy.helpers import CatenaryError, dsolve2
def catenary(XF, ZF, L, EA, W, CB=0, HF0=0, VF0=0, Tol=0.000001, nNodes=20, MaxIter=100, plots=0):
'''
The quasi-static mooring line solver. Adapted from catenary subroutine in FAST v7 by <NAME>.
Note: this version is updated Oct 7 2020 to use the dsolve solver.
Parameters
----------
XF : float
Horizontal distance from end 1 to end 2 [m]
ZF : float
Vertical distance from end 1 to end 2 [m] (positive up)
L : float
Unstretched length of line [m]
EA : float
Extensional stiffness of line [N]
W : float
Weight of line in fluid per unit length [N/m]
CB : float, optional
If positive, coefficient of seabed static friction drag. If negative, no seabed contact and the value is the distance down from end A to the seabed in m.
NOTE: friction (CB > 0) should only be applied when end A of the line is at an anchor, otherwise assumptions are violated.
HF0 : float, optional
Horizontal fairlead tension. If zero or not provided, a guess will be calculated.
VF0 : float, optional
Vertical fairlead tension. If zero or not provided, a guess will be calculated.
Tol : float, optional
Convergence tolerance within Newton-Raphson iteration specified as an absolute displacement error
nNodes : int, optional
Number of nodes to describe the line
MaxIter: int, optional
Maximum number of iterations to try before resetting to default ICs and then trying again
plots : int, optional
1: plot output, 0: don't
Returns
-------
: tuple
(end 1 horizontal tension, end 1 vertical tension, end 2 horizontal tension, end 2 vertical tension, info dictionary) [N] (positive up)
'''
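# A minimal usage sketch (hypothetical numbers, not from the source): solve a
# single slack line and unpack the end-A and end-B force components plus the
# info dict, in the order described under Returns above.
#   fAH, fAV, fBH, fBV, info = catenary(XF=600.0, ZF=200.0, L=650.0,
#                                       EA=1e9, W=800.0, CB=0.5)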
# make info dict to contain any additional outputs
info = dict(error=False)
info['call'] = f"catenary({XF}, {ZF}, {L}, {EA}, {W}, CB={CB}, HF0={HF0}, VF0={VF0}, Tol={Tol}, MaxIter={MaxIter}, plots=1)"
# make some arrays if needed for plotting each node
if plots > 0:
s = np.linspace(0,L,nNodes) # Unstretched arc distance along line from anchor to each node where the line position and tension can be output (meters)
Xs= np.zeros(nNodes) # Horizontal locations of each line node relative to the anchor (meters)
Zs= np.zeros(nNodes) # Vertical locations of each line node relative to the anchor (meters)
Te= np.zeros(nNodes) # Effective line tensions at each node (N)
# flip line in the solver if it is buoyant
if W < 0:
W = -W
ZF = -ZF
CB = -10000. # <<< TODO: set this to the distance to sea surface <<<
flipFlag = True
else:
flipFlag = False
# reverse line in the solver if end A is above end B
if ZF < 0:
ZF = -ZF
reverseFlag = True
else:
reverseFlag = False
# ensure the input variables are realistic
if XF <= 0.0:
raise CatenaryError("XF is zero or negative!")
if L <= 0.0:
raise CatenaryError("L is zero or negative!")
if EA <= 0.0:
raise CatenaryError("EA is zero or negative!")
# Solve for the horizontal and vertical forces at the fairlead (HF, VF) and at the anchor (HA, VA)
# There are many "ProfileTypes" of a mooring line and each must be analyzed separately (1-3 are consistent with FAST v7)
# ProfileType=0: Entire line is on seabed
# ProfileType=1: No portion of the line rests on the seabed
# ProfileType=2: A portion of the line rests on the seabed and the anchor tension is nonzero
# ProfileType=3: A portion of the line must rest on the seabed and the anchor tension is zero
# ProfileType=4: The line is negatively buoyant, seabed interaction is enabled, and the line
# is longer than a full L between end points (including stretching) i.e. it is horizontal
# along the seabed from the anchor, then vertical to the fairlaed. Computes the maximum
# stretched length of the line with seabed interaction beyond which the line would have to
# double-back on itself; the line forms an "L" between the anchor and fairlead. Then it
# models it as bunched up on the seabed (instead of throwing an error)
EA_W = EA/W
# calculate what line length would be hanging it it were fully slack, vertical
if CB < 0: # free floating (potentially U shaped case)
LHanging1 = np.sqrt(2.0*( -CB)*EA_W + EA_W*EA_W) - EA_W # unstretched hanging length at end A
LHanging2 = np.sqrt(2.0*(ZF-CB)*EA_W + EA_W*EA_W) - EA_W # unstretched hanging length at end B
LHanging = LHanging1+LHanging2
else: # at least one end on seabed
LHanging = np.sqrt(2.0*ZF*EA_W + EA_W*EA_W) - EA_W # unstretched length of line hanging vertically to seabed
# calculate a vertical stiffness estimate for an end lifting off the seabed
def dV_dZ_s(z0, H): # height off seabed to evaluate at (infinite if 0), horizontal tension
#return W*(z0*W/H + 1)/np.sqrt( (z0*W/H + 1)**2 - 1) # inelastic apprxoimation
return W # returning a fully slack line approximation,
# because a large value here risks adding a bad cross coupling term in the system stiffness matrix
# ProfileType 0 case - entirely along seabed
if ZF==0.0 and CB >= 0.0 and W > 0:
ProfileType = 0
if CB==0 or XF <= L: # case 1: no friction, or zero tension
HF = np.max([0, (XF/L - 1.0)*EA])
HA = 1.0*HF
elif 0.5*L + EA/CB/W*(1-XF/L) <= 0: # case 2: seabed friction but tension at anchor (xB estimate < 0)
HF = (XF/L -1.0)*EA + 0.5*CB*W*L
HA = np.max([0.0, HF - CB*W*L])
else: # case 3: seabed friction and zero anchor tension
HF = np.sqrt(2*EA*CB*W*(XF-L))
HA = 0.0
VF = 0.0
VA = 0.0
if HF > 0: # if taut
dHF_dXF = EA/L # approximation <<< what about friction? <<<<<<<<
#dVF_dZF = W + HF/L # vertical stiffness <<< approximation a
dVF_dZF = dV_dZ_s(Tol, HF) # vertical stiffness <<< approximation b
else: # if slack
dHF_dXF = 0.0
dVF_dZF = W # vertical stiffness
info["HF"] = HF # solution to be used to start next call (these are the solved variables, may be for anchor if line is reversed)
info["VF"] = 0.0
info["stiffnessB"] = np.array([[ dHF_dXF, 0.0], [0.0, dVF_dZF]])
info["stiffnessA"] = np.array([[ dHF_dXF, 0.0], [0.0, dVF_dZF]])
info["stiffnessAB"] = np.array([[-dHF_dXF, 0.0], [0.0, 0.0]])
info["LBot"] = L
info['ProfileType'] = 0
info['Zextreme'] = 0
if plots > 0:
if CB > 0 and XF > L:
xB = L - HF/W/CB # location of point at which line tension reaches zero
else:
xB = 0.0
# z values remain zero in this case
if CB==0 or XF <= L: # case 1: no friction, or zero tension
Xs = XF/L*s # X values uniformly distributed
Te = Te + np.max([0, (XF/L - 1.0)*EA]) # uniform tension
elif xB <= 0: # case 2: seabed friction but tension at anchor
Xs = s*(1+CB*W/EA*(0.5*s-xB))
Te = HF + CB*W*(s-L)
else: # case 3: seabed friction and zero anchor tension
for I in range(nNodes):
if s[I] <= xB: # if this node is in the zero tension range
Xs[I] = s[I]; # x is unstretched, z and Te remain zero
else: # the tension is nonzero
Xs[I] = s[I] + CB*W/EA*(s[I] - xB)**2
Te[I] = HF - CB*W*(L-s[I])
# ProfileType 4 case - fully slack
elif (W > 0.0) and (L >= XF + LHanging):
if CB >= 0.0:
ProfileType = 4
# this is a special case that requires no iteration
HF = 0.0
VF = W*LHanging
HA = 0.0
VA = 0.0
dVF_dZF = W / np.sqrt(2.0*ZF/EA_W + 1.0) # vertical stiffness
info["HF"] = HF # solution to be used to start next call (these are the solved variables, may be for anchor if line is reversed)
info["VF"] = VF
info["stiffnessB"] = np.array([[0.0, 0.0], [0.0, dVF_dZF]])
info["stiffnessA"] = np.array([[0.0, 0.0], [0.0, W]])
info["stiffnessAB"] = np.array([[0.0, 0.0], [0.0, 0.0]])
info["LBot"] = L - LHanging
info['ProfileType'] = 4
info['Zextreme'] = 0
if plots > 0:
for I in range(nNodes):
if s[I] > L-LHanging: # this node is on the suspended/hanging portion of the line
Xs[I] = XF
Zs[I] = ZF - ( L-s[I] + 0.5*W/EA*(L-s[I])**2 ) # <<<< double check this
Te[I] = W*(L-s[I])
else: # this node is on the seabed
Xs[I] = np.min([s[I], XF])
Zs[I] = 0.0
Te[I] = 0.0
else: # U shaped
ProfileType = 5
HF = 0.0
VF = W*LHanging2
HA = 0.0
VA = -W*LHanging1
dVF_dZF = W / np.sqrt(2.0*ZF/EA_W + 1.0) # vertical stiffness
info["HF"] = HF # solution to be used to start next call (these are the solved variables, may be for anchor if line is reversed)
info["VF"] = VF
info["stiffnessB"] = np.array([[0.0, 0.0], [0.0, W / np.sqrt(2.0*(ZF-CB)/EA_W + 1.0)]])
info["stiffnessA"] = np.array([[0.0, 0.0], [0.0, W / np.sqrt(2.0*( -CB)/EA_W + 1.0)]])
info["stiffnessAB"] = np.array([[0.0, 0.0], [0.0, 0.0]])
info["LBot"] = L - LHanging
info['ProfileType'] = 5
info['Zextreme'] = CB
if plots > 0:
for I in range(nNodes):
if s[I] < LHanging1: # the 1st suspended/hanging portion of the line
Xs[I] = 0.0
Zs[I] = -s[I] - W/EA*(LHanging1*s[I] - 0.5*s[I]**2 )
Te[I] = W*s[I]
elif s[I] <= L-LHanging2: # the middle portion of the line, slack along the seabed
Xs[I] = (s[I]-LHanging1)*XF/(L-LHanging1-LHanging2)
Zs[I] = CB
Te[I] = 0.0
else: # the 2nd suspended/hanging portion of the line
Lms = L - s[I] # distance from end B
Xs[I] = XF
Zs[I] = ZF - Lms - W/EA*(LHanging2*Lms - 0.5*Lms**2 )
Te[I] = W*Lms
# Use an iterable solver function to solve for the forces on the line
else:
# Initialize some commonly used terms that don't depend on the iteration:
WL = W *L
WEA = W *EA
L_EA = L /EA
CB_EA = CB/EA
#MaxIter = 50 #int(1.0/Tol) # Smaller tolerances may take more iterations, so choose a maximum inversely proportional to the tolerance
# more initialization
I = 1 # Initialize iteration counter
FirstIter = 1 # 1 means first attempt (can be retried), 0 means it's alread been retried, -1 triggers a retry
# make HF and VF initial guesses if either was provided as zero <<<<<<<<<<<< why does it matter if VF0 is zero??
if HF0 <= 0 or VF0 <= 0:
XF2 = XF*XF;
ZF2 = ZF*ZF;
if ( L <= np.sqrt( XF2 + ZF2 ) ): # if the current mooring line is taut
Lamda0 = 0.2
else: # The current mooring line must be slack and not vertical
Lamda0 = np.sqrt( 3.0*( ( L*L - ZF2 )/XF2 - 1.0 ) )
HF = np.max([ abs( 0.5*W* XF/ Lamda0 ), Tol ]); # ! As above, set the lower limit of the guess value of HF to the tolerance
VF = 0.5*W*( ZF/np.tanh(Lamda0) + L )
else:
HF = 1.0*HF0
VF = 1.0*VF0
# >>> note, the above Tol uses should be adjusted now that I've changed it to be absolute and distance <<<
# make sure required values are non-zero
HF = np.max([ HF, Tol ])
XF = np.max([ XF, Tol ])
ZF = np.max([ ZF, Tol ])
# some initial values just for printing before they're filled in
EXF=0
EZF=0
# Solve the analytical, static equilibrium equations for a catenary (or taut) mooring line with seabed interaction:
X0 = [HF, VF]
Ytarget = [0,0]
args = dict(cat=[XF, ZF, L, EA, W, CB, WL, WEA, L_EA, CB_EA], step=[0.15,1.0,1.5])
# call the master solver function
#X, Y, info2 = msolve.dsolve(eval_func_cat, X0, Ytarget=Ytarget, step_func=step_func_cat, args=args, tol=Tol, maxIter=MaxIter, a_max=1.2)
X, Y, info2 = dsolve2(eval_func_cat, X0, Ytarget=Ytarget, step_func=step_func_cat, args=args,
ytol=Tol, stepfac=1, maxIter=MaxIter, a_max=1.2)
# retry if it failed
if info2['iter'] >= MaxIter-1 or info2['oths']['error']==True or np.linalg.norm(info2['err']) > 10*Tol:
# ! Perhaps we failed to converge because our initial guess was too far off.
# (This could happen, for example, while linearizing a model via large
# pertubations in the DOFs.) Instead, use starting values documented in:
# Peyrot, <NAME>. and <NAME>., "Analysis Of Cable Structures,"
# Computers & Structures, Vol. 10, 1979, pp. 805-813:
# NOTE: We don't need to check if the current mooring line is exactly
# vertical (i.e., we don't need to check if XF == 0.0), because XF is
# limited by the tolerance above. */
if info2['iter'] >= MaxIter-1 and XF/ZF < 0.001: # if it's nearly vertical, keep iterating from the last point
HF = X[0]
VF = X[1]
else: # otherwise try starting from some good initial guesses
if ( L <= np.sqrt( XF**2 + ZF**2 ) ): # if the current mooring line is taut
Lamda0 = 0.2
else: # The current mooring line must be slack and not vertical
Lamda0 = np.sqrt( 3.0*( ( L*L - ZF**2 )/XF**2 - 1.0 ) )
HF = np.max([ abs( 0.5*W* XF/ Lamda0 ), Tol ]) # As above, set the lower limit of the guess value of HF to the tolerance
VF = 0.5*W*( ZF/np.tanh(Lamda0) + L )
X0 = [HF, VF]
Ytarget = [0,0]
args = dict(cat=[XF, ZF, L, EA, W, CB, WL, WEA, L_EA, CB_EA], step=[0.1,0.8,1.5]) # step: alpha_min, alpha0, alphaR
# call the master solver function
#X, Y, info3 = msolve.dsolve(eval_func_cat, X0, Ytarget=Ytarget, step_func=step_func_cat, args=args, tol=Tol, maxIter=MaxIter, a_max=1.1) #, dX_last=info2['dX'])
X, Y, info3 = dsolve2(eval_func_cat, X0, Ytarget=Ytarget, step_func=step_func_cat, args=args,
ytol=Tol, stepfac=1, maxIter=MaxIter, a_max=1.2)
# retry if it failed
if info3['iter'] >= MaxIter-1 or info3['oths']['error']==True:
X0 = X
Ytarget = [0,0]
args = dict(cat=[XF, ZF, L, EA, W, CB, WL, WEA, L_EA, CB_EA], step=[0.1,1.0,2.0])
# call the master solver function
#X, Y, info4 = msolve.dsolve(eval_func_cat, X0, Ytarget=Ytarget, step_func=step_func_cat, args=args, tol=Tol, maxIter=10*MaxIter, a_max=1.15) #, dX_last=info3['dX'])
X, Y, info4 = dsolve2(eval_func_cat, X0, Ytarget=Ytarget, step_func=step_func_cat, args=args,
ytol=Tol, stepfac=1, maxIter=MaxIter, a_max=1.2)
# check if it failed
if info4['iter'] >= 10*MaxIter-1 or info4['oths']['error']==True:
print("catenary solve failed on all 3 attempts.")
print(f"catenary({XF}, {ZF}, {L}, {EA}, {W}, CB={CB}, HF0={HF0}, VF0={VF0}, Tol={Tol}, MaxIter={MaxIter}, plots=1)")
print("First attempt's iterations are as follows:")
for i in range(info2['iter']+1):
print(f" Iteration {i}: HF={info2['Xs'][i,0]: 8.4e}, VF={info2['Xs'][i,1]: 8.4e}, EX={info2['Es'][i,0]: 6.2e}, EZ={info2['Es'][i,1]: 6.2e}")
print("Second attempt's iterations are as follows:")
for i in range(info3['iter']+1):
print(f" Iteration {i}: HF={info3['Xs'][i,0]: 8.4e}, VF={info3['Xs'][i,1]: 8.4e}, EX={info3['Es'][i,0]: 6.2e}, EZ={info3['Es'][i,1]: 6.2e}")
print("Last attempt's iterations are as follows:")
for i in range(info4['iter']+1):
print(f" Iteration {i}: HF={info4['Xs'][i,0]: 8.4e}, VF={info4['Xs'][i,1]: 8.4e}, EX={info4['Es'][i,0]: 6.2e}, EZ={info4['Es'][i,1]: 6.2e}")
'''
# plot solve performance
fig, ax = plt.subplots(4,1, sharex=True)
ax[0].plot(np.hstack([info2['Xs'][:,0], info3['Xs'][:,0], info4['Xs'][:,0]]))
ax[1].plot(np.hstack([info2['Xs'][:,1], info3['Xs'][:,1], info4['Xs'][:,1]]))
ax[2].plot(np.hstack([info2['Es'][:,0], info3['Es'][:,0], info4['Es'][:,0]]))
ax[3].plot(np.hstack([info2['Es'][:,1], info3['Es'][:,1], info4['Es'][:,1]]))
ax[0].set_ylabel("HF")
ax[1].set_ylabel("VF")
ax[2].set_ylabel("X err")
ax[3].set_ylabel("Z err")
# plot solve path
plt.figure()
#c = np.hypot(info2['Es'][:,0], info2['Es'][:,1])
c = np.arange(info2['iter']+1)
c = cm.jet((c-np.min(c))/(np.max(c)-np.min(c)))
for i in np.arange(info2['iter']):
plt.plot(info2['Xs'][i:i+2,0], info2['Xs'][i:i+2,1],":", c=c[i])
plt.plot(info2['Xs'][0,0], info2['Xs'][0,1],"o")
c = np.arange(info3['iter']+1)
c = cm.jet((c-np.min(c))/(np.max(c)-np.min(c)))
for i in np.arange(info3['iter']):
plt.plot(info3['Xs'][i:i+2,0], info3['Xs'][i:i+2,1], c=c[i])
plt.plot(info3['Xs'][0,0], info3['Xs'][0,1],"*")
c = np.arange(info4['iter']+1)
c = cm.jet((c-np.min(c))/(np.max(c)-np.min(c)))
for i in np.arange(info4['iter']):
plt.plot(info4['Xs'][i:i+2,0], info4['Xs'][i:i+2,1], c=c[i])
plt.plot(info4['Xs'][0,0], info4['Xs'][0,1],"*")
plt.title("catenary solve path for troubleshooting")
plt.show()
#breakpoint()
'''
raise CatenaryError("catenary solver failed.")
else: # if the solve was successful,
info.update(info4['oths']) # copy info from last solve into existing info dictionary
info['catenary'] = info4
else: # if the solve was successful,
info.update(info3['oths']) # copy info from last solve into existing info dictionary
info['catenary'] = info3
else: # if the solve was successful,
info.update(info2['oths']) # copy info from last solve into existing info dictionary
info['catenary'] = info2
# check for errors ( WOULD SOME NOT ALREADY HAVE BEEN CAUGHT AND RAISED ALREADY?)
if info['error']==True:
#breakpoint()
# >>>> what about errors for which we can first plot the line profile?? <<<<
raise CatenaryError("Error in catenary computations: "+info['message'])
#if info['Zextreme'] < CB:
# info["warning"] = "Line is suspended from both ends but hits the seabed (this isn't allowed in MoorPy)"
ProfileType = info['ProfileType']
HF = X[0]
VF = X[1]
HA = info['HA']
VA = info['VA']
# --- now that the iterative solve is over, check some things on the results, handle plotting, etc. ---
# compute the Zextreme value - for a freely suspended line, if necessary, check to ensure the line doesn't droop and hit the seabed
if info['ProfileType']==1 and CB < 0 and VF-WL < 0.0: # only need to do this if the line is slack (has zero slope somewhere)
VFMinWL = VF - WL;
LBot = L - VF/W; # unstretched length of line resting on seabed (Jonkman's PhD eqn 2-38), LMinVFOVrW
HF_W = HF/W;
HF_WEA = HF/WEA
VF_WEA = VF/WEA
VF_HF = VF/HF
VFMinWL_HF = VFMinWL/HF
VF_HF2 = VF_HF *VF_HF
VFMinWL_HF2 = VFMinWL_HF*VFMinWL_HF
SQRT1VF_HF2 = np.sqrt( 1.0 + VF_HF2 )
SQRT1VFMinWL_HF2 = np.sqrt( 1.0 + VFMinWL_HF2 )
# this is indicated by the anchor force having a positive value, meaning it's helping hold up the line
info["Sextreme"] = L-VF/W # arc length where slope is zero
info["Zextreme"] = (1 - SQRT1VFMinWL_HF2)*HF_W - 0.5* VFMinWL**2/WEA # max or min line elevation (where slope=0)
info["Xextreme"] = ( -np.log(VFMinWL_HF + SQRT1VFMinWL_HF2))*HF_W + HF*info["Sextreme"]/EA
else:
info["Sextreme"] = 0.0
info["Zextreme"] = 0.0
info["Xextreme"] = 0.0
# handle special case of a U-shaped line that has seabed contact (using 2 new catenary solves)
if info['ProfileType']==1 and info["Zextreme"] < CB:
# we will solve this as two separate lines to form the U shape
info['ProfileType'] = 'U'
ProfileType = 'U'
X1_0 = info['Xextreme'] # define fake anchor point as lowest point of line (if the seabed wasn't there)
X2_0 = XF - X1_0
L1 = info['Sextreme']
L2 = L-L1
Z1 = CB # negative of height from seabed to original 'anchor' end [m]
Z2 = -Z1 + ZF # height from seabed to fairlead end
# set up a 1D solve for the correct choice of the anchor point so that horizontal tensions balance
def eval_func_U(X, args):
info = dict(error=False)
X1 = X[0]
X2 = XF-X1
# note: reducing tolerances for these sub-calls <<< how much is good? <<<
(fAH1, fAV1, fBH1, fBV1, info1) = catenary(X1, Z1, L1, EA, W, CB=0, Tol=0.5*Tol, MaxIter=MaxIter)
(fAH2, fAV2, fBH2, fBV2, info2) = catenary(X2, Z2, L2, EA, W, CB=0, Tol=0.5*Tol, MaxIter=MaxIter)
Himbalance = fBH2 - fBH1
K1 = info1['stiffnessA'] # note: this refers to the upper end of this half of the line (since it is called with Z<0)
K2 = info2["stiffnessB"]
info['dH_dX'] = K1[0,0] + K2[0,0] # horizontal stiffness on connection point on seabed between two line portions
#print(f" X1 = {X1}, H1 = {fBH1}, H2 = {fBH2}, err={Himbalance}, dH/dX = {info['dH_dX']}")\
#breakpoint()
return np.array([Himbalance]), info, False # returns Y value, misc dict, and stop flag
def step_func_U(X, args, Y, info, Ytarget, err, tols, iter, maxIter):
dX = - err[0] / info['dH_dX']
#print(f" Step is {dX}")
return np.array([dX]) # returns dX (step to make)
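        # Note: eval_func_U and step_func_U together amount to a 1D Newton iteration on the
        # seabed touchdown coordinate X1:  X1 <- X1 - (fBH2 - fBH1)/(dH/dX), where dH/dX is
        # the combined horizontal stiffness of the two half-lines. Convergence means the
        # horizontal tensions of the two halves balance at the contact point.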
# call this to solve for line shapes that balance the horizontal tension in the line
X, Y, infoU = dsolve2(eval_func_U, [X1_0], step_func=step_func_U, ytol=0.25*Tol, stepfac=1, maxIter=20, a_max=1.2, display=0)
X1 = X[0]
X2 = XF-X1
# call one more time to get final values
(fAH1, fAV1, fBH1, fBV1, info1) = catenary(X1, Z1, L1, EA, W, CB=0, Tol=0.5*Tol, MaxIter=MaxIter, plots=plots)
(fAH2, fAV2, fBH2, fBV2, info2) = catenary(X2, Z2, L2, EA, W, CB=0, Tol=0.5*Tol, MaxIter=MaxIter, plots=plots)
if plots > 0 or (info1['error'] and info2['error']):
s = np.hstack([ info1["s" ] , info2["s" ]+L1 ])
Xs = np.hstack([ info1["X" ] , info2["X" ]+X1 ])
Zs = np.hstack([ info1["Z" ] , info2["Z" ]+Z1 ])
Te = np.hstack([ info1["Te"] , info2["Te"] ])
# re-reverse line distributed data back to normal if applicable
'''
if reverseFlag:
info['s'] = L - info['s' ][::-1]
info['X'] = XF - info['X' ][::-1]
info['Z'] = info['Z' ][::-1] - ZF # remember ZF still has a flipped sign right now
info['Te'] = info['Te'][::-1]
'''
if flipFlag:
raise Exception("flipFlag connot be True for the case of a U shaped line with seabed contact. Something must be wrong.")
# get stiffnesses (check sign of A!)
K1 = info1['stiffnessA'] # note: this refers to the upper end of this half of the line (since it is called with Z<0)
K2 = info2['stiffnessB']
dH_dX = 1./(1./K1[0,0] + 1./K2[0,0]) # = K1[0,0]*K2[0,0]/(K1[0,0] + K2[0,0])
Kmid = K1[0,0] + K2[0,0] # horizontal stiffness on connection point on seabed between two line portions
dxdH = 1.0/Kmid #= 1/(K1[0,0] + K2[0,0])
info['stiffnessA'] = np.array([[ dH_dX , K1[0,1] *K2[0,0]*dxdH ],
[K1[1,0] *K2[0,0]*dxdH, K1[1,1] -K1[1,0]*dxdH*K1[0,1]]])
info['stiffnessB'] = np.array([[ dH_dX , K2[0,1] *K1[0,0]*dxdH ],
[K2[1,0] *K1[0,0]*dxdH, K2[1,1] -K2[1,0]*dxdH*K2[0,1]]])
info['stiffnessAB']= np.array([[-K1[0,0] *K2[0,0]*dxdH, -K1[0,1] *K2[0,0]*dxdH ], # this is the lower-left submatrix, A motions, B reaction forces
[-K1[0,0] *dxdH*K2[1,0], -K1[0,1] *dxdH*K2[1,0] ]])
# xA zA xB zB
info['K'] = np.array([[ K1[0,0] *K2[0,0]*dxdH, K1[0,1] *K2[0,0]*dxdH , -K2[0,0] *K1[0,0]*dxdH, -K2[0,1] *K1[0,0]*dxdH ], # HA
[ K1[1,0] *K2[0,0]*dxdH, K1[1,1] -K1[1,0]*dxdH*K1[0,1], -K2[0,0] *dxdH*K1[1,0], -K2[0,1] *dxdH*K1[1,0] ], # VA
[ -K1[0,0] *K2[0,0]*dxdH, -K1[0,1] *K2[0,0]*dxdH , K2[0,0] *K1[0,0]*dxdH, K2[0,1] *K1[0,0]*dxdH ], # HB
[ -K1[0,0] *dxdH*K2[1,0], -K1[0,1] *dxdH*K2[1,0] , K2[1,0] *K1[0,0]*dxdH, K2[1,1] -K2[1,0]*dxdH*K2[0,1] ]]) # VB
'''
\frac{ \pderiv{H_A}{x_A}\pderiv{H_B}{x_B}}{\pderiv{H_A}{x_A}+\pderiv{H_B}{x_B}}
&\frac{ \pderiv{H_A}{z_A}\pderiv{H_B}{x_B}}{\pderiv{H_A}{x_A}+\pderiv{H_B}{x_B}}
&-\frac{\pderiv{H_A}{x_A}\pderiv{H_B}{x_B}}{\pderiv{H_A}{x_A}+\pderiv{H_B}{x_B}}
&-\frac{\pderiv{H_A}{x_A}\pderiv{H_B}{z_B}}{\pderiv{H_A}{x_A}+\pderiv{H_B}{x_B}} \\
\frac{ \pderiv{V_A}{x_A}\pderiv{H_B}{x_B}}{\pderiv{H_A}{x_A}+\pderiv{H_B}{x_B}}
&\pderiv{V_A}{z_A} - \frac{ \pderiv{H_A}{z_A}\pderiv{V_A}{x_A}}{\pderiv{H_A}{x_A}+\pderiv{H_B}{x_B}}
&-\frac{\pderiv{V_A}{x_A}\pderiv{H_B}{x_B}}{\pderiv{H_A}{x_A}+\pderiv{H_B}{x_B}}
&-\frac{\pderiv{V_A}{x_A}\pderiv{H_B}{z_B}}{\pderiv{H_A}{x_A}+\pderiv{H_B}{x_B}} \\
-\frac{ \pderiv{H_A}{x_A}\pderiv{H_B}{x_B}}{\pderiv{H_A}{x_A}+\pderiv{H_B}{x_B}}
&-\frac{\pderiv{H_A}{z_A}\pderiv{H_B}{x_B}}{\pderiv{H_A}{x_A}+\pderiv{H_B}{x_B}}
&\frac{ \pderiv{H_A}{x_A}\pderiv{H_B}{x_B}}{\pderiv{H_A}{x_A}+\pderiv{H_B}{x_B}}
&\frac{ \pderiv{H_A}{x_A}\pderiv{H_B}{z_B}}{\pderiv{H_A}{x_A}+\pderiv{H_B}{x_B}} \\
-\frac{ \pderiv{H_A}{x_A}\pderiv{V_B}{x_B}}{\pderiv{H_A}{x_A}+\pderiv{H_B}{x_B}}
&-\frac{\pderiv{H_A}{z_A}\pderiv{V_B}{x_B}}{\pderiv{H_A}{x_A}+\pderiv{H_B}{x_B}}
&\frac{ \pderiv{H_A}{x_A}\pderiv{V_B}{x_B}}{\pderiv{H_A}{x_A}+\pderiv{H_B}{x_B}}
&\pderiv{V_B}{z_B} - \frac{ \pderiv{H_B}{z_B}\pderiv{V_B}{x_B}}{\pderiv{H_A}{x_A}+\pderiv{H_B}{x_B}}
for a normal line
\pderiv{H_B}{x_B} &? & -\pderiv{H_B}{x_B} & \pderiv{H_B}{z_B}\\ # HA
? & ? & 0 & 0 \\ # VA
-\pderiv{H_B}{x_B}+ & 0ish & \pderiv{H_B}{x_B} & \pderiv{H_B}{z_B}\\ # HB
-\pderiv{V_B}{x_B}+ & 0ish & \pderiv{V_B}{x_B} & \pderiv{V_B}{z_B} # VB
# sorted
K = np.array([[ dH_dX, K1[0,1] *K2[0,0]*dxdH , -dH_dX, -K2[0,1] *K1[0,0]*dxdH ], # HA
[ K1[1,0] *K2[0,0]*dxdH, K1[1,1] -K1[1,0]*dxdH*K1[0,1], -K2[0,0] *dxdH*K1[1,0], -K2[0,1] *dxdH*K1[1,0] ], # VA
[ -dH_dX, -K1[0,1] *K2[0,0]*dxdH , dH_dX, K2[0,1] *K1[0,0]*dxdH ], # HB
[ -K1[0,0] *dxdH*K2[1,0], -K1[0,1] *dxdH*K2[1,0] , K2[1,0] *K1[0,0]*dxdH, K2[1,1] -K2[1,0]*dxdH*K2[0,1] ]]) # VB
'''
info['LBot'] = info1['LBot'] + info2['LBot']
# not very useful outputs for this case:
info["Sextreme"] = L1 - info1['LBot']
info["Zextreme"] = CB
info["Xextreme"] = X1 - info1['LBot']
#FxA = fAH1
#FzA = fAV1
#FxB = fBH2
#FzB = fBV2
HA = fAH1
VA = fAV1
HF = -fBH2
VF = -fBV2
if plots > 3:
plt.plot(Xs, Zs)
plt.show()
# the normal case
else:
# do plotting-related calculations if needed (plots=1: show plots; plots=2: just return values)
if plots > 0 or info['error']==True:
# calculate some commonly used terms that depend on HF and VF: AGAIN
VFMinWL = VF - WL;
LBot = L - VF/W; # unstretched length of line resting on seabed (Jonkman's PhD eqn 2-38), LMinVFOVrW
HF_W = HF/W;
#HF_WEA = HF/WEA
#VF_WEA = VF/WEA
VF_HF = VF/HF
VFMinWL_HF = VFMinWL/HF
VF_HF2 = VF_HF *VF_HF
#VFMinWL_HF2 = VFMinWL_HF*VFMinWL_HF
#SQRT1VF_HF2 = np.sqrt( 1.0 + VF_HF2 )
SQRT1VFMinWL_HF2 = np.sqrt( 1.0 + VFMinWL_HF**2 )
for I in range(nNodes):
# calculate some values for the current node
Ws = W *s[I]
VFMinWLs = VFMinWL + Ws # = VF - W*(L-s[I])
VFMinWLs_HF = VFMinWLs/HF
s_EA = s[I] /EA
SQRT1VFMinWLs_HF2 = np.sqrt( 1.0 + VFMinWLs_HF*VFMinWLs_HF )
# No portion of the line rests on the seabed
if ProfileType==1:
Xs[I] = ( np.log( VFMinWLs_HF + SQRT1VFMinWLs_HF2 ) - np.log( VFMinWL_HF + SQRT1VFMinWL_HF2 ) )*HF_W + s_EA* HF;
Zs[I] = ( SQRT1VFMinWLs_HF2 - SQRT1VFMinWL_HF2 )*HF_W + s_EA*( VFMinWL + 0.5*Ws );
Te[I] = np.sqrt( HF*HF + VFMinWLs*VFMinWLs );
# A portion of the line must rest on the seabed and the anchor tension is zero
elif ProfileType in [2,3]:
if CB > 0:
xB = LBot - HF_W/CB # location of point at which line tension reaches zero
else:
xB = 0.0
xBlim = max(xB, 0.0)
if s[I] <= xB: # (aka Lbot - s > HF/(CB*W) ) if this node rests on the seabed and the tension is zero
Xs[I] = s[I];
Zs[I] = 0.0;
Te[I] = 0.0;
elif( s[I] <= LBot ): # // .TRUE. if this node rests on the seabed and the tension is nonzero
Xs[I] = s[I] + 0.5*CB*W/EA * (s[I]*s[I] - 2.0*xB*s[I] + xB*xBlim)
Zs[I] = 0.0;
Te[I] = HF + CB*VFMinWLs;
else: # // LBot < s <= L ! This node must be above the seabed
Xs[I] = LBot + HF_W*np.log( VFMinWLs_HF + SQRT1VFMinWLs_HF2 ) + HF*s_EA + 0.5*CB*W/EA *(-LBot*LBot + xB*xBlim);
Zs[I] = ( -1.0 + SQRT1VFMinWLs_HF2)*HF_W + s_EA*(VFMinWL + 0.5*Ws ) + 0.5* VFMinWL*VFMinWL/WEA;
Te[I] = np.sqrt( HF*HF + VFMinWLs*VFMinWLs );
if plots > 0:
# re-reverse line distributed data back to normal if applicable
if reverseFlag:
s = L - s [::-1]
Xs= XF - Xs[::-1]
Zs= Zs[::-1] - ZF # remember ZF still has a flipped sign right now
Te= Te[::-1]
if flipFlag:
Zs = -Zs # flip calculated line Z coordinates (hopefully this is right)
# save data to info dict
info["X" ] = Xs
info["Z" ] = Zs
info["s" ] = s
info["Te"] = Te
if plots==2 or info['error']==True: # also show the profile plot
plt.figure()
plt.plot(Xs,Zs)
# get A and AB stiffness matrices for catenary profiles here based on fairlead (B) stiffness matrix
if ProfileType == 1:
info['stiffnessA'] = np.array(info['stiffnessB'])
info['stiffnessAB'] = -info['stiffnessB']
elif ProfileType in [2,3]:
if CB == 0.0:
info['stiffnessA'] = np.array([[info['stiffnessB'][0,0], 0], [0, dV_dZ_s(Tol, HF)]]) # vertical term is very approximate
info['stiffnessAB'] = np.array([[-info['stiffnessB'][0,0], 0], [0, 0]]) # note: A and AB stiffnesses for this case only valid if zero friction
else:
                info['stiffnessA'] = np.ones([2,2])
from .. import select
from .. import utils
from .._lazyload import matplotlib as mpl
from . import colors
from .tools import create_colormap
from .tools import create_normalize
from .tools import generate_colorbar
from .tools import generate_legend
from .tools import label_axis
from .utils import _get_figure
from .utils import _in_ipynb
from .utils import _is_color_array
from .utils import _with_default
from .utils import parse_fontsize
from .utils import show
from .utils import temp_fontsize
import numbers
import numpy as np
import pandas as pd
import warnings
plt = mpl.pyplot
def _squeeze_array(x):
x = utils.toarray([x]).squeeze()
try:
len(x)
except TypeError:
x = x[None]
return x
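# For example (illustrative only): _squeeze_array(3.0) yields a length-1 array, and an
# (n, 1) column vector is flattened to shape (n,), so callers can always treat the
# result as a 1-d array.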
class _ScatterParams(object):
def __init__(
self,
x,
y,
z=None,
c=None,
mask=None,
discrete=None,
cmap=None,
cmap_scale=None,
vmin=None,
vmax=None,
s=None,
legend=None,
colorbar=None,
xlabel=None,
ylabel=None,
zlabel=None,
label_prefix=None,
shuffle=True,
):
self._x = x
self._y = y
self._z = z if z is not None else None
self._c = c
self._mask = mask
self._discrete = discrete
self._cmap = cmap
self._cmap_scale = cmap_scale
self._vmin_set = vmin
self._vmax_set = vmax
self._s = s
self._legend = legend
self._colorbar = colorbar
self._labels = None
self._c_discrete = None
self._label_prefix = label_prefix
self._xlabel = xlabel
self._ylabel = ylabel
self._zlabel = zlabel
self.shuffle = shuffle
self.check_size()
self.check_c()
self.check_mask()
self.check_s()
self.check_discrete()
self.check_legend()
self.check_cmap()
self.check_cmap_scale()
self.check_vmin_vmax()
@property
def x_array(self):
return _squeeze_array(self._x)
@property
def y_array(self):
return _squeeze_array(self._y)
@property
def z_array(self):
return _squeeze_array(self._z) if self._z is not None else None
@property
def size(self):
try:
return self._size
except AttributeError:
self._size = len(self.x_array)
return self._size
@property
def plot_idx(self):
try:
return self._plot_idx
except AttributeError:
self._plot_idx = np.arange(self.size)
if self._mask is not None:
self._plot_idx = self._plot_idx[self._mask]
if self.shuffle:
self._plot_idx = np.random.permutation(self._plot_idx)
return self._plot_idx
@property
def x(self):
return self.x_array[self.plot_idx]
@property
def y(self):
return self.y_array[self.plot_idx]
@property
def z(self):
return self.z_array[self.plot_idx] if self._z is not None else None
@property
def data(self):
if self.z is not None:
return [self.x, self.y, self.z]
else:
return [self.x, self.y]
@property
def _data(self):
if self._z is not None:
return [self.x_array, self.y_array, self.z_array]
else:
return [self.x_array, self.y_array]
@property
def s(self):
if self._s is not None:
if isinstance(self._s, numbers.Number):
return self._s
else:
return self._s[self.plot_idx]
else:
return 200 / np.sqrt(self.size)
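            # The default above shrinks with the number of points (roughly 20 for 100 points,
            # 2 for 10,000), so dense scatter plots stay readable without an explicit `s`.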
def constant_c(self):
"""Check if ``c`` is constant.
Returns
-------
c : ``str`` or ``None``
Either None or a single matplotlib color
"""
if self._c is None or isinstance(self._c, str):
return True
elif hasattr(self._c, "__len__") and len(self._c) == self.size:
# technically if self.size == 3 or 4 then this could be
# interpreted as a single color-like
return False
else:
return mpl.colors.is_color_like(self._c)
def array_c(self):
"""Check if ``c`` is an array of matplotlib colors."""
try:
return self._array_c
except AttributeError:
self._array_c = (not self.constant_c()) and _is_color_array(self._c)
return self._array_c
@property
def _c_masked(self):
if self.constant_c() or self._mask is None:
return self._c
else:
return self._c[self._mask]
@property
def c_unique(self):
"""Get unique values in c to avoid recomputing every time."""
try:
return self._c_unique
except AttributeError:
self._c_unique = np.unique(self._c_masked)
return self._c_unique
@property
def n_c_unique(self):
"""Get the number of unique values in `c`."""
try:
return self._n_c_unique
except AttributeError:
self._n_c_unique = len(self.c_unique)
return self._n_c_unique
@property
def discrete(self):
"""Check if the color array is discrete.
If not provided:
* If c is constant or an array, return None
* If cmap is a dict, return True
* If c has 20 or less unique values, return True
* Otherwise, return False
"""
if self._discrete is not None:
return self._discrete
else:
if self.constant_c() or self.array_c():
return None
else:
if isinstance(self._cmap, dict) or not np.all(
[isinstance(x, numbers.Number) for x in self._c_masked]
):
# cmap dictionary or non-numeric values force discrete
return True
else:
# guess based on number of unique elements
if self.n_c_unique > 20:
return False
else:
# are the unique elements integer-like?
return np.allclose(self.c_unique % 1, 0, atol=1e-4)
@property
def c_discrete(self):
"""Discretize ``c``.
If c is discrete then this converts it to
integers from 0 to `n_c_unique`
"""
if self._c_discrete is None:
if isinstance(self._cmap, dict):
self._labels = np.array(
[k for k in self._cmap.keys() if k in self.c_unique]
)
self._c_discrete = np.zeros_like(self._c, dtype=int)
for i, label in enumerate(self._labels):
self._c_discrete[self._c == label] = i
else:
self._c_discrete = np.zeros_like(self._c, dtype=int)
self._c_discrete[self._mask], self._labels = pd.factorize(
self._c_masked, sort=True
)
return self._c_discrete
@property
def c(self):
if self.constant_c():
return self._c
elif self.array_c() or not self.discrete:
return self._c[self.plot_idx]
else:
# discrete c
return self.c_discrete[self.plot_idx]
@property
def labels(self):
"""Get labels associated with each integer c, if c is discrete."""
if self.constant_c() or self.array_c():
return None
elif self.discrete:
# make sure this exists
self.c_discrete
return self._labels
else:
return None
@property
def legend(self):
if self._legend is not None:
return self._legend
else:
if self.constant_c() or self.array_c():
return False
else:
return True
def list_cmap(self):
"""Check if the colormap is a list."""
return hasattr(self._cmap, "__len__") and not isinstance(
self._cmap, (str, dict)
)
def process_string_cmap(self, cmap):
"""Subset a discrete colormap based on the number of colors if necessary."""
cmap = mpl.cm.get_cmap(cmap)
if self.discrete and cmap.N <= 20 and self.n_c_unique <= cmap.N:
return mpl.colors.ListedColormap(cmap.colors[: self.n_c_unique])
else:
return cmap
@property
def cmap(self):
if self._cmap is not None:
if isinstance(self._cmap, dict):
return mpl.colors.ListedColormap(
[mpl.colors.to_rgba(self._cmap[label]) for label in self.labels]
)
elif self.list_cmap():
return create_colormap(self._cmap)
elif isinstance(self._cmap, str):
return self.process_string_cmap(self._cmap)
else:
return self._cmap
else:
if self.constant_c() or self.array_c():
return None
elif self.discrete:
return colors.tab(n=self.n_c_unique)
else:
return self.process_string_cmap("inferno")
@property
def cmap_scale(self):
if self._cmap_scale is not None:
return self._cmap_scale
else:
if self.discrete or not self.legend:
return None
else:
return "linear"
@property
def _use_norm(self):
return self.cmap_scale is not None and self.cmap_scale != "linear"
@property
def _vmin(self):
if self._vmin_set is not None:
return self._vmin_set
else:
if self.constant_c() or self.array_c() or self.discrete:
return None
else:
return np.nanmin(self.c)
@property
def vmin(self):
if self._use_norm:
return None
else:
return self._vmin
@property
def _vmax(self):
if self._vmax_set is not None:
return self._vmax_set
else:
if self.constant_c() or self.array_c() or self.discrete:
return None
else:
return np.nanmax(self.c)
@property
def vmax(self):
if self._use_norm:
return None
else:
return self._vmax
@property
def norm(self):
if self._use_norm:
return create_normalize(self._vmin, self._vmax, scale=self.cmap_scale)
else:
return None
@property
def extend(self):
if self.legend and not self.discrete:
# migrate this to _ScatterParams
extend_min = np.min(self.c) < self._vmin
extend_max = np.max(self.c) > self._vmax
if extend_min:
return "both" if extend_max else "min"
else:
return "max" if extend_max else "neither"
else:
return None
@property
def subplot_kw(self):
if self.z is not None:
return {"projection": "3d"}
else:
return {}
def check_vmin_vmax(self):
if self.constant_c():
if self._vmin_set is not None or self._vmax_set is not None:
warnings.warn(
"Cannot set `vmin` or `vmax` with constant `c={}`. "
"Setting `vmin = vmax = None`.".format(self.c),
UserWarning,
)
self._vmin_set = None
self._vmax_set = None
elif self.discrete:
if self._vmin_set is not None or self._vmax_set is not None:
warnings.warn(
"Cannot set `vmin` or `vmax` with discrete data. "
"Setting to `None`.",
UserWarning,
)
self._vmin_set = None
self._vmax_set = None
def check_legend(self):
# legend and colorbar are synonyms
if self._colorbar is not None:
if self._legend is not None and self._legend != self._colorbar:
raise ValueError(
"Received conflicting values for synonyms "
"`legend={}` and `colorbar={}`".format(self._legend, self._colorbar)
)
else:
self._legend = self._colorbar
if self._legend:
if self.array_c():
warnings.warn(
"`c` is a color array and cannot be used to create a "
"legend. To interpret these values as labels instead, "
"provide a `cmap` dictionary with label-color pairs.",
UserWarning,
)
self._legend = False
elif self.constant_c():
warnings.warn(
"Cannot create a legend with constant `c={}`".format(self.c),
UserWarning,
)
self._legend = False
def check_size(self):
# check data shape
for d in self._data:
if len(d) != self.size:
raise ValueError(
"Expected all axes of data to have the same length"
". Got {}".format([len(d) for d in self._data])
)
def check_c(self):
if not self.constant_c():
self._c = _squeeze_array(self._c)
if not len(self._c) == self.size:
raise ValueError(
"Expected c of length {} or 1. Got {}".format(
self.size, len(self._c)
)
)
def check_mask(self):
if self._mask is not None:
self._mask = _squeeze_array(self._mask)
if not len(self._mask) == self.size:
raise ValueError(
"Expected mask of length {}. Got {}".format(
self.size, len(self._mask)
)
)
def check_s(self):
if self._s is not None and not isinstance(self._s, numbers.Number):
self._s = _squeeze_array(self._s)
if not len(self._s) == self.size:
raise ValueError(
"Expected s of length {} or 1. Got {}".format(
self.size, len(self._s)
)
)
def check_discrete(self):
if self._discrete is False:
if not np.all([isinstance(x, numbers.Number) for x in self._c]):
raise ValueError("Cannot treat non-numeric data as continuous.")
def check_cmap(self):
if isinstance(self._cmap, dict):
# dictionary cmap
if self.constant_c() or self.array_c():
raise ValueError(
"Expected list-like `c` with dictionary cmap."
" Got {}".format(type(self._c))
)
elif not self.discrete:
raise ValueError("Cannot use dictionary cmap with " "continuous data.")
elif np.any([color not in self._cmap for color in np.unique(self._c)]):
missing = set(np.unique(self._c).tolist()).difference(self._cmap.keys())
raise ValueError(
"Dictionary cmap requires a color "
"for every unique entry in `c`. "
"Missing colors for [{}]".format(
", ".join([str(color) for color in missing])
)
)
elif self.list_cmap():
if self.constant_c() or self.array_c():
raise ValueError(
"Expected list-like `c` with list cmap. "
"Got {}".format(type(self._c))
)
def check_cmap_scale(self):
if self._cmap_scale is not None and self._cmap_scale != "linear":
if self.array_c():
warnings.warn(
"Cannot use non-linear `cmap_scale` with " "`c` as a color array.",
UserWarning,
)
self._cmap_scale = "linear"
elif self.constant_c():
warnings.warn(
"Cannot use non-linear `cmap_scale` with constant "
"`c={}`.".format(self._c),
UserWarning,
)
self._cmap_scale = "linear"
elif self.discrete:
warnings.warn(
"Cannot use non-linear `cmap_scale` with discrete data.",
UserWarning,
)
self._cmap_scale = "linear"
def _label(self, label, values, idx):
if label is False:
return None
elif label is not None:
return label
elif self._label_prefix is not None:
return self._label_prefix + str(idx)
elif label is not False and isinstance(values, pd.Series):
return values.name
else:
return None
@property
def xlabel(self):
return self._label(self._xlabel, self._x, "1")
@property
def ylabel(self):
return self._label(self._ylabel, self._y, "2")
@property
def zlabel(self):
if self._z is None:
return None
else:
return self._label(self._zlabel, self._z, "3")
@utils._with_pkg(pkg="matplotlib", min_version=3)
def scatter(
x,
y,
z=None,
c=None,
cmap=None,
cmap_scale="linear",
s=None,
mask=None,
discrete=None,
ax=None,
legend=None,
colorbar=None,
shuffle=True,
figsize=None,
ticks=True,
xticks=None,
yticks=None,
zticks=None,
ticklabels=True,
xticklabels=None,
yticklabels=None,
zticklabels=None,
label_prefix=None,
xlabel=None,
ylabel=None,
zlabel=None,
title=None,
fontsize=None,
legend_title=None,
legend_loc="best",
legend_anchor=None,
legend_ncol=None,
vmin=None,
vmax=None,
elev=None,
azim=None,
filename=None,
dpi=None,
**plot_kwargs,
):
"""Create a scatter plot.
Builds upon `matplotlib.pyplot.scatter` with nice defaults
and handles categorical colors / legends better. For easy access, use
`scatter2d` or `scatter3d`.
Parameters
----------
x : list-like
data for x axis
y : list-like
data for y axis
z : list-like, optional (default: None)
data for z axis
c : list-like or None, optional (default: None)
Color vector. Can be a single color value (RGB, RGBA, or named
matplotlib colors), an array of these of length n_samples, or a list of
discrete or continuous values of any data type. If `c` is not a single
or list of matplotlib colors, the values in `c` will be used to
populate the legend / colorbar with colors from `cmap`
cmap : `matplotlib` colormap, str, dict or None, optional (default: None)
matplotlib colormap. If None, uses `tab20` for discrete data and
`inferno` for continuous data. If a dictionary, expects one key
for every unique value in `c`, where values are valid matplotlib colors
        (hsv, rgb, rgba, or named colors)
cmap_scale : {'linear', 'log', 'symlog', 'sqrt'} or `matplotlib.colors.Normalize`,
optional (default: 'linear')
Colormap normalization scale. For advanced use, see
<https://matplotlib.org/users/colormapnorms.html>
s : float, optional (default: None)
Point size. If `None`, set to 200 / sqrt(n_samples)
mask : list-like, optional (default: None)
boolean mask to hide data points
discrete : bool or None, optional (default: None)
If True, the legend is categorical. If False, the legend is a colorbar.
If None, discreteness is detected automatically. Data containing
non-numeric `c` is always discrete, and numeric data with 20 or less
unique values is discrete.
ax : `matplotlib.Axes` or None, optional (default: None)
axis on which to plot. If None, an axis is created
legend : bool, optional (default: None)
States whether or not to create a legend. If data is continuous,
the legend is a colorbar. If `None`, a legend is created where possible
colorbar : bool, optional (default: None)
Synonym for `legend`
shuffle : bool, optional (default: True)
        If True, shuffles the order of points on the plot.
figsize : tuple, optional (default: None)
Tuple of floats for creation of new `matplotlib` figure. Only used if
`ax` is None.
ticks : True, False, or list-like (default: True)
If True, keeps default axis ticks. If False, removes axis ticks.
If a list, sets custom axis ticks
{x,y,z}ticks : True, False, or list-like (default: None)
If set, overrides `ticks`
ticklabels : True, False, or list-like (default: True)
If True, keeps default axis tick labels. If False, removes axis tick labels.
If a list, sets custom axis tick labels
{x,y,z}ticklabels : True, False, or list-like (default: None)
If set, overrides `ticklabels`
label_prefix : str or None (default: None)
Prefix for all axis labels. Axes will be labelled `label_prefix`1,
        `label_prefix`2, etc. Can be overridden by setting `xlabel`,
`ylabel`, and `zlabel`.
{x,y,z}label : str, None or False (default : None)
Axis labels. Overrides the automatic label given by
label_prefix. If None and label_prefix is None, no label is set
unless the data is a pandas Series, in which case the series name is used.
Override this behavior with `{x,y,z}label=False`
title : str or None (default: None)
axis title. If None, no title is set.
fontsize : float or None (default: None)
Base font size.
legend_title : str (default: None)
title for the colorbar of legend
legend_loc : int or string or pair of floats, default: 'best'
Matplotlib legend location. Only used for discrete data.
See <https://matplotlib.org/api/_as_gen/matplotlib.pyplot.legend.html>
for details.
legend_anchor : `BboxBase`, 2-tuple, or 4-tuple
Box that is used to position the legend in conjunction with loc.
See <https://matplotlib.org/api/_as_gen/matplotlib.pyplot.legend.html>
for details.
    legend_ncol : `int` or `None`, optional (default: None)
Number of columns to show in the legend.
If None, defaults to a maximum of entries per column.
vmin, vmax : float, optional (default: None)
Range of values to use as the range for the colormap.
Only used if data is continuous
elev : int, optional (default: None)
Elevation angle of viewpoint from horizontal for 3D plots, in degrees
azim : int, optional (default: None)
Azimuth angle in x-y plane of viewpoint for 3D plots, in degrees
filename : str or None (default: None)
file to which the output is saved
dpi : int or None, optional (default: None)
The resolution in dots per inch. If None it will default to the value
savefig.dpi in the matplotlibrc file. If 'figure' it will set the dpi
to be the value of the figure. Only used if filename is not None.
**plot_kwargs : keyword arguments
Extra arguments passed to `matplotlib.pyplot.scatter`.
Returns
-------
ax : `matplotlib.Axes`
axis on which plot was drawn
Examples
--------
>>> import scprep
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> data = np.random.normal(0, 1, [200, 3])
>>> # Continuous color vector
>>> colors = data[:, 0]
>>> scprep.plot.scatter(x=data[:, 0], y=data[:, 1], c=colors)
>>> # Discrete color vector with custom colormap
>>> colors = np.random.choice(['a','b'], data.shape[0], replace=True)
>>> data[colors == 'a'] += 5
>>> scprep.plot.scatter(x=data[:, 0], y=data[:, 1], z=data[:, 2],
... c=colors, cmap={'a' : [1,0,0,1], 'b' : 'xkcd:sky blue'})
"""
with temp_fontsize(fontsize):
params = _ScatterParams(
x,
y,
z,
c=c,
mask=mask,
discrete=discrete,
cmap=cmap,
cmap_scale=cmap_scale,
vmin=vmin,
vmax=vmax,
s=s,
legend=legend,
colorbar=colorbar,
xlabel=xlabel,
ylabel=ylabel,
zlabel=zlabel,
label_prefix=label_prefix,
shuffle=shuffle,
)
fig, ax, show_fig = _get_figure(ax, figsize, subplot_kw=params.subplot_kw)
# plot!
sc = ax.scatter(
*(params.data),
c=params.c,
cmap=params.cmap,
norm=params.norm,
s=params.s,
vmin=params.vmin,
vmax=params.vmax,
**plot_kwargs,
)
# label axes
label_axis(
ax.xaxis,
_with_default(xticks, ticks),
_with_default(xticklabels, ticklabels),
params.xlabel,
)
label_axis(
ax.yaxis,
_with_default(yticks, ticks),
_with_default(yticklabels, ticklabels),
params.ylabel,
)
if z is not None:
label_axis(
ax.zaxis,
_with_default(zticks, ticks),
_with_default(zticklabels, ticklabels),
params.zlabel,
)
if title is not None:
ax.set_title(title, fontsize=parse_fontsize(None, "xx-large"))
# generate legend
if params.legend:
if params.discrete:
generate_legend(
{
params.labels[i]: sc.cmap(sc.norm(i))
for i in range(len(params.labels))
},
ax=ax,
loc=legend_loc,
bbox_to_anchor=legend_anchor,
title=legend_title,
ncol=legend_ncol,
)
else:
generate_colorbar(
params.cmap,
ax=ax,
vmin=params.vmin,
vmax=params.vmax,
title=legend_title,
extend=params.extend,
scale=sc.norm,
)
# set viewpoint
if z is not None:
ax.view_init(elev=elev, azim=azim)
# save and show
if show_fig:
show(fig)
if filename is not None:
fig.savefig(filename, dpi=dpi)
return ax
@utils._with_pkg(pkg="matplotlib", min_version=3)
def scatter2d(
data,
c=None,
cmap=None,
cmap_scale="linear",
s=None,
mask=None,
discrete=None,
ax=None,
legend=None,
colorbar=None,
shuffle=True,
figsize=None,
ticks=True,
xticks=None,
yticks=None,
ticklabels=True,
xticklabels=None,
yticklabels=None,
label_prefix=None,
xlabel=None,
ylabel=None,
title=None,
fontsize=None,
legend_title=None,
legend_loc="best",
legend_anchor=None,
legend_ncol=None,
filename=None,
dpi=None,
**plot_kwargs,
):
"""Create a 2D scatter plot.
Builds upon `matplotlib.pyplot.scatter` with nice defaults
and handles categorical colors / legends better.
Parameters
----------
data : array-like, shape=[n_samples, n_features]
Input data. Only the first two components will be used.
c : list-like or None, optional (default: None)
Color vector. Can be a single color value (RGB, RGBA, or named
matplotlib colors), an array of these of length n_samples, or a list of
discrete or continuous values of any data type. If `c` is not a single
or list of matplotlib colors, the values in `c` will be used to
populate the legend / colorbar with colors from `cmap`
cmap : `matplotlib` colormap, str, dict, list or None, optional (default: None)
matplotlib colormap. If None, uses `tab20` for discrete data and
`inferno` for continuous data. If a list, expects one color for every
unique value in `c`, otherwise interpolates between given colors for
continuous data. If a dictionary, expects one key
for every unique value in `c`, where values are valid matplotlib colors
        (hsv, rgb, rgba, or named colors)
cmap_scale : {'linear', 'log', 'symlog', 'sqrt'} or `matplotlib.colors.Normalize`,
optional (default: 'linear')
Colormap normalization scale. For advanced use, see
<https://matplotlib.org/users/colormapnorms.html>
s : float, optional (default: None)
Point size. If `None`, set to 200 / sqrt(n_samples)
mask : list-like, optional (default: None)
boolean mask to hide data points
discrete : bool or None, optional (default: None)
If True, the legend is categorical. If False, the legend is a colorbar.
If None, discreteness is detected automatically. Data containing
non-numeric `c` is always discrete, and numeric data with 20 or less
unique values is discrete.
ax : `matplotlib.Axes` or None, optional (default: None)
axis on which to plot. If None, an axis is created
legend : bool, optional (default: None)
States whether or not to create a legend. If data is continuous,
the legend is a colorbar. If `None`, a legend is created where possible.
colorbar : bool, optional (default: None)
Synonym for `legend`
shuffle : bool, optional (default: True)
        If True, shuffles the order of points on the plot.
figsize : tuple, optional (default: None)
Tuple of floats for creation of new `matplotlib` figure. Only used if
`ax` is None.
ticks : True, False, or list-like (default: True)
If True, keeps default axis ticks. If False, removes axis ticks.
If a list, sets custom axis ticks
{x,y}ticks : True, False, or list-like (default: None)
If set, overrides `ticks`
ticklabels : True, False, or list-like (default: True)
If True, keeps default axis tick labels. If False, removes axis tick labels.
If a list, sets custom axis tick labels
{x,y}ticklabels : True, False, or list-like (default: None)
If set, overrides `ticklabels`
label_prefix : str or None (default: None)
Prefix for all axis labels. Axes will be labelled `label_prefix`1,
        `label_prefix`2, etc. Can be overridden by setting `xlabel` and `ylabel`.
{x,y}label : str or None (default : None)
Axis labels. Overrides the automatic label given by
label_prefix. If None and label_prefix is None, no label is set
unless the data is a pandas Series, in which case the series name is used.
        Override this behavior with `{x,y}label=False`
title : str or None (default: None)
axis title. If None, no title is set.
fontsize : float or None (default: None)
Base font size.
legend_title : str (default: None)
title for the colorbar of legend
legend_loc : int or string or pair of floats, default: 'best'
Matplotlib legend location. Only used for discrete data.
See <https://matplotlib.org/api/_as_gen/matplotlib.pyplot.legend.html>
for details.
legend_anchor : `BboxBase`, 2-tuple, or 4-tuple
Box that is used to position the legend in conjunction with loc.
See <https://matplotlib.org/api/_as_gen/matplotlib.pyplot.legend.html>
for details.
    legend_ncol : `int` or `None`, optional (default: None)
Number of columns to show in the legend.
If None, defaults to a maximum of entries per column.
vmin, vmax : float, optional (default: None)
Range of values to use as the range for the colormap.
Only used if data is continuous
filename : str or None (default: None)
file to which the output is saved
dpi : int or None, optional (default: None)
The resolution in dots per inch. If None it will default to the value
savefig.dpi in the matplotlibrc file. If 'figure' it will set the dpi
to be the value of the figure. Only used if filename is not None.
**plot_kwargs : keyword arguments
Extra arguments passed to `matplotlib.pyplot.scatter`.
Returns
-------
ax : `matplotlib.Axes`
axis on which plot was drawn
Examples
--------
>>> import scprep
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> data = np.random.normal(0, 1, [200, 2])
>>> # Continuous color vector
>>> colors = data[:, 0]
>>> scprep.plot.scatter2d(data, c=colors)
>>> # Discrete color vector with custom colormap
>>> colors = np.random.choice(['a','b'], data.shape[0], replace=True)
>>> data[colors == 'a'] += 10
    >>> scprep.plot.scatter2d(
    ...     data, c=colors, cmap={'a' : [1,0,0,1], 'b' : 'xkcd:sky blue'})
"""
if isinstance(data, list):
data = utils.toarray(data)
if isinstance(data, np.ndarray):
        data = np.atleast_2d(data)
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import ops, nn, ParameterTuple, context, set_seed
from mindspore.train import DatasetHelper, connect_network_with_dataset
import mindspore.dataset as ds
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
set_seed(2)
def _exec_preprocess(network, is_train, dataset, dataset_sink_mode, epoch_num, sink_size):
if dataset_sink_mode and not is_train:
dataset.__loop_size__ = 1
dataset_helper = DatasetHelper(
dataset, dataset_sink_mode, sink_size, epoch_num)
if dataset_sink_mode:
network = connect_network_with_dataset(network, dataset_helper)
return dataset_helper, network
def dynamic_shape_sink_process(network, dataset, is_train=True):
dataset_sink_mode = True
sink_size = 1
epoch_num = 1
dataset_helper, network = _exec_preprocess(
network, is_train, dataset, dataset_sink_mode, epoch_num, sink_size)
network.set_train(is_train)
for inputs in dataset_helper:
outputs = network(*inputs)
return outputs
def fixed_shape_process(network, dataset, is_train=True):
network.set_train(is_train)
for inputs in dataset.create_tuple_iterator():
outputs = network(*inputs)
return outputs
def dataset_generator(data_list):
for data in data_list:
yield data
def get_columns(tensor_num):
columns = []
for i in range(tensor_num):
columns.append("data" + str(i))
return columns
def compare(output, expect):
if isinstance(output, (tuple, list)):
assert isinstance(expect, (tuple, list))
for output_, expect_ in zip(output, expect):
if not compare(output_, expect_):
return False
else:
if not np.allclose(output.asnumpy(), expect.asnumpy(), rtol=1.0e-4, atol=1.0e-4):
return False
return True
class GradNetWrtX(nn.Cell):
def __init__(self, net):
super(GradNetWrtX, self).__init__()
self.net = net
self.grad_op = ops.GradOperation(
get_all=True, get_by_list=True, sens_param=True)
self.params = ParameterTuple(net.trainable_params())
def construct(self, *inputs):
gradient_function = self.grad_op(self.net, self.params)
return gradient_function(*inputs)
class ConcatNet(nn.Cell):
def __init__(self, axis):
super(ConcatNet, self).__init__()
self.op = ops.Concat(axis)
def construct(self, x1, x2):
return self.op((x1, x2))
def dynamic_concat_run(is_grad):
axis = 1
dtype = np.float32
data_list = []
for i in [2, 64]:
data = []
data.append(np.random.rand(i, 16).astype(dtype))
data.append(np.random.rand(i, 32).astype(dtype))
if is_grad:
data.append(np.random.rand(i, 48).astype(dtype))
data_list.append(tuple(data))
column_names = get_columns(len(data_list[0]))
dataset = ds.GeneratorDataset(data_list, column_names, shuffle=False)
dynamic_columns = {column_names[0]: [
None, 16], column_names[1]: [None, 32]}
if is_grad:
dynamic_columns[column_names[-1]] = [None, 48]
dataset.set_dynamic_columns(columns=dynamic_columns)
net = ConcatNet(axis)
if is_grad:
net = GradNetWrtX(net)
output = dynamic_shape_sink_process(net, dataset)
output_cmp = fixed_shape_process(net, dataset)
assert compare(output, output_cmp)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_concat_forward():
"""
Feature: Test Concat.
Description: The shape of inputs is dynamic.
Expectation: Assert that results are consistent with fixed shape.
"""
dynamic_concat_run(False)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_concat_backward():
"""
Feature: Test backward of Concat.
Description: The shape of inputs is dynamic.
Expectation: Assert that results are consistent with fixed shape.
"""
dynamic_concat_run(True)
class BatchNormNet(nn.Cell):
def __init__(self, c):
super(BatchNormNet, self).__init__()
self.bn = nn.BatchNorm1d(c)
def construct(self, input_data):
x = self.bn(input_data)
return x
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_batchnorm():
"""
Feature: Test BatchNorm and its backward.
Description: The shape of inputs is dynamic.
Expectation: Assert that results are consistent with fixed shape.
"""
c = 256
dtype = np.float32
data_list = []
for i in [2, 64]:
data = []
        data.append(np.random.rand(i, c).astype(dtype))
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
import tvm.topi.testing
import numpy as np
from tvm.topi.util import get_const_tuple
from tvm.topi.nn.util import get_pad_tuple
from tvm.contrib.pickle_memoize import memoize
import tvm.testing
_depthwise_conv2d_nchw_implement = {
"generic": [(topi.nn.depthwise_conv2d_nchw, topi.generic.schedule_depthwise_conv2d_nchw)],
"arm_cpu": [
(topi.arm_cpu.depthwise_conv2d_nchw, topi.arm_cpu.schedule_depthwise_conv2d_nchw),
(
topi.arm_cpu.depthwise_conv2d_nchw_spatial_pack,
topi.arm_cpu.schedule_depthwise_conv2d_nchw_spatial_pack,
),
],
"gpu": [(topi.cuda.depthwise_conv2d_nchw, topi.cuda.schedule_depthwise_conv2d_nchw)],
"mali": [(topi.mali.depthwise_conv2d_nchw, topi.mali.schedule_depthwise_conv2d_nchw)],
"bifrost": [(topi.nn.depthwise_conv2d_nchw, topi.bifrost.schedule_depthwise_conv2d_nchw)],
"intel_graphics": [
(
topi.intel_graphics.depthwise_conv2d_nchw,
topi.intel_graphics.schedule_depthwise_conv2d_nchw,
)
],
}
_depthwise_conv2d_nhwc_implement = {
"generic": (topi.nn.depthwise_conv2d_nhwc, topi.generic.schedule_depthwise_conv2d_nhwc),
"arm_cpu": (
topi.arm_cpu.compute_depthwise_conv2d_nhwc,
topi.arm_cpu.schedule_depthwise_conv2d_nhwc,
),
"gpu": (topi.nn.depthwise_conv2d_nhwc, topi.cuda.schedule_depthwise_conv2d_nhwc),
}
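# The dispatch tables above map a target name to (compute, schedule) implementations so the
# same test body below can exercise each enabled backend with its own schedule.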
def depthwise_conv2d_with_workload_nchw(
batch, in_channel, in_height, channel_multiplier, filter_height, stride, padding, dilation=1
):
in_width = in_height
filter_channel = in_channel
filter_width = filter_height
stride_h = stride_w = stride
if dilation == 1:
# here we transform the padding argument from 'str' to 'tuple' ,
# because we need this to match the "workload" tuple to the records in TopHub
pad_h, pad_w, _, _ = get_pad_tuple(padding, (filter_height, filter_width))
padding_args = (pad_h, pad_w)
else:
padding_args = padding
# placeholder
Input = te.placeholder((batch, in_channel, in_height, in_width), name="Input")
Filter = te.placeholder(
(filter_channel, channel_multiplier, filter_height, filter_width), name="Filter"
)
Scale = te.placeholder((in_channel * channel_multiplier,), name="Scale")
Shift = te.placeholder((in_channel * channel_multiplier,), name="Shift")
dtype = "float32"
def check_device(device, ctx):
print("Running on target: %s" % device)
impl_list = tvm.topi.testing.dispatch(device, _depthwise_conv2d_nchw_implement)[:]
if device == "llvm" and channel_multiplier == 1 and dilation == 1:
impl_list.append(
(topi.x86.depthwise_conv2d_nchw, topi.x86.schedule_depthwise_conv2d_nchw)
)
for fcompute, fschedule in impl_list:
with tvm.target.Target(device):
# declare
DepthwiseConv2d = fcompute(
Input, Filter, (stride_h, stride_w), padding_args, dilation, dtype
)
ScaleShift = topi.nn.scale_shift_nchw(DepthwiseConv2d, Scale, Shift)
Relu = topi.nn.relu(ScaleShift)
# schedule
s1 = fschedule(DepthwiseConv2d)
s2 = fschedule(ScaleShift)
s3 = fschedule(Relu)
# build the kernels
f1 = tvm.build(s1, [Input, Filter, DepthwiseConv2d], device)
f2 = tvm.build(s2, [Input, Filter, Scale, Shift, ScaleShift], device)
f3 = tvm.build(s3, [Input, Filter, Scale, Shift, Relu], device)
# Prepare pod type for test data closure
input_shape = get_const_tuple(Input.shape)
filter_shape = get_const_tuple(Filter.shape)
scale_shape = get_const_tuple(Scale.shape)
shift_shape = get_const_tuple(Shift.shape)
scale_shift_shape = get_const_tuple(ScaleShift.shape)
# Use memoize, pickle the test data for next time use.
@memoize("topi.tests.test_topi_depthwise_conv2d.nchw")
def get_ref_data():
input_np = np.random.uniform(size=input_shape).astype(dtype)
filter_np = np.random.uniform(size=filter_shape).astype(dtype)
dilated_filter_np = tvm.topi.testing.dilate_python(
filter_np, (1, 1, dilation, dilation)
)
scale_np = np.random.uniform(size=scale_shape).astype(dtype)
shift_np = np.random.uniform(size=shift_shape).astype(dtype)
# correctness with scipy
depthwise_conv2d_scipy = tvm.topi.testing.depthwise_conv2d_python_nchw(
input_np, dilated_filter_np, stride, padding
)
scale_shift_scipy = np.zeros(shape=scale_shift_shape)
for c in range(in_channel * channel_multiplier):
scale_shift_scipy[:, c, :, :] = (
depthwise_conv2d_scipy[:, c, :, :] * scale_np[c] + shift_np[c]
)
relu_scipy = np.maximum(scale_shift_scipy, 0)
return (
input_np,
filter_np,
scale_np,
shift_np,
depthwise_conv2d_scipy,
scale_shift_scipy,
relu_scipy,
)
# Get the test data
(
input_np,
filter_np,
scale_np,
shift_np,
depthwise_conv2d_scipy,
scale_shift_scipy,
relu_scipy,
) = get_ref_data()
input_tvm = tvm.nd.array(input_np, ctx)
filter_tvm = tvm.nd.array(filter_np, ctx)
scale_tvm = tvm.nd.array(scale_np, ctx)
shift_tvm = tvm.nd.array(shift_np, ctx)
depthwise_conv2d_tvm = tvm.nd.array(
np.zeros(shape=get_const_tuple(DepthwiseConv2d.shape), dtype=DepthwiseConv2d.dtype),
ctx,
)
scale_shift_tvm = tvm.nd.array(
np.zeros(shape=get_const_tuple(ScaleShift.shape), dtype=ScaleShift.dtype), ctx
)
relu_tvm = tvm.nd.array(
np.zeros(shape=get_const_tuple(Relu.shape), dtype=Relu.dtype), ctx
)
# launch kernel 1 (depthwise_conv2d)
timer_1 = f1.time_evaluator(f1.entry_name, ctx, number=1)
tcost_1 = timer_1(input_tvm, filter_tvm, depthwise_conv2d_tvm).mean
# launch kernel 2 (depthwise_conv2d + scale_shift)
timer_2 = f2.time_evaluator(f2.entry_name, ctx, number=1)
tcost_2 = timer_2(input_tvm, filter_tvm, scale_tvm, shift_tvm, scale_shift_tvm).mean
# launch kernel 3 (depthwise_conv2d + scale_shift + relu)
timer_3 = f3.time_evaluator(f3.entry_name, ctx, number=1)
tcost_3 = timer_3(input_tvm, filter_tvm, scale_tvm, shift_tvm, relu_tvm).mean
tvm.testing.assert_allclose(
depthwise_conv2d_tvm.asnumpy(), depthwise_conv2d_scipy, rtol=1e-5
)
tvm.testing.assert_allclose(scale_shift_tvm.asnumpy(), scale_shift_scipy, rtol=1e-5)
tvm.testing.assert_allclose(relu_tvm.asnumpy(), relu_scipy, rtol=1e-5)
for device, ctx in tvm.testing.enabled_targets():
with autotvm.tophub.context(device): # load tophub pre-tuned parameters
check_device(device, ctx)
def depthwise_conv2d_with_workload_nhwc(
batch, in_channel, in_height, channel_multiplier, filter_height, stride_h, padding, dilation=1
):
in_width = in_height
filter_channel = in_channel
filter_width = filter_height
stride_w = stride_h
if dilation == 1:
# here we transform the padding argument from 'str' to 'tuple' ,
# because we need this to match the "workload" tuple to the records in TopHub
pad_h, pad_w, _, _ = get_pad_tuple(padding, (filter_height, filter_width))
padding_args = (pad_h, pad_w)
else:
padding_args = padding
# placeholder
Input = te.placeholder((batch, in_height, in_width, in_channel), name="Input")
Filter = te.placeholder(
(filter_height, filter_width, filter_channel, channel_multiplier), name="Filter"
)
Scale = te.placeholder((in_channel * channel_multiplier,), name="Scale")
Shift = te.placeholder((in_channel * channel_multiplier,), name="Shift")
dtype = "float32"
def check_device(device, ctx):
print("Running on target: %s" % device)
fcompute, fschedule = tvm.topi.testing.dispatch(device, _depthwise_conv2d_nhwc_implement)
with tvm.target.Target(device):
# declare
DepthwiseConv2d = fcompute(
Input, Filter, (stride_h, stride_w), padding_args, dilation, dtype
)
ScaleShift = topi.nn.scale_shift_nhwc(DepthwiseConv2d, Scale, Shift)
Relu = topi.nn.relu(ScaleShift)
# schedule
s1 = fschedule(DepthwiseConv2d)
s2 = fschedule(ScaleShift)
s3 = fschedule(Relu)
# build the kernels
f1 = tvm.build(s1, [Input, Filter, DepthwiseConv2d], device)
f2 = tvm.build(s2, [Input, Filter, Scale, Shift, ScaleShift], device)
f3 = tvm.build(s3, [Input, Filter, Scale, Shift, Relu], device)
# Prepare pod type for test data closure
input_shape = get_const_tuple(Input.shape)
filter_shape = get_const_tuple(Filter.shape)
scale_shape = get_const_tuple(Scale.shape)
shift_shape = get_const_tuple(Shift.shape)
scale_shift_shape = get_const_tuple(ScaleShift.shape)
# Use memoize, pickle the test data for next time use.
@memoize("topi.tests.test_topi_depthwise_conv2d.nhwc.v2")
def get_ref_data():
input_np = np.random.uniform(size=input_shape).astype(dtype)
filter_np = np.random.uniform(size=filter_shape).astype(dtype)
dilated_filter_np = tvm.topi.testing.dilate_python(
filter_np, (dilation, dilation, 1, 1)
)
scale_np = np.random.uniform(size=scale_shape).astype(dtype)
shift_np = np.random.uniform(size=shift_shape).astype(dtype)
# correctness with scipy
depthwise_conv2d_scipy = tvm.topi.testing.depthwise_conv2d_python_nhwc(
input_np, dilated_filter_np, stride=[stride_h, stride_w], padding=padding
)
scale_shift_scipy = np.zeros(shape=scale_shift_shape)
for c in range(in_channel * channel_multiplier):
scale_shift_scipy[:, :, :, c] = (
depthwise_conv2d_scipy[:, :, :, c] * scale_np[c] + shift_np[c]
)
relu_scipy = np.maximum(scale_shift_scipy, 0)
return (
input_np,
filter_np,
scale_np,
shift_np,
depthwise_conv2d_scipy,
scale_shift_scipy,
relu_scipy,
)
# Get the test data
(
input_np,
filter_np,
scale_np,
shift_np,
depthwise_conv2d_scipy,
scale_shift_scipy,
relu_scipy,
) = get_ref_data()
# prepare data
input_tvm = tvm.nd.array(input_np, ctx)
filter_tvm = tvm.nd.array(filter_np, ctx)
scale_tvm = tvm.nd.array(scale_np, ctx)
shift_tvm = tvm.nd.array(shift_np, ctx)
depthwise_conv2d_tvm = tvm.nd.array(
np.zeros(shape=get_const_tuple(DepthwiseConv2d.shape), dtype=DepthwiseConv2d.dtype), ctx
)
scale_shift_tvm = tvm.nd.array(
np.zeros(shape=get_const_tuple(ScaleShift.shape), dtype=ScaleShift.dtype), ctx
)
relu_tvm = tvm.nd.array(np.zeros(shape=get_const_tuple(Relu.shape), dtype=Relu.dtype), ctx)
# launch kernel 1 (depthwise_conv2d)
timer_1 = f1.time_evaluator(f1.entry_name, ctx, number=1)
tcost_1 = timer_1(input_tvm, filter_tvm, depthwise_conv2d_tvm).mean
# launch kernel 2 (depthwise_conv2d + scale_shift)
timer_2 = f2.time_evaluator(f2.entry_name, ctx, number=1)
tcost_2 = timer_2(input_tvm, filter_tvm, scale_tvm, shift_tvm, scale_shift_tvm).mean
# launch kernel 3 (depthwise_conv2d + scale_shift + relu)
timer_3 = f3.time_evaluator(f3.entry_name, ctx, number=1)
tcost_3 = timer_3(input_tvm, filter_tvm, scale_tvm, shift_tvm, relu_tvm).mean
relu_scipy = np.maximum(scale_shift_scipy, 0)
tvm.testing.assert_allclose(
depthwise_conv2d_tvm.asnumpy(), depthwise_conv2d_scipy, rtol=1e-5
)
tvm.testing.assert_allclose(scale_shift_tvm.asnumpy(), scale_shift_scipy, rtol=1e-5)
tvm.testing.assert_allclose(relu_tvm.asnumpy(), relu_scipy, rtol=1e-5)
for device, ctx in tvm.testing.enabled_targets():
with autotvm.tophub.context(device): # load tophub pre-tuned parameters
check_device(device, ctx)
def _transform_data(data, bn):
# NCHW -> NCHW[x]c
batch_size, channel, height, width = data.shape
data = np.reshape(data, (batch_size, channel // bn, bn, height, width))
    data = np.transpose(data, (0, 1, 3, 4, 2))
# coding: utf-8
import numpy as np
from time import time
import sys
def find_primes(maxN):
x1 = np.arange(maxN + 1, dtype=np.int64)
    b1 = np.zeros(np.shape(x1), dtype=bool)  # plain bool: np.bool is removed in recent NumPy
b1[x1 > 1] = True
maxN2 = np.int64(maxN**(0.5) + 1)
for n in range(2, maxN2 + 1):
b1[2*n::n] = False
return x1[b1]
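# Sanity check for the sieve above (illustrative): find_primes(10) returns array([2, 3, 5, 7]);
# entries marked False in the boolean mask b1 are the composites eliminated by the loop.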
def prime_factors(N):
pNums = find_primes(N//2 + 1)
    pExps = np.zeros(np.shape(pNums))
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
class TestFillConstantOp1(OpTest):
def setUp(self):
'''Test fill_constant op with specified value
'''
self.op_type = "fill_constant"
self.inputs = {}
self.attrs = {'shape': [123, 92], 'value': 3.8}
        self.outputs = {'Out': np.full((123, 92), 3.8)}
'''This module provides the figures for the accompanying Jupyter notebook'''
import matplotlib.pyplot as plt, numpy as np, pandas as pd
from IPython.display import display
from scipy.interpolate import lagrange, interp1d, Akima1DInterpolator, CubicSpline, PchipInterpolator, CubicHermiteSpline
def figure1():
plt.figure(figsize=(6.0, 9.0))
xi = np.arange(0, 10, 2) + np.random.random(5) * 2.0
yi = 1.5 + np.cos(xi) - np.cos(np.pi * xi) / 4.0
x = np.linspace(0, 10, 51)
y = 1.5 + np.cos(x) - np.cos(np.pi * x) / 4.0
plt.subplot(4, 2, (1, 4))
plt.axhline(0.0, color='k', lw=0.5)
plt.plot(x, y, ':k', xi, yi, 'ok')
plt.xlabel('$x$'); plt.ylabel('$y$')
plt.title('original')
plt.xticks([], [])
for figure, kind in enumerate(('zero', 'linear', 'quadratic', 'cubic')):
plt.subplot(4, 2, figure + 5)
plt.axhline(0.0, color='k', lw=0.5)
spline = interp1d(xi, yi, kind=kind)
for i in range(4):
x = np.linspace(xi[i], xi[i + 1], 51)
plt.plot(x, spline(x), ':')
if figure > 1:
plt.xlabel('$x$')
else:
plt.xticks([], [])
if figure % 2 == 0: plt.ylabel('$y$')
plt.plot(xi, yi, 'ok')
plt.title(kind + ' spline')
return 'Types of splines'
def figure2():
xi = np.array([-5.0, -4.0, -3.0, 3.0, 4.0, 5.0])
yi = np.array([1.0, 1.0, 2.0, -1.0, 1.0, 1.0])
spline = Akima1DInterpolator(xi, yi)
x = np.linspace(-5.5, 5.5, 111)
y = spline(x)
plt.axhline(0., color='k', lw=.5); plt.axvline(0., color='k', lw=.5)
plt.plot(x, y, '-')
plt.plot(xi, yi, 'ok')
plt.xlabel('$x$'); plt.ylabel('$y$')
plt.title('Akima1DInterpolator')
def figure3():
xi = np.array([-5.0, -4.0, -3.0, 3.0, 4.0, 5.0])
yi = np.array([1.0, 1.0, 2.0, -1.0, 1.0, 1.0])
x = np.linspace(-5.5, 5.5, 111)
plt.axhline(0., color='k', lw=.5); plt.axvline(0., color='k', lw=.5)
for bc_type in ('not-a-knot', 'periodic', 'clamped', 'natural'):
spline = CubicSpline(xi, yi, bc_type=bc_type)
y = spline(x)
plt.plot(x, y, '-', label=bc_type)
plt.plot(xi, yi, 'ok')
plt.xlabel('$x$'); plt.ylabel('$y$')
plt.title('CubicSpline')
plt.legend()
def figure4():
xi = np.array([-5.0, -4.0, -3.0, 3.0, 4.0, 5.0])
yi = np.array([1.0, 1.0, 2.0, -1.0, 1.0, 1.0])
spline = PchipInterpolator(xi, yi)
x = np.linspace(-5.5, 5.5, 111)
y = spline(x)
plt.axhline(0., color='k', lw=.5); plt.axvline(0., color='k', lw=.5)
plt.plot(x, y, '-')
plt.plot(xi, yi, 'ok')
plt.xlabel('$x$'); plt.ylabel('$y$')
plt.title('PchipInterpolator')
def figure5():
xi = np.array([-5.0, -4.0, -3.0, 3.0, 4.0, 5.0])
yi = np.array([1.0, 1.0, 2.0, -1.0, 1.0, 1.0])
dyi = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
spline = CubicHermiteSpline(xi, yi, dyi)
x = np.linspace(-5.5, 5.5, 111)
y = spline(x)
plt.axhline(0., color='k', lw=.5); plt.axvline(0., color='k', lw=.5)
plt.plot(x, y, '-')
plt.plot(xi, yi, 'ok')
plt.plot(xi[np.newaxis, :] + np.array([[-.25], [.25]]), yi[np.newaxis, :] + np.array([[-.25], [.25]]), '-k')
plt.xlabel('$x$'); plt.ylabel('$y$')
plt.title('CubicHermiteSpline')
def figure6():
xi = np.arange(0, 10, 2) + np.random.random(5) * 2.0
yi = 1.5 + np.cos(xi) - np.cos(np.pi * xi) / 4.0
spline = interp1d(xi, yi, kind='nearest', fill_value='extrapolate')
x = np.linspace(0, 10, 201)
y = 1.5 + np.cos(x) - np.cos(np.pi * x) / 4.0
plt.axhline(0.0, color='k', lw=0.5)
plt.plot(x, y, ':k')
plt.plot(x, spline(x), '-')
plt.plot(xi, yi, 'ok')
plt.xlabel('$x$'); plt.ylabel('$y$')
plt.title('Nearest-neighbor spline')
def figure7():
from scipy.interpolate import interp1d
xi, yi = np.array([0.0, 1.0, 2.0, 3.0]), np.array([0.0, 0.5, 2.0, 0.5])
spline = interp1d(xi, yi, kind='nearest', fill_value='extrapolate')
x = np.linspace(-0.2, 3.2, 103)
plt.axhline(0.0, color='k', lw=0.5); plt.axvline(0.0, color='k', lw=0.5)
plt.plot(xi, yi, 'ok')
plt.plot(x, spline(x), ':k')
plt.xlabel('$x$'); plt.ylabel('$y$')
plt.title('Example')
plt.grid(True); plt.ylim(-3.0, 3.0)
def figure8():
xi = np.arange(0, 10, 2) + np.random.random(5) * 2.0
yi = 1.5 + np.cos(xi) - np.cos(np.pi * xi) / 4.0
spline = interp1d(xi, yi, kind='linear', fill_value='extrapolate')
x = np.linspace(0, 10, 201)
y = 1.5 + np.cos(x) - np.cos(np.pi * x) / 4.0
plt.axhline(0.0, color='k', lw=0.5)
plt.plot(x, y, ':k')
plt.plot(x, spline(x), '-')
plt.plot(xi, yi, 'ok')
plt.xlabel('$x$'); plt.ylabel('$y$')
plt.title('Linear spline')
def figure9():
xi = np.array([3.0, 7.0])
yi = 1.5 + np.cos(xi) - np.cos(np.pi * xi) / 4.0
x = np.linspace(0, 10, 201)
y = 1.5 + np.cos(x) - np.cos(np.pi * x) / 4.0
plt.axhline(0.0, color='k', lw=0.5)
plt.plot(x, y, ':k')
plt.plot([0.0, 10.0], [yi[0], yi[0]], '-', label='$q_i(x)$')
plt.plot([0.0, 10.0], [yi[1], yi[1]], '-', label='$q_{i+1}(x)$')
plt.plot(xi, yi, 'ok')
plt.xlabel('$x$'); plt.ylabel('$y$')
plt.title('Weighted averaging')
plt.legend()
def figure10():
xi = np.array([3.0, 7.0])
yi = 1.5 + np.cos(xi) - np.cos(np.pi * xi) / 4.0
x = np.linspace(0, 10, 201)
y = 1.5 + np.cos(x) - np.cos(np.pi * x) / 4.0
plt.figure(figsize=(6.0, 6.0))
plt.subplot(3, 1, (1, 2))
plt.axhline(0.0, color='k', lw=0.5)
plt.plot(x, y, ':k')
plt.plot([0.0, 10.0], [yi[0], yi[0]], '-', label='$q_i(x)$')
plt.plot([0.0, 10.0], [yi[1], yi[1]], '-', label='$q_{i+1}(x)$')
plt.plot(xi, yi, 'ok')
plt.ylabel('$y$')
plt.title('Weighted averaging')
plt.legend(); plt.xticks([], [])
plt.subplot(3, 1, 3)
plt.plot([0.0, xi[0], xi[1], 10.0], [1.0, 1.0, 0.0, 0.0], '-', label='$w_i(x)$')
plt.plot([0.0, xi[0], xi[1], 10.0], [0.0, 0.0, 1.0, 1.0], '-', label='$1-w_i(x)$')
plt.xlabel('$x$'); plt.ylabel('$w$')
plt.legend()
return 'Weighted averaging'
def figure11():
xi = np.array([3.0, 7.0])
yi = 1.5 + np.cos(xi) - np.cos(np.pi * xi) / 4.0
spline = interp1d(xi, yi, kind='linear')
x = np.linspace(0, 10, 201)
y = 1.5 + np.cos(x) - np.cos(np.pi * x) / 4.0
plt.figure(figsize=(6.0, 6.0))
plt.subplot(3, 1, (1, 2))
plt.axhline(0.0, color='k', lw=0.5)
plt.plot(x, y, ':k')
plt.plot([0.0, 10.0], [yi[0], yi[0]], '-', label='$q_i(x)$')
plt.plot([0.0, 10.0], [yi[1], yi[1]], '-', label='$q_{i+1}(x)$')
plt.plot(xi, yi, '--', label='$p_i(x)$')
plt.plot(xi, yi, 'ok')
plt.ylabel('$y$')
plt.title('Weighted averaging')
plt.legend(); plt.xticks([], [])
plt.subplot(3, 1, 3)
plt.plot([0.0, xi[0], xi[1], 10.0], [1.0, 1.0, 0.0, 0.0], '-', label='$w_i(x)$')
plt.plot([0.0, xi[0], xi[1], 10.0], [0.0, 0.0, 1.0, 1.0], '-', label='$w_{i+1}(x)$')
plt.xlabel('$x$'); plt.ylabel('$w$')
plt.legend()
return 'Weighted averaging'
def figure12():
xi, yi = np.array([0.0, 1.0, 2.0, 3.0]), np.array([0.0, 0.5, 2.0, 0.5])
spline = interp1d(xi, yi, kind='linear', fill_value='extrapolate')
x = np.linspace(-0.2, 3.2, 103)
plt.axhline(0.0, color='k', lw=0.5); plt.axvline(0.0, color='k', lw=0.5)
plt.plot(xi, yi, 'ok')
plt.plot(x, spline(x), ':k')
plt.xlabel('$x$'); plt.ylabel('$y$')
plt.title('Example')
plt.grid(True); plt.ylim(-3.0, 3.0)
def figure13():
xi = np.arange(1, 9, 2) + np.random.random(4) * 2.0
yi = 1.5 + np.cos(xi) - np.cos(np.pi * xi) / 4.0
x = np.linspace(0, 10, 201)
y = 1.5 + np.cos(x) - np.cos(np.pi * x) / 4.0
p1 = lagrange(xi[:-1], yi[:-1])(x)
p2 = lagrange(xi[1:], yi[1:])(x)
plt.figure(figsize=(6.0, 6.0))
plt.subplot(3, 1, (1, 2))
plt.axhline(0.0, color='k', lw=0.5)
plt.plot(x, y, ':k')
plt.plot(x, p1, '-', label='$q_i(x)$')
plt.plot(x, p2, '-', label='$q_{i+1}(x)$')
plt.plot(xi, yi, 'ok')
plt.xlabel('$x$'); plt.ylabel('$y$')
plt.title('Weighted averaging')
plt.legend(); plt.xticks([], []); plt.ylim((-0.3, 3.0))
plt.subplot(3, 1, 3)
plt.plot([0.0, xi[1], xi[2], 10.0], [1.0, 1.0, 0.0, 0.0], '-', label='$w_i(x)$')
plt.plot([0.0, xi[1], xi[2], 10.0], [0.0, 0.0, 1.0, 1.0], '-', label='$1-w_i(x)$')
plt.xlabel('$x$'); plt.ylabel('$w$')
plt.legend()
return 'Weighted averaging'
def figure14():
xi = np.arange(1, 9, 2) + np.random.random(4) * 2.0
yi = 1.5 + np.cos(xi) - np.cos(np.pi * xi) / 4.0
    x = np.linspace(0, 10, 201)
# -*- coding: utf-8 -*-
#
# Authors: Swolf <<EMAIL>>
# Date: 2021/9/18
# License: MIT License
"""
SSCOR.
"""
from typing import Optional, List, Tuple
from functools import partial
import numpy as np
from scipy.linalg import eigh, cholesky, inv
from numpy import ndarray
from sklearn.base import BaseEstimator, TransformerMixin
from joblib import Parallel, delayed
from .base import robust_pattern, FilterBank
def sscor_kernel(X: ndarray,
y: Optional[ndarray] = None,
n_jobs: Optional[int] = None) -> Tuple[ndarray, ndarray, ndarray]:
"""The kernel part in SSCOR algorithm based on paper[1]_., [2]_.
Modified from https://github.com/mnakanishi/TRCA-SSVEP/blob/master/src/train_sscor.m
Parameters
----------
X : ndarray
EEG data assuming removing mean, shape (n_trials, n_channels, n_samples)
y : ndarray
labels, shape (n_trials, ), not used here
n_jobs: int, optional
the number of jobs to use, default None
Returns
-------
W: ndarray
filters, shape (n_channels, n_filters)
D: ndarray
eigenvalues in descending order
A: ndarray
spatial patterns, shape (n_channels, n_filters)
References
----------
.. [1] <NAME>, <NAME>. Designing a sum of squared correlations framework for enhancing SSVEP-based BCIs[J]. IEEE Transactions on Neural Systems and Rehabilitation Engineering, 2019, 27(10): 2044-2050.
.. [2] <NAME>, <NAME>. Correction to “Designing a Sum of Squared Correlations Framework for Enhancing SSVEP Based BCIs”[J]. IEEE Transactions on Neural Systems and Rehabilitation Engineering, 2020, 28(4): 1044-1045.
"""
X = np.copy(X)
X = np.reshape(X, (-1, *X.shape[-2:]))
X = X - np.mean(X, axis=-1, keepdims=True)
mean_X = np.mean(X, axis=0)
K1 = cholesky(mean_X@mean_X.T) # upper-triangular X=K.T@K
iK1 = inv(K1)
xC = [email protected](X, axes=(0, 2, 1))
C = [email protected](X, axes=(0, 2, 1))
def target(iK1, xCi, Ci):
Ki = cholesky(Ci)
Gi = iK1.T@xCi@inv(Ki)
return Gi.T@Gi
target = partial(target, iK1)
G_T_G = np.sum(Parallel(n_jobs=n_jobs)(delayed(target)(xCi, Ci) for xCi, Ci in zip(xC, C)), axis=0)
D, W = eigh(G_T_G)
ind = np.argsort(D)[::-1]
D, W = D[ind], W[:, ind]
W = iK1@W
A = robust_pattern(W, G_T_G, W.T@G_T_G@W)
return W, D, A
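# Illustrative usage sketch (synthetic data; the trial/channel/sample counts are
# assumptions, not part of the original module): obtain SSCOR spatial filters from
# a stack of epoched trials.
def _example_sscor_kernel_usage():
    rng = np.random.RandomState(42)
    X = rng.randn(20, 8, 250)      # 20 trials, 8 channels, 250 samples
    W, D, A = sscor_kernel(X)      # filters (8, 8), eigenvalues (8,), patterns (8, 8)
    # Projecting a trial onto the first filter gives the component that best matches
    # the trial-averaged template in the sum-of-squared-correlations sense.
    component = W[:, 0].T @ X[0]
    return W, D, A, component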
def sscor_feature(W: ndarray, X: ndarray,
n_components: int = 1) -> ndarray:
"""Return sscor features.
Modified from https://github.com/mnakanishi/TRCA-SSVEP/blob/master/src/test_sscor.m
Parameters
----------
W : ndarray
spatial filters from csp_kernel, shape (n_channels, n_filters)
X : ndarray
eeg data, shape (n_trials, n_channels, n_samples)
n_components : int, optional
the first k components to use, usually even number, by default 1
Returns
-------
ndarray
features of shape (n_trials, n_components, n_samples)
Raises
------
ValueError
n_components should less than half of the number of channels
"""
W, X = np.copy(W), np.copy(X)
max_components = W.shape[1]
if n_components > max_components:
raise ValueError("n_components should less than the number of channels")
X = np.reshape(X, (-1, *X.shape[-2:]))
X = X - np.mean(X, axis=-1, keepdims=True)
    features = np.matmul(W[:, :n_components].T, X)
    return features
# -*- coding: utf-8 -*-
# Based on the work of <NAME>, 2009-2011 (<EMAIL>)
# Department of Biomedical Engineering and Computational Science
# Aalto University School of Science
import copy
import numpy as np
import matplotlib as mpl
from matplotlib.pyplot import text
from matplotlib.patches import Rectangle
from matplotlib.colors import ColorConverter
class AlluvialDiagram:
def __init__(self, ax, module_sizes_list, ribbon_size_matrix_list,
module_label_matrix=None, module_colors_list=None,
ribbon_bglim=20, threshold=0.0, rainbow=False):
"""
Plot an alluvial diagram to the ax given as parameter.
Parameters
----------
ax : a matplotlib.axes object
module_sizes_list : 2D list
a 2D list, ``module_sizes_list[i][j]'' is the size of module j in
time i
ribbon_size_matrix_list : 3D list
a 3D list, which describes how the groups change
element with time, ``ribbon_size_matrix_list[i][j][k]``, should
correspond to the flow from module_sizes_list[i][j] to
module_sizes_list[i+1][k], each flow is a tuple, first element is
the outflow and second element is the inflow
module_label_matrix : 2D list, optional
labels for each module
module_colors_list : iterable, optional
colors for the first (left) modules (defaulting to gray)
ribbon_bglim : float, optional
if ribbon contains less than this number of nodes (or other 'mass')
it is drawn on the background to avoid visual clutter
"""
self.ax = ax
self.ribbon_size_matrix_list = ribbon_size_matrix_list
self.module_sizes_list = module_sizes_list
self.module_label_matrix = module_label_matrix
self.module_colors_list = module_colors_list
self.ribbon_bglim = ribbon_bglim
self.threshold = threshold
self.rainbow = rainbow
def setting_parameters(self,):
self.ax.set_xlim([0, 2])
self.ax.set_ylim([-0.05, 1])
self.ax.set_xticks([])
self.ax.set_yticks([])
for loc, spine in self.ax.spines.items():
spine.set_color('none') # don't draw spine
if self.module_colors_list is None:
self.module_colors_list = [["gray"] * len(module_sizes) for module_sizes in self.module_sizes_list]
assert len(self.module_sizes_list) == len(self.module_colors_list)
for sizes, colors in zip(self.module_sizes_list, self.module_colors_list):
assert len(sizes) == len(colors)
max_n_modules = np.max([len(sizes) for sizes in self.module_sizes_list])
module_size_sum = np.max([np.sum(sizes) for sizes in self.module_sizes_list])
self.vertical_pad_btw_modules = 0.010 # percent of
self.vertical_pad_up_and_below = 0.00
self.horizontal_pad_lr = 0.0
self.individual_node_size = (1 - 2 * self.vertical_pad_up_and_below -
self.vertical_pad_btw_modules *
(max_n_modules - 1)) / module_size_sum
self.module_width = (1 - 2 * self.horizontal_pad_lr) / (len(self.ribbon_size_matrix_list) + 1) # should be in range [0,0.5-horizontal_pad_lr
blank_width = (1 - 2 * self.horizontal_pad_lr) / len(self.ribbon_size_matrix_list)
self.mw = self.module_width
self.bw = blank_width
# mwnsf: module width non shaded fraction
#(to be able to differ from fully shaded to non-shaded)
self.mwnsf = 0.1
# plot first modules
def plot_blocks(self,):
iteration_list = [
[module_sizes, module_colors] for module_sizes, module_colors in zip(self.module_sizes_list, self.module_colors_list)
]
# for storing the start y coordinates
self.module_y_starts_list = [[] for _ in range(len(self.module_sizes_list))]
self.module_heights_list = [[] for _ in range(len(self.module_sizes_list))]
# plot modules
for i, iteration_data in enumerate(iteration_list):
module_sizes, module_colors = iteration_data
module_y_starts = self.module_y_starts_list[i]
module_heights = self.module_heights_list[i]
current_y = self.vertical_pad_up_and_below
rect_x_start = self.horizontal_pad_lr + i * (self.mw + self.bw)
for j in range(len(module_sizes)):
module_size = module_sizes[j]
color = module_colors[j]
module_y_starts.append(current_y)
module_height = self.individual_node_size * module_size
module_heights.append(module_height)
rect = Rectangle((rect_x_start, current_y), self.module_width,
module_height, fc=color, ec="0.85")
self.ax.add_patch(rect)
if self.module_label_matrix is not None:
text(rect_x_start, current_y, self.module_label_matrix[i][j], fontsize=7)
current_y += module_height + self.vertical_pad_btw_modules
def plot_ribbons(self,):
module_y_ends_list = copy.deepcopy(self.module_y_starts_list)
curvature_param = 0.6
# plot ribbons in order of biggest modules first?
zorder = 0
for t in range(len(self.ribbon_size_matrix_list)):
for i in range(len(self.ribbon_size_matrix_list[t])):
for j in range(len(self.ribbon_size_matrix_list[t][i])):
ribbon_size = self.ribbon_size_matrix_list[t][i][j]
if (ribbon_size[0] == 0.0) | (ribbon_size[1] == 0.0):
continue
ystart1 = self.module_y_starts_list[t][i]
yend1 = ystart1 + ribbon_size[0] * self.module_heights_list[t][i]
self.module_y_starts_list[t][i] = yend1
ystart2 = module_y_ends_list[t + 1][j]
yend2 = ystart2 + ribbon_size[1] * self.module_heights_list[t+1][j]
module_y_ends_list[t + 1][j] = yend2
# the points needed for the bezier
bezier_verts1 = [
(self.horizontal_pad_lr + t * (self.mw + self.bw) + self.module_width, ystart1), # P0
(self.horizontal_pad_lr + t * (self.mw + self.bw) + self.module_width + curvature_param * self.bw, ystart1), # P1
(self.horizontal_pad_lr + (t + 1) * (self.mw + self.bw) - curvature_param * self.bw, ystart2), # P2
(self.horizontal_pad_lr + (t + 1) * (self.mw + self.bw), ystart2), # P3
]
bezier_verts2 = [
(self.horizontal_pad_lr + t * (self.mw + self.bw) + self.module_width, yend1), # P0
(self.horizontal_pad_lr + t * (self.mw + self.bw) + self.module_width + curvature_param * self.bw, yend1), # P1
(self.horizontal_pad_lr + (t + 1) * (self.mw + self.bw) - curvature_param * self.bw, yend2), # P2
(self.horizontal_pad_lr + (t + 1) * (self.mw + self.bw), yend2), # P3
]
if max(ribbon_size) < self.ribbon_bglim:
use_zorder = -10000 - j
else:
use_zorder = zorder
if (ribbon_size[0] > self.threshold) & (ribbon_size[1] > self.threshold):
_plot_ribbon_using_bezier(self.ax, use_zorder, bezier_verts1,
bezier_verts2,)
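# Illustrative usage sketch (all sizes and flows below are made-up example data):
# two time points with two modules each, where each left module sends half of its
# mass to each right module. Flow entries are (outflow fraction, inflow fraction).
def _example_alluvial_usage():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    module_sizes_list = [[10, 10], [10, 10]]
    ribbon_size_matrix_list = [[[(0.5, 0.5), (0.5, 0.5)],
                                [(0.5, 0.5), (0.5, 0.5)]]]
    diagram = AlluvialDiagram(ax, module_sizes_list, ribbon_size_matrix_list)
    diagram.setting_parameters()
    diagram.plot_blocks()
    diagram.plot_ribbons()
    return fig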
def _plot_ribbon_using_bezier(ax, zorder, points1, points2, color1="gray",
color2="gray", lw=1):
""" Draw ribbon for alluvial diagram (see plot_alluvial)
Parameters
----------
ax : a matplotlib.axes object
zorder : float
the zorder for the ribbon
points1 : iterable of float tuples
the points, which determine the first line of the Bezier ribbon
points2 : iterable of float tuples
the points, which determine the second line of the Bezier ribbon
color1 : a matplotlib compliant color definition
color for the left side of the ribbon
color1 : a matplotlib compliant color definition
color for the right side of the ribbon
lw : float
linewidth for the bezier borders
"""
cc = ColorConverter()
color1 = np.array(cc.to_rgba(color1))
color2 = np.array(cc.to_rgba(color2))
tRange = np.linspace(0, 1, 100)
xpointsList = []
ypointsList = []
for points in [points1, points2]:
points = np.array(points)
p1 = points[0]
p2 = points[1]
p3 = points[2]
p4 = points[3]
allPoints = (p1[:, np.newaxis] * (1 - tRange) ** 3 + p2[:, np.newaxis]
* (3 * (1 - tRange) ** 2 * tRange) + p3[:, np.newaxis] *
(3 * (1 - tRange) * tRange ** 2) + p4[:, np.newaxis] *
tRange ** 3)
xpoints = allPoints[0]
xpointsList.append(xpoints)
ypoints = allPoints[1]
ypointsList.append(ypoints)
ax.plot(xpoints, ypoints, "0.85", lw=lw, zorder=zorder + 0.5)
xpoints = xpointsList[0]
if (mpl.colors.colorConverter.to_rgba_array(color1) ==
mpl.colors.colorConverter.to_rgba_array(color2)).all():
ax.fill_between(xpoints, ypointsList[0], ypointsList[1], lw=lw,
facecolor=color1, edgecolor=color1, zorder=zorder)
else:
for i in range(len(tRange) - 1):
#mean = (tRange[i]+tRange[i+1])*0.5
            xnow = np.mean(xpoints[i:i + 2])
#!/usr/bin/python
import rosbag
import rospy
import yaml
import numpy as np
from tf.transformations import euler_from_quaternion
topic_type_dict = {}
msg_types = ['nav_msgs/Odometry', 'sensor_msgs/Imu', 'geometry_msgs/PoseStamped',
'quadrotor_msgs/PositionCommand', 'quadrotor_msgs/TRPYCommand',
'quadrotor_msgs/SO3Command', 'sensor_msgs/Range',
'geometry_msgs/PoseWithCovarianceStamped']
var_types = ['x', 'y', 'z', 'vx', 'vy', 'vz',
'acc_x', 'acc_y', 'acc_z',
'roll', 'pitch', 'yaw',
'ang_vel_x', 'ang_vel_y', 'ang_vel_z']
def read_bag(bagfile):
global inbag
inbag = rosbag.Bag(bagfile, 'r')
return read_topic_type()
def read_topic_type():
    info_dict = yaml.safe_load(inbag._get_yaml_info())
for x in info_dict['topics']:
topic_type_dict[x['topic']] = x['type']
return topic_type_dict
def read_msg(topics):
data = {}
if len(topics) > 0:
for topic, msg, type in inbag.read_messages():
if topics.count(topic):
if topic_type_dict[topic] == 'nav_msgs/Odometry':
data = update_odometry(data, topic, msg)
elif topic_type_dict[topic] == 'sensor_msgs/Imu':
data = update_imu(data, topic, msg)
elif topic_type_dict[topic] == 'geometry_msgs/PoseStamped':
data = update_pose(data, topic, msg.pose, msg.header)
elif topic_type_dict[topic] == 'quadrotor_msgs/PositionCommand':
data = update_pose_cmd(data, topic, msg)
elif topic_type_dict[topic] == 'geometry_msgs/PoseWithCovarianceStamped':
data = update_pose(data, topic, msg.pose.pose, msg.header)
return data
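# Illustrative usage sketch; 'flight_test.bag' and the topic filtering below are
# hypothetical and depend on what was actually recorded in the bag.
def _example_read_bag_usage():
    topic_types = read_bag('flight_test.bag')   # topic name -> message type
    odom_topics = [t for t, ty in topic_types.items() if ty == 'nav_msgs/Odometry']
    data = read_msg(odom_topics)                # per-topic arrays keyed by var_types
    for topic in odom_topics:
        print(topic, 'first position:', data[topic]['x'][0], data[topic]['y'][0])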
def update_odometry(data, topic, msg):
quat = [msg.pose.pose.orientation.x, msg.pose.pose.orientation.y,
msg.pose.pose.orientation.z, msg.pose.pose.orientation.w]
[r, p, y] = euler_from_quaternion(quat)
if topic in data:
data[topic]['x'] = np.append(data[topic]['x'], msg.pose.pose.position.x)
data[topic]['y'] = np.append(data[topic]['y'], msg.pose.pose.position.y)
data[topic]['z'] = np.append(data[topic]['z'], msg.pose.pose.position.z)
data[topic]['vx'] = np.append(data[topic]['vx'], msg.twist.twist.linear.x)
data[topic]['vy'] = np.append(data[topic]['vy'], msg.twist.twist.linear.y)
data[topic]['vz'] = np.append(data[topic]['vz'], msg.twist.twist.linear.z)
data[topic]['roll'] = np.append(data[topic]['roll'], r)
data[topic]['pitch'] = np.append(data[topic]['pitch'], p)
data[topic]['yaw'] = np.append(data[topic]['yaw'], y)
data[topic]['ang_vel_x'] = np.append(data[topic]['ang_vel_x'], msg.twist.twist.angular.x)
data[topic]['ang_vel_y'] = np.append(data[topic]['ang_vel_y'], msg.twist.twist.angular.y)
data[topic]['ang_vel_z'] = np.append(data[topic]['ang_vel_z'], msg.twist.twist.angular.z)
data[topic]['t'] = np.append(data[topic]['t'], msg.header.stamp.to_sec())
else:
data[topic] = {}
data[topic]['x'] = np.array([msg.pose.pose.position.x])
data[topic]['y'] = np.array([msg.pose.pose.position.y])
data[topic]['z'] = np.array([msg.pose.pose.position.z])
data[topic]['vx'] = np.array([msg.twist.twist.linear.x])
data[topic]['vy'] = np.array([msg.twist.twist.linear.y])
data[topic]['vz'] = np.array([msg.twist.twist.linear.z])
data[topic]['ang_vel_x'] = np.array([msg.twist.twist.angular.x])
data[topic]['ang_vel_y'] = np.array([msg.twist.twist.angular.y])
data[topic]['ang_vel_z'] = np.array([msg.twist.twist.angular.z])
data[topic]['roll'] = np.array([r])
data[topic]['pitch'] = np.array([p])
data[topic]['yaw'] = np.array([y])
data[topic]['t'] = np.array([msg.header.stamp.to_sec()])
return data
def update_pose(data, topic, msg, header):
quat = [msg.orientation.x, msg.orientation.y,
msg.orientation.z, msg.orientation.w]
[r, p, y] = euler_from_quaternion(quat)
if topic in data:
data[topic]['x'] = np.append(data[topic]['x'], msg.position.x)
data[topic]['y'] = np.append(data[topic]['y'], msg.position.y)
data[topic]['z'] = np.append(data[topic]['z'], msg.position.z)
data[topic]['roll'] = np.append(data[topic]['roll'], r)
data[topic]['pitch'] = np.append(data[topic]['pitch'], p)
data[topic]['yaw'] = np.append(data[topic]['yaw'], y)
data[topic]['t'] = np.append(data[topic]['t'], header.stamp.to_sec())
else:
data[topic] = {}
data[topic]['x'] = np.array([msg.position.x])
data[topic]['y'] = np.array([msg.position.y])
data[topic]['z'] = np.array([msg.position.z])
data[topic]['roll'] = np.array([r])
data[topic]['pitch'] = np.array([p])
data[topic]['yaw'] = np.array([y])
data[topic]['t'] = np.array([header.stamp.to_sec()])
return data
def update_imu(data, topic, msg):
quat = [msg.orientation.x, msg.orientation.y,
msg.orientation.z, msg.orientation.w]
[r, p, y] = euler_from_quaternion(quat)
if topic in data:
data[topic]['acc_x'] = np.append(data[topic]['acc_x'], msg.linear_acceleration.x)
data[topic]['acc_y'] = np.append(data[topic]['acc_y'], msg.linear_acceleration.y)
data[topic]['acc_z'] = np.append(data[topic]['acc_z'], msg.linear_acceleration.z)
data[topic]['ang_vel_x'] = np.append(data[topic]['ang_vel_x'], msg.angular_velocity.x)
data[topic]['ang_vel_y'] = np.append(data[topic]['ang_vel_y'], msg.angular_velocity.y)
data[topic]['ang_vel_z'] = np.append(data[topic]['ang_vel_z'], msg.angular_velocity.z)
data[topic]['roll'] = np.append(data[topic]['roll'], r)
data[topic]['pitch'] = np.append(data[topic]['pitch'], p)
data[topic]['yaw'] = np.append(data[topic]['yaw'], y)
data[topic]['t'] = np.append(data[topic]['t'], msg.header.stamp.to_sec())
else:
data[topic] = {}
data[topic]['acc_x'] = np.array([msg.linear_acceleration.x])
data[topic]['acc_y'] = np.array([msg.linear_acceleration.y])
data[topic]['acc_z'] = np.array([msg.linear_acceleration.z])
data[topic]['ang_vel_x'] = np.array([msg.angular_velocity.x])
data[topic]['ang_vel_y'] = np.array([msg.angular_velocity.y])
data[topic]['ang_vel_z'] = np.array([msg.angular_velocity.z])
data[topic]['roll'] = np.array([r])
data[topic]['pitch'] = np.array([p])
data[topic]['yaw'] = np.array([y])
data[topic]['t'] = np.array([msg.header.stamp.to_sec()])
return data
def update_pose_cmd(data, topic, msg):
if topic in data:
data[topic]['x'] = np.append(data[topic]['x'], msg.position.x)
data[topic]['y'] = np.append(data[topic]['y'], msg.position.y)
data[topic]['z'] = np.append(data[topic]['z'], msg.position.z)
data[topic]['vx'] = np.append(data[topic]['vx'], msg.velocity.x)
data[topic]['vy'] = np.append(data[topic]['vy'], msg.velocity.y)
data[topic]['vz'] = np.append(data[topic]['vz'], msg.velocity.z)
data[topic]['acc_x'] = np.append(data[topic]['acc_x'], msg.acceleration.x)
data[topic]['acc_y'] = np.append(data[topic]['acc_y'], msg.acceleration.y)
data[topic]['acc_z'] = np.append(data[topic]['acc_z'], msg.acceleration.z)
data[topic]['yaw'] = np.append(data[topic]['yaw'], msg.yaw)
data[topic]['t'] = np.append(data[topic]['t'], msg.header.stamp.to_sec())
else:
data[topic] = {}
data[topic]['x'] = np.array([msg.position.x])
data[topic]['y'] = np.array([msg.position.y])
data[topic]['z'] = np.array([msg.position.z])
data[topic]['vx'] = np.array([msg.velocity.x])
data[topic]['vy'] = np.array([msg.velocity.y])
data[topic]['vz'] = np.array([msg.velocity.z])
data[topic]['acc_x'] = np.array([msg.acceleration.x])
data[topic]['acc_y'] = np.array([msg.acceleration.y])
data[topic]['acc_z'] = np.array([msg.acceleration.z])
data[topic]['yaw'] = np.array([msg.yaw])
data[topic]['t'] = np.array([msg.header.stamp.to_sec()])
return data
def update_trpy_cmd(data, topic, msg):
if topic in data:
        data[topic]['roll'] = np.append(data[topic]['roll'], msg.roll)
"""Create constant and point scatterer models."""
import numpy as np
import scipy.special
import scipy.integrate
from scipy.ndimage.interpolation import shift
from smii.modeling.propagators.propagators import (Scalar1D, Scalar2D)
from smii.modeling.wavelets.wavelets import ricker
from smii.modeling.forward_model import forward_model
from smii.inversion.fwi import costjac
def direct_1d(x, x_s, dx, dt, c, f):
"""Use the 1D Green's function to determine the wavefield at a given
location and time due to the given source.
"""
r = np.abs(x - x_s)
t_shift = (r/c) / dt + 1
u = dx * dt * c / 2 * np.cumsum(shift(f, t_shift))
return u
def direct_2d(x, t, x_s, dx, dt, c, f):
"""Use the 2D Green's function to determine the wavefield at a given
location and time due to the given source.
"""
r = np.linalg.norm(x - x_s)
t_max = np.maximum(0, int((t - r/c) / dt))
tmtp = t - np.arange(t_max) * dt
summation = np.sum(f[:t_max] / np.sqrt(c**2 * tmtp**2 - r**2))
u = dx**2 * dt * c / 2 / np.pi * summation
return u
def direct_2d2(x, x_s, dx, dt, c, f):
"""Use the 2D Green's function to determine the wavefield at a given
location and time due to the given source.
"""
r = np.linalg.norm(x - x_s)
nt = len(f)
def func(tp, t):
return f[int(tp / dt)] / np.sqrt(c**2 * (t - tp)**2 - r**2)
u = np.zeros_like(f)
t_max = int(r/c / dt)
for t_idx in range(t_max):
t = t_idx * dt
u[t_idx] = scipy.integrate.quad(func, 0, t, (t+dt))[0]
u *= dx**2 * dt * c / 2 / np.pi
return u
def direct_2d_approx(x, x_s, dx, dt, c, f):
"""Same as direct_2d, but using an approximation to calculate the result
for the whole time range of the source.
"""
r = np.linalg.norm(x - x_s)
nt = len(f)
w = np.fft.rfftfreq(nt, dt)
fw = np.fft.rfft(f)
G = 1j / 4 * scipy.special.hankel1(0, -2 * np.pi * w * r / c)
G[0] = 0
s = G * fw * dx**2
u = np.fft.irfft(s, nt)
return u
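# Illustrative sketch (the geometry, velocity and the Gaussian pulse below are
# assumed example values): evaluate the analytic 2D direct arrival at a receiver
# 300 m from the source in a constant 1500 m/s medium.
def _example_direct_2d_approx():
    dx, dt, c = 5.0, 0.001, 1500.0
    t = np.arange(800) * dt
    f = np.exp(-((t - 0.05) * 40.0) ** 2)    # simple pulse standing in for a source wavelet
    x_s = np.array([0.0, 0.0])
    x_r = np.array([0.0, 300.0])
    return direct_2d_approx(x_r, x_s, dx, dt, c, f)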
def direct_3d(x, x_s, dx, dt, c, f):
"""Use the 3D Green's function to determine the wavefield at a given
location and time due to the given source.
"""
r = np.linalg.norm(x - x_s)
t_shift = (r/c) / dt + 1
u = dx**3 * dt / 4 / np.pi / r * shift(f, t_shift)
return u
def scattered_1d(x, x_s, x_p, dx, dt, c, dc, f):
u_p = direct_1d(x_p, x_s, dx, dt, c, f)
du_pdt2 = np.gradient(np.gradient(u_p)) / dt**2
u = 2 * dc / c**3 * direct_1d(x, x_p, dx, dt, c, du_pdt2)
return u
def scattered_2d(x, x_s, x_p, dx, dt, c, dc, f):
u_p = direct_2d_approx(x_p, x_s, dx, dt, c, f)
du_pdt2 = np.gradient(np.gradient(u_p)) / dt**2
u = 2 * dc / c**3 * direct_2d_approx(x, x_p, dx, dt, c, du_pdt2)
return u
def scattered_3d(x, x_s, x_p, dx, dt, c, dc, f):
u_p = direct_3d(x_p, x_s, dx, dt, c, f)
    du_pdt2 = np.gradient(np.gradient(u_p)) / dt**2
u = 2 * dc / c**3 * direct_3d(x, x_p, dx, dt, c, du_pdt2)
return u
def grad_1d(nx, x_r, x_s, x_p, dx, dt, c, dc, f):
d = -scattered_1d(x_r, x_s, x_p, dx, dt, c, dc, f)[::-1]
grad = np.zeros(nx, np.float32)
for x_idx in range(nx):
x = x_idx*dx
u_r = direct_1d(x, x_r, dx, dt, c, d)[::-1]
u_0 = direct_1d(x, x_s, dx, dt, c, f)
du_0dt2 = np.gradient(np.gradient(u_0)) / dt**2
grad[x_idx] = 2 * dt / c**3 * np.sum(u_r * du_0dt2)
return grad
def grad_2d(nx, x_r, x_s, x_p, dx, dt, c, dc, f):
d = -scattered_2d(x_r, x_s, x_p, dx, dt, c, dc, f)[::-1]
grad = np.zeros(nx, np.float32)
for z_idx in range(nx[0]):
for x_idx in range(nx[1]):
x = np.array([z_idx*dx, x_idx*dx])
u_r = direct_2d_approx(x, x_r, dx, dt, c, d)[::-1]
u_0 = direct_2d_approx(x, x_s, dx, dt, c, f)
du_0dt2 = np.gradient(np.gradient(u_0)) / dt**2
grad[z_idx, x_idx] = 2 * dt / c**3 * np.sum(u_r * du_0dt2)
return grad
def grad_1d_fd(model_true, model_init, x_r, x_s, dx, dt, dc, f,
propagator=None, prop_kwargs=None):
    x_r_idx, x_s_idx = (np.array([x_r, x_s]) / dx).astype(int)
source, receiver_locations = _make_source_receiver(x_s_idx, x_r_idx, f)
if propagator is None:
propagator = Scalar1D
if prop_kwargs is None:
prop_kwargs = {}
prop = propagator(model_true, dx, dt, source, **prop_kwargs)
true_data, _ = forward_model(prop, receiver_locations)
receiver = {}
receiver['amplitude'] = true_data.receivers
receiver['locations'] = receiver_locations
dataset = [(source, receiver)]
init_cost, fwi_grad = costjac(model_init, dataset, dx, dt, propagator,
model_init.shape, compute_grad=True,
prop_kwargs=prop_kwargs)
nx = len(model_true)
true_grad = np.zeros(nx, np.float32)
for x_idx in range(nx):
tmp_model = model_init.copy()
tmp_model[x_idx] += dc
new_cost, _ = costjac(tmp_model, dataset, dx, dt, propagator,
model_init.shape, compute_grad=False,
prop_kwargs=prop_kwargs)
true_grad[x_idx] = (new_cost - init_cost) / dc
return fwi_grad, true_grad
def grad_2d_fd(model_true, model_init, x_r, x_s, dx, dt, dc, f,
propagator=None, prop_kwargs=None):
    x_r_idx, x_s_idx = (np.array([x_r, x_s]) / dx).astype(int)
source, receiver_locations = _make_source_receiver(x_s_idx, x_r_idx, f)
if propagator is None:
propagator = Scalar2D
if prop_kwargs is None:
prop_kwargs = {}
prop = propagator(model_true, dx, dt, source, **prop_kwargs)
    true_data, _ = forward_model(prop, receiver_locations)
receiver = {}
receiver['amplitude'] = true_data.receivers
receiver['locations'] = receiver_locations
dataset = [(source, receiver)]
init_cost, fwi_grad = costjac(model_init, dataset, dx, dt, propagator,
model_init.shape, compute_grad=True,
prop_kwargs=prop_kwargs)
true_grad = np.zeros_like(model_true)
for z_idx in range(model_true.shape[0]):
for x_idx in range(model_true.shape[1]):
tmp_model = model_init.copy()
tmp_model[z_idx, x_idx] += dc
new_cost, _ = costjac(tmp_model, dataset, dx, dt, propagator,
model_init.shape, compute_grad=False,
prop_kwargs=prop_kwargs)
true_grad[z_idx, x_idx] = (new_cost - init_cost) / dc
return fwi_grad, true_grad
def _make_source_receiver(x_s_idx, x_r_idx, f):
source = {}
source['amplitude'] = f.reshape(1, 1, -1)
source['locations'] = x_s_idx.reshape(1, 1, -1)
receiver_locations = x_r_idx.reshape(1, 1, -1)
return source, receiver_locations
def _set_coords(x, dx):
    x_m = np.array(x)
import random
import numpy as np
from scipy.sparse import csc_matrix, lil_matrix
def binary_search(array, x):
"""
Binary search
:param array: array: Must be sorted
:param x: value to search
:return: position where it is found, -1 if not found
"""
lower = 0
upper = len(array)
while lower < upper: # use < instead of <=
mid = lower + (upper - lower) // 2 # // is the integer division
val = array[mid]
if x == val:
return mid
elif x > val:
if lower == mid:
break
lower = mid
elif x < val:
upper = mid
return -1
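# Small illustrative check: values present in the sorted array return their index,
# absent values return -1 (which the slicing routines below rely on to skip rows).
def _example_binary_search():
    arr = np.array([1, 3, 5, 7, 9])
    assert binary_search(arr, 7) == 3
    assert binary_search(arr, 4) == -1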
def slice(A: csc_matrix, rows, cols):
"""
CSC matrix sub-matrix view
Only works if rows is sorted
:param A: CSC matrix to get the view from
:param rows: array of selected rows: must be sorted! to use the binary search
:param cols: array of columns: should be sorted
:return:
"""
n_rows = len(rows)
n_cols = len(cols)
n = 0
p = 0
new_val = np.empty(A.nnz)
new_row_ind = np.empty(A.nnz)
new_col_ptr = np.empty(n_cols + 1)
new_col_ptr[p] = 0
for j in cols: # sliced columns
for k in range(A.indptr[j], A.indptr[j + 1]): # columns from A
found_idx = binary_search(rows, A.indices[k]) # look for the row index of A in the rows vector
if found_idx > -1:
new_val[n] = A.data[k]
new_row_ind[n] = found_idx
n += 1
p += 1
new_col_ptr[p] = n
new_col_ptr[p] = n
new_val = np.resize(new_val, n)
new_row_ind = np.resize(new_row_ind, n)
return csc_matrix((new_val, new_row_ind, new_col_ptr), shape=(n_rows, n_cols))
def csc_sub_matrix(Am, Annz, Ap, Ai, Ax, rows, cols):
"""
CSC matrix sub-matrix slice
Works for sorted and unsorted versions of "rows", but "rows" cannot contain duplicates
:param Am: number of rows
:param Annz: number of non-zero entries
:param Ap: Column pointers
:param Ai: Row indices
:param Ax: Data
:param rows: array of selected rows: must be sorted! to use the binary search
:param cols: array of columns: should be sorted
:return: new_val, new_row_ind, new_col_ptr, n_rows, n_cols
"""
n_rows = len(rows)
n_cols = len(cols)
nnz = 0
p = 0
new_val = np.empty(Annz)
new_row_ind = np.empty(Annz)
new_col_ptr = np.empty(n_cols + 1)
new_col_ptr[p] = 0
# generate lookup -> index lookup
lookup = np.zeros(Am, dtype=int)
lookup[rows] = np.arange(len(rows), dtype=int)
for j in cols: # sliced columns
for k in range(Ap[j], Ap[j + 1]): # columns from A
# row index translation to the "rows" space
i = Ai[k]
ii = lookup[i]
if rows[ii] == i: # entry found
new_val[nnz] = Ax[k]
new_row_ind[nnz] = ii
nnz += 1
p += 1
new_col_ptr[p] = nnz
new_col_ptr[p] = nnz
new_val = np.resize(new_val, nnz)
new_row_ind = np.resize(new_row_ind, nnz)
return new_val, new_row_ind, new_col_ptr, n_rows, n_cols
def slice2(A: csc_matrix, rows, cols):
"""
CSC matrix sub-matrix view
Works for unsorted versions of rows, but rows cannot contain repetitions
:param A: CSC matrix to get the view from
:param rows: array of selected rows: must be sorted! to use the binary search
:param cols: array of columns: should be sorted
:return:
"""
new_val, new_row_ind, new_col_ptr, n_rows, n_cols = csc_sub_matrix(Am=A.shape[0], Annz=A.nnz,
Ap=A.indptr, Ai=A.indices, Ax=A.data,
rows=rows, cols=cols)
return csc_matrix((new_val, new_row_ind, new_col_ptr), shape=(n_rows, n_cols))
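# Illustrative check (random example matrix, not part of the original module): slice2
# should agree with scipy's own fancy indexing for an unsorted, duplicate-free row
# selection.
def _example_slice2():
    rng = np.random.RandomState(0)
    dense = rng.rand(6, 5)
    dense[dense < 0.5] = 0.0
    A = csc_matrix(dense)
    rows = np.array([4, 0, 2])
    cols = np.array([1, 3, 4])
    B = slice2(A, rows, cols)
    expected = A[rows, :][:, cols].toarray()
    assert np.allclose(B.toarray(), expected)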
def slice_r(A: csc_matrix, rows):
"""
CSC matrix sub-matrix view
:param A: CSC matrix to get the view from
:param rows: array of selected rows: must be sorted! to use the binary search
:return:
"""
n_rows = len(rows)
n_cols = A.shape[1]
n = 0
p = 0
new_val = np.empty(A.nnz)
new_row_ind = np.empty(A.nnz)
new_col_ptr = np.empty(n_cols + 1)
new_col_ptr[p] = 0
for j in range(n_cols): # sliced columns
for k in range(A.indptr[j], A.indptr[j + 1]): # columns from A
found_idx = binary_search(rows, A.indices[k]) # look for the row index of A in the rows vector
if found_idx > -1:
new_val[n] = A.data[k]
new_row_ind[n] = found_idx
n += 1
p += 1
new_col_ptr[p] = n
new_col_ptr[p] = n
new_val = np.resize(new_val, n)
new_row_ind = np.resize(new_row_ind, n)
return csc_matrix((new_val, new_row_ind, new_col_ptr), shape=(n_rows, n_cols))
def slice_c(A: csc_matrix, cols):
"""
CSC matrix sub-matrix view
:param A: CSC matrix to get the view from
:param cols: array of columns: should be sorted
:return:
"""
n_rows = A.shape[0]
n_cols = len(cols)
n = 0
p = 0
new_val = np.empty(A.nnz)
new_row_ind = np.empty(A.nnz)
new_col_ptr = np.empty(n_cols + 1)
new_col_ptr[p] = 0
for j in cols: # sliced columns
st = A.indptr[j]
nd = A.indptr[j + 1]
for k in range(st, nd): # columns from A
new_val[n] = A.data[k]
new_row_ind[n] = A.indices[k]
n += 1
p += 1
new_col_ptr[p] = n
new_col_ptr[p] = n
new_val = np.resize(new_val, n)
new_row_ind = np.resize(new_row_ind, n)
return csc_matrix((new_val, new_row_ind, new_col_ptr), shape=(n_rows, n_cols))
def _minor_index_fancy(A, idx):
"""
Rows of a CSC matrix
:param A:
:param idx:
:return:
"""
"""Index along the minor axis where idx is an array of ints.
"""
idx_dtype = A.indices.dtype
idx = np.asarray(idx, dtype=idx_dtype).ravel()
M, N = A._swap(A.shape)
k = len(idx)
new_shape = A._swap((M, k))
if k == 0:
return A.__class__(new_shape)
# pass 1: count idx entries and compute new indptr
col_offsets = np.zeros(N, dtype=idx_dtype)
res_indptr = np.empty_like(A.indptr)
csr_column_index1(k, idx, M, N, A.indptr, A.indices,
col_offsets, res_indptr)
# pass 2: copy indices/data for selected idxs
    col_order = np.argsort(idx)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import torch.nn as nn
import torch
from torch.utils.data import DataLoader
import torch.utils.data as data
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
class BusDataset(data.Dataset):
def __init__(self, bus, train):
if train:
self.df = pd.read_csv(f'Data/proov_001/proov_001_merged_data.csv', sep=';')
self.df2 = pd.read_csv(f'Data/proov_003/proov_003_merged_data.csv', sep=';')
self.df = self.df.append(self.df2, ignore_index=True)
else:
self.df = pd.read_csv(f'Data/proov_002/proov_002_merged_data.csv', sep=';')
self.df['timestamp_in'] = self.df['Unnamed: 0']
del self.df['Unnamed: 0']
self.df['timestamp_in'] = pd.to_datetime(self.df['timestamp_in'], format='%Y-%m-%d %H:%M:%S')
self.df['timestamp_in'] = self.df.timestamp_in.values.astype(np.float64) // 10 ** 9
self.df['timestamp_exit'] = pd.to_datetime(self.df['timestamp_exit'], format='%Y-%m-%d %H:%M:%S')
self.df['timestamp_exit'] = self.df.timestamp_exit.values.astype(np.float64) // 10 ** 9
self.df = self.process_data(self.df)
self.cmf = pd.DataFrame()
self.cmf['pos'] = self.df['cmf_pos']
self.cmf['neg'] = self.df['cmf_neg']
del self.df['cmf_pos']
del self.df['cmf_neg']
self.data_size = self.df.shape[0]
def process_data(self, df):
df.dropna(inplace=True)
        df = df.drop(['gps_point_entry', 'gps_point_exit', 'timestamp_in', 'timestamp_exit'], axis=1).astype(float)
return df
def __getitem__(self, item):
inputs = torch.tensor(self.df.iloc[item].values).type(torch.FloatTensor)
pos_targets = torch.tensor(self.cmf['pos'].iloc[item]).type(torch.FloatTensor)
neg_targets = torch.tensor(self.cmf['neg'].iloc[item]).type(torch.FloatTensor)
return inputs, pos_targets, neg_targets
def __len__(self):
return self.data_size
@property
def input_size(self):
return self.df.shape[1]
class LSTM(nn.Module):
def __init__(self, batch_size, input_size, output_size, dropout,
lstm_num_hidden=256, lstm_num_layers=2, device='cuda'):
super(LSTM, self).__init__()
# Initialization of LSTM
self.lstm = nn.LSTM(
input_size=input_size,
hidden_size=lstm_num_hidden,
num_layers=lstm_num_layers,
bidirectional=False,
batch_first=False,
dropout=dropout
)
self.linear = nn.Linear(lstm_num_hidden, output_size, bias=True)
def forward(self, x, hc=None):
"""Forward pass of LSTM"""
out, (h, c) = self.lstm(x, hc)
out = self.linear(out)
return out, (h, c)
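# Illustrative shape check (the sizes are arbitrary example values, not from the
# training script): one forward pass through the LSTM defined above on a random batch.
def _example_lstm_forward():
    seq_len, batch_size, input_size = 10, 4, 12
    model = LSTM(batch_size=batch_size, input_size=input_size, output_size=1, dropout=0.0)
    x = torch.randn(seq_len, batch_size, input_size)   # batch_first=False -> (seq, batch, feature)
    out, (h, c) = model(x)
    assert out.shape == (seq_len, batch_size, 1)
    return out.shape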
def make_plots(pos_pred, pos_train, neg_pred, neg_train, pos_test, neg_test):
pos_targets, pos_out = pos_pred
plt.subplot(2, 2, 1)
plt.plot(pos_targets, color='r', label='Targets')
plt.plot(pos_out, color='b', label='Predictions')
plt.xlabel('Datapoint from 2017')
plt.ylabel('CMF score')
plt.title("Positive cmf targets vs the predicted positive cmf")
plt.legend()
pos_loss, pos_acc = pos_train
plt.subplot(2, 2, 2)
plt.plot(pos_loss, color='r', label='Loss')
plt.plot(pos_acc, color='b', label='Accuracy')
plt.xlabel('Iteration number')
plt.ylabel('Loss and accuracy (in percentages) score')
plt.title("Loss and accuracy of the trained positive model")
plt.legend()
neg_targets, neg_out = neg_pred
plt.subplot(2, 2, 3)
plt.plot(neg_targets, color='r', label='Targets')
plt.plot(neg_out, color='b', label='Predictions')
plt.xlabel('Datapoint from 2017')
plt.ylabel('CMF score')
plt.title("Negative cmf targets vs the predicted negative cmf")
plt.legend()
neg_loss, neg_acc = neg_train
plt.subplot(2, 2, 4)
plt.plot(neg_loss, color='r', label='Loss')
plt.plot(neg_acc, color='b', label='Accuracy')
plt.xlabel('Iteration number')
plt.ylabel('Loss and accuracy (in percentages) score')
plt.title("Loss and accuracy of the trained negative model")
plt.legend()
plt.show()
plt.subplot(2,1,1)
pred_pos, target_pos = pos_test
pred_pos = np.array(pred_pos).ravel()
target_pos = target_pos.numpy()
plt.plot(target_pos, color='b', label='target')
plt.plot(pred_pos, color='r', label='prediction')
plt.xlabel('Datapoint from 2017')
plt.ylabel('CMF score')
plt.title("Prediction vs target test set of positive cmf")
plt.legend()
plt.subplot(2, 1, 2)
pred_neg, target_neg = neg_test
pred_neg = np.array(pred_neg).ravel()
target_neg = target_neg.numpy()
plt.plot(target_neg, color='b', label='target')
plt.plot(pred_neg, color='r', label='prediction')
plt.xlabel('Datapoint from 2017')
plt.ylabel('CMF score')
plt.title("Prediction vs target test set of negative cmf")
plt.legend()
plt.show()
def accuracy(predictions, target, batch_size, tolerance):
prediction = predictions.detach().numpy()[0].T
target = target.numpy()
diff_array = np.abs(prediction - target)
return ((diff_array <= tolerance).sum()/batch_size) * 100
def get_baseline(pos_pred, neg_pred):
df = pd.read_csv(f'Data/proov_002/proov_002_merged_data.csv', sep=';')
df['timestamp_in'] = df['Unnamed: 0']
del df['Unnamed: 0']
df['timestamp_in'] = pd.to_datetime(df['timestamp_in'], format='%Y-%m-%d %H:%M:%S')
df['timestamp_exit'] = pd.to_datetime(df['timestamp_exit'], format='%Y-%m-%d %H:%M:%S')
df.dropna(inplace=True)
cmf = pd.DataFrame()
cmf['pos'] = df['cmf_pos']
cmf['neg'] = df['cmf_neg']
cmf['timestamp'] = df['timestamp_in']
cmf = cmf.set_index('timestamp')
davg = cmf.resample('D').mean().fillna(method='ffill')
davg['pos'] = davg['pos'].shift(1)
davg['neg'] = davg['neg'].shift(1)
davg = davg.fillna(method='bfill')
cmf.index = cmf.index.normalize()
total = []
for index, row in davg.iterrows():
if index in cmf.index:
            daily_values_pos = cmf.loc[index, 'pos']
            daily_values_neg = cmf.loc[index, 'neg']
if not hasattr(daily_values_neg, "__iter__"):
daily_values_neg = [daily_values_neg]
daily_values_pos = [daily_values_pos]
for i, _ in enumerate(daily_values_pos):
total.append([daily_values_pos[i], daily_values_neg[i], row['pos'], row['neg']])
else:
continue
array = np.array(total)
diffs_pos = np.abs(array[:, 0] - array[:, 2])
diffs_neg = np.abs(array[:, 1] - array[:, 3])
    len_pos = len(np.where(diffs_pos < 0.5)[0])
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "<NAME>" at 08:04, 21/09/2020 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
# -------------------------------------------------------------------------------------------------------%
from numpy import zeros, array, log, abs, exp, sqrt, pi, round, sin, cos, arccos, remainder, arcsin, arctan, imag, log10
from scipy.optimize import fminbound
from opfunu.cec.cec2020 import constant
# Industrial Chemical Processes
def p1(x):
# Heat Exchanger Network Design (case 1)
out = constant.benchmark_function(1)
D, g, h, xmin, xmax = out["D"], out["g"], out["h"], out["xmin"], out["xmax"]
fx = 35 * x[0] ** 0.6 + 35 * x[1] ** 0.6
gx = 0
hx = zeros(h)
hx[0] = 200 * x[0] * x[3] - x[2]
hx[1] = 200 * x[1] * x[5] - x[4]
hx[2] = x[2] - 1000 * (x[6] - 100)
hx[3] = x[4] - 10000 * (300 - x[6])
hx[4] = x[2] - 10000 * (600 - x[7])
hx[5] = x[4] - 10000 * (900 - x[8])
hx[6] = x[3] * log(abs(x[7] - 100) + 1e-8) - x[3] * log((600 - x[6]) + 1e-8) - x[7] + x[6] + 500
hx[7] = x[5] * log(abs(x[8] - x[6]) + 1e-8) - x[5] * log(600) - x[8] + x[6] + 600
return fx, gx, hx
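# Illustrative sketch (the penalty weight is an arbitrary choice and this wrapper is
# not part of the original module): each problem here returns (fx, gx, hx); a simple
# static-penalty wrapper turns that triple into one scalar for an unconstrained optimiser.
def _example_penalised_evaluation(x, problem=p1, penalty=1e6):
    import numpy as np
    fx, gx, hx = problem(x)
    g_viol = np.sum(np.maximum(0.0, np.asarray(gx, dtype=float)))   # g(x) <= 0 violations
    h_viol = np.sum(np.abs(np.asarray(hx, dtype=float)))            # h(x) == 0 violations
    return fx + penalty * (g_viol + h_viol)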
def p2(x):
# Heat Exchanger Network Design (case 1)
out = constant.benchmark_function(2)
D, g, h, xmin, xmax = out["D"], out["g"], out["h"], out["xmin"], out["xmax"]
fx = (x[0] / (120 * x[3])) ** 0.6 + (x[1] / (80 * x[4])) ** 0.6 + (x[2] / (40 * x[5])) * 0.6
gx = 0
hx = zeros(h)
hx[0] = x[0] - 1e4 * (x[6] - 100)
hx[1] = x[1] - 1e4 * (x[7] - x[6])
hx[2] = x[2] - 1e4 * (500 - x[7])
hx[3] = x[0] - 1e4 * (300 - x[8])
hx[4] = x[1] - 1e4 * (400 - x[9])
hx[5] = x[2] - 1e-4 * (600 - x[10])
hx[6] = x[3] * log(abs(x[8] - 100) + 1e-8) - x[3] * log(300 - x[6] + 1e-8) - x[8] - x[6] + 400
hx[7] = x[4] * log(abs(x[9] - x[6]) + 1e-8) - x[4] * log(abs(400 - x[7]) + 1e-8) - x[9] + x[6] - x[7] + 400
hx[8] = x[5] * log(abs(x[10] - x[7]) + 1e-8) - x[5] * log(100) - x[10] + x[7] + 100
return fx, gx, hx
def p3(x):
# Optimal Operation of Alkylation Unit
out = constant.benchmark_function(3)
D, g, h, xmin, xmax = out["D"], out["g"], out["h"], out["xmin"], out["xmax"]
fx = -1.715 * x[0] - 0.035 * x[0] * x[5] - 4.0565 * x[2] - 10.0 * x[1] + 0.063 * x[2] * x[4]
hx = 0
gx = zeros(g)
gx[0] = 0.0059553571 * x[5] ** 2 * x[0] + 0.88392857 * x[2] - 0.1175625 * x[5] * x[0] - x[0]
gx[1] = 1.1088 * x[0] + 0.1303533 * x[0] * x[5] - 0.0066033 * x[0] * x[5] ** 2 - x[2]
gx[2] = 6.66173269 * x[5] ** 2 + 172.39878 * x[4] - 56.596669 * x[3] - 191.20592 * x[5] - 10000
gx[3] = 1.08702 * x[5] + 0.32175 * x[3] - 0.03762 * x[5] ** 2 - x[4] + 56.85075
gx[4] = 0.006198 * x[6] * x[3] * x[2] + 2462.3121 * x[1] - 25.125634 * x[1] * x[3] - x[2] * x[3]
gx[5] = 161.18996 * x[2] * x[3] + 5000.0 * x[1] * x[3] - 489510.0 * x[1] - x[2] * x[3] * x[6]
gx[6] = 0.33 * x[6] - x[4] + 44.333333
gx[7] = 0.022556 * x[4] - 0.007595 * x[6] - 1.0
gx[8] = 0.00061 * x[2] - 0.0005 * x[0] - 1.0
gx[9] = 0.819672 * x[0] - x[2] + 0.819672
gx[10] = 24500.0 * x[1] - 250.0 * x[1] * x[3] - x[2] * x[3]
gx[11] = 1020.4082 * x[3] * x[1] + 1.2244898 * x[2] * x[3] - 100000 * x[1]
gx[12] = 6.25 * x[0] * x[5] + 6.25 * x[0] - 7.625 * x[2] - 100000
gx[13] = 1.22 * x[2] - x[5] * x[0] - x[0] + 1.0
return fx, gx, hx
def p4(x):
# Reactor Network Design (RND)
out = constant.benchmark_function(4)
D, g, h, xmin, xmax = out["D"], out["g"], out["h"], out["xmin"], out["xmax"]
hx = zeros(h)
k1 = 0.09755988
k2 = 0.99 * k1
k3 = 0.0391908
k4 = 0.9 * k3
fx = -x[3]
hx[0] = x[0] + k1 * x[1] * x[4] - 1
hx[1] = x[1] - x[0] + k2 * x[1] * x[5]
hx[2] = x[2] + x[0] + k3 * x[2] * x[4] - 1
hx[3] = x[3] - x[2] + x[1] - x[0] + k4 * x[3] * x[5]
gx = x[4] ** 0.5 + x[5] ** 0.5 - 4
return fx, gx, hx
def p5(x):
# Haverly's Pooling Problem
out = constant.benchmark_function(5)
D, g, h, xmin, xmax = out["D"], out["g"], out["h"], out["xmin"], out["xmax"]
fx = -(9 * x[0] + 15 * x[1] - 6 * x[2] - 16 * x[3] - 10 * (x[4] + x[5]))
gx = zeros(g)
hx = zeros(h)
gx[0] = x[8] * x[6] + 2 * x[4] - 2.5 * x[0]
gx[1] = x[8] * x[7] + 2 * x[5] - 1.5 * x[1]
hx[0] = x[6] + x[7] - x[2] - x[3]
hx[1] = x[0] - x[6] - x[4]
hx[2] = x[1] - x[7] - x[5]
hx[3] = x[8] * x[6] + x[8] * x[7] - 3 * x[2] - x[3]
return fx, gx, hx
def p6(x):
# Blending-Pooling-Separation problem
out = constant.benchmark_function(6)
D, g, h, xmin, xmax = out["D"], out["g"], out["h"], out["xmin"], out["xmax"]
fx = 0.9979 + 0.00432 * x[4] + 0.01517 * x[12]
gx = 0
hx = zeros(h)
hx[0] = x[0] + x[1] + x[2] + x[3] - 300
hx[1] = x[5] - x[6] - x[7]
hx[2] = x[8] - x[9] - x[10] - x[11]
hx[3] = x[13] - x[14] - x[15] - x[16]
hx[4] = x[17] - x[18] - x[19]
hx[5] = x[4] * x[20] - x[5] * x[21] - x[8] * x[22]
hx[6] = x[4] * x[23] - x[5] * x[24] - x[8] * x[25]
hx[7] = x[4] * x[26] - x[5] * x[27] - x[8] * x[28]
hx[8] = x[12] * x[29] - x[13] * x[30] - x[17] * x[31]
hx[9] = x[12] * x[32] - x[13] * x[33] - x[17] * x[34]
hx[10] = x[12] * x[35] - x[13] * x[36] - x[17] * x[37]
hx[11] = 1 / 3 * x[0] + x[14] * x[30] - x[4] * x[20]
hx[12] = 1 / 3 * x[0] + x[14] * x[33] - x[4] * x[23]
hx[13] = 1 / 3 * x[0] + x[14] * x[36] - x[4] * x[26]
hx[14] = 1 / 3 * x[1] + x[9] * x[22] - x[12] * x[29]
hx[15] = 1 / 3 * x[1] + x[9] * x[25] - x[12] * x[32]
hx[16] = 1 / 3 * x[1] + x[9] * x[28] - x[12] * x[35]
hx[17] = 1 / 3 * x[2] + x[6] * x[21] + x[10] * x[22] + x[15] * x[30] + x[18] * x[31] - 30
hx[18] = 1 / 3 * x[2] + x[6] * x[24] + x[10] * x[25] + x[15] * x[33] + x[18] * x[34] - 50
hx[19] = 1 / 3 * x[2] + x[6] * x[27] + x[10] * x[28] + x[15] * x[36] + x[18] * x[37] - 30
hx[20] = x[20] + x[23] + x[26] - 1
hx[21] = x[21] + x[24] + x[27] - 1
hx[22] = x[22] + x[25] + x[28] - 1
hx[23] = x[29] + x[32] + x[35] - 1
hx[24] = x[30] + x[33] + x[36] - 1
hx[25] = x[31] + x[34] + x[37] - 1
hx[26] = x[24]
hx[27] = x[27]
hx[28] = x[22]
hx[29] = x[36]
hx[30] = x[31]
hx[31] = x[34]
return fx, gx, hx
def p7(x):
# Propane, Isobutane, n-Butane Nonsharp Separation
out = constant.benchmark_function(7)
D, g, h, xmin, xmax = out["D"], out["g"], out["h"], out["xmin"], out["xmax"]
c = array([[0.23947, 0.75835], [-0.0139904, -0.0661588], [0.0093514, 0.0338147],
[0.0077308, 0.0373349], [-0.0005719, 0.0016371], [0.0042656, 0.0288996]])
fx = c[0, 0] + (c[1, 0] + c[2, 0] * x[23] + c[3, 0] * x[27] + c[4, 0] * x[32] + c[5, 0] * x[33]) * x[4] \
+ c[0, 1] + (c[1, 1] + c[2, 1] * x[25] + c[3, 1] * x[30] + c[4, 1] * x[37] + c[5, 1] * x[38]) * x[12]
gx = 0
hx = zeros(h)
hx[0] = x[0] + x[1] + x[2] + x[3] - 300
hx[1] = x[5] - x[6] - x[7]
hx[2] = x[8] - x[9] - x[10] - x[11]
hx[3] = x[13] - x[14] - x[15] - x[16]
hx[4] = x[17] - x[18] - x[19]
hx[5] = x[5] * x[20] - x[23] * x[24]
hx[6] = x[13] * x[21] - x[25] * x[26]
hx[7] = x[8] * x[22] - x[27] * x[28]
hx[8] = x[17] * x[29] - x[30] * x[31]
hx[9] = x[24] - x[4] * x[32]
hx[10] = x[28] - x[4] * x[33]
hx[11] = x[34] - x[4] * x[35]
hx[12] = x[36] - x[12] * x[37]
hx[13] = x[26] - x[12] * x[38]
hx[14] = x[31] - x[12] * x[39]
hx[15] = x[24] - x[5] * x[20] - x[8] * x[40]
hx[16] = x[28] - x[5] * x[41] - x[8] * x[22]
hx[17] = x[34] - x[5] * x[42] - x[8] * x[43]
hx[18] = x[36] - x[13] * x[44] - x[17] * x[45]
hx[19] = x[26] - x[13] * x[21] - x[17] * x[46]
hx[20] = x[31] - x[13] * x[47] - x[17] * x[29]
hx[21] = 1 / 3 * x[0] + x[14] * x[44] - x[24]
hx[22] = 1 / 3 * x[0] + x[14] * x[21] - x[28]
hx[23] = 1 / 3 * x[0] + x[14] * x[47] - x[34]
hx[24] = 1 / 3 * x[1] + x[9] * x[40] - x[36]
hx[25] = 1 / 3 * x[1] + x[9] * x[22] - x[26]
hx[26] = 1 / 3 * x[1] + x[9] * x[43] - x[31]
hx[27] = x[32] + x[33] + x[35] - 1
hx[28] = x[20] + x[41] + x[42] - 1
hx[29] = x[40] + x[22] + x[43] - 1
hx[30] = x[37] + x[38] + x[39] - 1
hx[31] = x[44] + x[21] + x[47] - 1
hx[32] = x[45] + x[46] + x[29] - 1
hx[33] = x[42]
hx[34] = x[45]
hx[35] = 1 / 3 * x[2] + x[6] * x[20] + x[10] * x[40] + x[15] * x[44] + x[18] * x[45] - 30
hx[36] = 1 / 3 * x[2] + x[6] * x[41] + x[10] * x[22] + x[15] * x[21] + x[18] * x[46] - 50
hx[37] = 1 / 3 * x[2] + x[6] * x[42] + x[10] * x[43] + x[15] * x[47] + x[18] * x[29] - 30
return fx, gx, hx
def p8(x):
# Process synthesis problem
out = constant.benchmark_function(8)
D, g, h, xmin, xmax = out["D"], out["g"], out["h"], out["xmin"], out["xmax"]
x[1] = round(x[1])
fx = 2 * x[0] + x[1]
hx = 0
gx = zeros(g)
gx[0] = 1.25 - x[0] ** 2 - x[1]
gx[1] = x[0] + x[1] - 1.6
return fx, gx, hx
def p9(x):
# Process synthesis and design problem
out = constant.benchmark_function(9)
D, g, h, xmin, xmax = out["D"], out["g"], out["h"], out["xmin"], out["xmax"]
x[2] = round(x[2])
fx = -x[2] + 2 * x[0] + x[1]
hx = x[0] - 2 * exp(-x[1])
gx = -x[0] + x[1] + x[2]
return fx, gx, hx
def p10(x):
# Process flow sheeting problem
out = constant.benchmark_function(10)
D, g, h, xmin, xmax = out["D"], out["g"], out["h"], out["xmin"], out["xmax"]
gx = zeros(g)
x[2] = round(x[2])
fx = -0.7 * x[2] + 5 * (x[0] - 0.5) ** 2 + 0.8
gx[0] = -exp(x[0] - 0.2) - x[1]
gx[1] = x[1] + 1.1 * x[2] + 1
gx[2] = x[0] - x[2] - 0.2
hx = 0
return fx, gx, hx
def p11(x):
# Two-reactor Problem
out = constant.benchmark_function(11)
D, g, h, xmin, xmax = out["D"], out["g"], out["h"], out["xmin"], out["xmax"]
x1 = x[0]
x2 = x[1]
v1 = x[2]
v2 = x[3]
y1 = round(x[4])
y2 = round(x[5])
x_ = x[6]
z1 = 0.9 * (1 - exp(-0.5 * v1)) * x1
z2 = 0.8 * (1 - exp(-0.4 * v2)) * x2
fx = 7.5 * y1 + 5.5 * y2 + 7 * v1 + 6 * v2 + 5 * x_
hx = zeros(h)
gx = zeros(g)
hx[0] = y1 + y2 - 1
hx[1] = z1 + z2 - 10
hx[2] = x1 + x2 - x_
hx[3] = z1 * y1 + z2 * y2 - 10
gx[0] = v1 - 10 * y1
gx[1] = v2 - 10 * y2
gx[2] = x1 - 20 * y1
gx[3] = x2 - 20 * y2
return fx, gx, hx
def p12(x):
# Process synthesis problem
out = constant.benchmark_function(12)
D, g, h, xmin, xmax = out["D"], out["g"], out["h"], out["xmin"], out["xmax"]
x1 = x[0]
x2 = x[1]
x3 = x[2]
y1 = round(x[3])
y2 = round(x[4])
y3 = round(x[5])
y4 = round(x[6])
    fx = (y1 - 1) ** 2 + (y2 - 1) ** 2 + (y3 - 1) ** 2 - log(y4 + 1) + (x1 - 1) ** 2 + (x2 - 2) ** 2 + (x3 - 3) ** 2
gx = zeros(g)
gx[0] = x1 + x2 + x3 + y1 + y2 + y3 - 5
gx[1] = y3 ** 2 + x1 ** 2 + x2 ** 2 + x3 ** 2 - 5.5
gx[2] = x1 + y1 - 1.2
gx[3] = x2 + y2 - 1.8
gx[4] = x3 + y3 - 2.5
gx[5] = x1 + y4 - 1.2
gx[6] = y2 ** 2 + x2 ** 2 - 1.64
gx[7] = y3 ** 2 + x3 ** 2 - 4.25
gx[8] = y2 ** 2 + x3 ** 2 - 4.64
hx = 0
return fx, gx, hx
def p13(x):
# Process design Problem
out = constant.benchmark_function(13)
D, g, h, xmin, xmax = out["D"], out["g"], out["h"], out["xmin"], out["xmax"]
x1 = x[0]
x2 = x[1]
x3 = x[2]
y1 = round(x[3])
y2 = round(x[4])
fx = -5.357854 * x1 ** 2 - 0.835689 * y1 * x3 - 37.29329 * y1 + 40792.141
a = [85.334407, 0.0056858, 0.0006262, 0.0022053, 80.51249, 0.0071317, 0.0029955, 0.0021813, 9.300961, 0.0047026, 0.0012547, 0.0019085]
    gx = zeros(g)
# Copyright (c) Facebook, Inc. and its affiliates
# Copyright (c) MTRF authors
import collections
import numpy as np
import os
from typing import Any, Dict, Optional, NewType, Sequence, Union, Tuple
from pathlib import Path
import pickle
from r3l import PROJECT_PATH, RESET_STATES_PATH
from r3l.r3l_envs.inhand_env.base import ObjectType
from r3l.r3l_envs.inhand_env.reposition import SawyerDhandInHandObjectRepositionFixed
from r3l.robot.default_configs import FLIPUP_SAWYER_ROBOT_CONFIG, ARM_QPOS_PALMDOWN, MOCAP_EULER_PALMUP, MOCAP_EULER_PALMDOWN
from r3l.utils.circle_math import circle_distance
from r3l.utils.quatmath import quat2euler, euler2quat, mat2quat
class SawyerDhandInHandObjectFlipUpFixed(SawyerDhandInHandObjectRepositionFixed):
DEFAULT_REWARD_KEYS_AND_WEIGHTS = {
"sawyer_to_target_x_circle_distance_reward": 5.0,
"sawyer_to_target_z_circle_distance_reward": 5.0,
# NOTE: This xyz reward is needed to make sure the flip up policy
# settles at a reasonable height
"object_to_target_xyz_distance_reward": 2.0,
"small_bonus": 1.0,
"big_bonus": 1.0,
"drop_penalty": 1.0,
}
def __init__(
self,
reward_keys_and_weights: dict = DEFAULT_REWARD_KEYS_AND_WEIGHTS,
**kwargs
):
if kwargs.get("object_type", None) == ObjectType.Valve3:
reset_policy_dirs = [
# (Path(PROJECT_PATH)
# / "r3l/r3l_agents/softlearning/SawyerDhandInHandValve3PickupFixed-v0/pickup_raised_valve")
]
reset_state_pkl_path = None # TODO
elif kwargs.get("object_type", None) in (
ObjectType.Dodecahedron, ObjectType.DodecahedronBasket, ObjectType.DodecahedronBulb):
reset_policy_dirs = [(
Path(PROJECT_PATH)
/ "r3l/r3l_agents/softlearning/SawyerDhandInHandDodecahedronPickupFixed-v0/pickup_trained_with_resets"
)]
reset_state_pkl_path = str(Path(RESET_STATES_PATH) / "dodecahedron/picked_up.pkl")
else:
print("Object type doesn't have a reset policy")
reset_policy_dirs = []
reset_state_pkl_path = None
env_params = dict(
task_name="Flip Up",
sawyer_config=FLIPUP_SAWYER_ROBOT_CONFIG,
reward_keys_and_weights=reward_keys_and_weights,
# Set a default init and target
init_xyz_range_params={
"type": "DiscreteRange",
"values": [ | np.array([0.72, 0.15, 0.75]) | numpy.array |
#!/usr/bin/env python3
import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib
import warnings
### The following tool sorts the output spikes from the Spike simulator (1D arrays) into 2D .csv files suitable for analysis by the Russo algorithm ###
#For details on the Russo algorithm, please see the publication "Cell assemblies at multiple time scales with arbitrary lag constellations", Russo and Durstewitz, 2017
#Spike is a simulator written in C++/CUDA for spiking neural networks, more information on which can be found here: https://sites.google.com/view/spike-simulator/home
#This code assumes the user is analyzing neural networks wherein each layer is a 2D lattice, and multiple layers are present (see for example the network used in the paper
#"The Emergence of Polychronization and Feature Binding in a Spiking Neural Network Model of the Primate Ventral Visual System", Eguchi et al, 2018)
#The Russo algorithm requires a 2D array where rows represent neuron IDs, and each neuron's nth spike is indicated by the column
#Each array entry specifies the time (in seconds) at which the spike occurred; empty entries are required to be filled with a NaN identifier
#As the Russo algorithm does not accept neurons that never spike (i.e. empty rows), the following code adds a single spike at a random time-point for such neurons;
#This latter functionality can be removed with the add_Russo_random_spike parameter below
#The user must provide (below) parameters that were used in generating the Spike neural network simulation
#Specify the number of neurons in each excitatory layer and each inhibitory layer, the number of layers, layer of interest, and the number of stimuli
#If using the 'binary network' architecture, _dim should specify the total size of each layer, and not the individual streams
#max_plot_time determines how many ms of data should be plotted here
#random_test_to_screen prints to screen additional tests that require visual inspection by the user
#shuffle_Boolean randomly shuffles the neuron ids locations, so that their firing rates remain the same, but any fixed temporal relationships are broken
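#For illustration only (the numbers below are invented), a Russo-formatted array for three neurons,
#where the most active neuron fires three times, would look like:
# np.array([[0.012, 0.043, 0.101],
#           [0.027, np.nan, np.nan],
#           [0.088, 0.094, np.nan]])
#i.e. rows are neuron IDs, columns hold each neuron's nth spike time in seconds, padded with NaN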
params = {'extracted_layer' : 1,
'max_plot_time' : 0.4,
'excit_dim' : 5*5*2,
'number_of_presentations' : 50,
'duration_of_presentations' : 0.2,
'inhib_dim' : None,
'num_stimuli' : 2,
'add_Russo_random_spike' : True,
'manual_test_to_screen' : False,
'plot_Boolean' : True,
'save_output_Boolean' : True,
'shuffle_Boolean' : True}
#Loop through each stimulus
def main(params):
test_suite(params)
for jj in range(params['num_stimuli']):
(spike_ids, spike_times) = load_spikes(jj) #NB that neurons IDs begin at 1
if params['shuffle_Boolean'] == True:
spike_ids = shuffle_spikes(params, spike_ids, spike_times)
extracted_mask = extract_mask(params, spike_ids)
(extracted_ids, extracted_times) = extract_spikes(spike_ids, spike_times, extracted_mask)
max_spikes = np.max(np.bincount(extracted_ids)) #Find the number of spikes assoc. with the max-spiking neuron
Russo_array = initialize_Russo(params, max_spikes)
Russo_array = populate_Russo(params, extracted_ids, extracted_times, Russo_array)
if params['plot_Boolean'] == True:
            #The plot function will generate a RuntimeWarning due to a NaN comparison; however, the expected output of this code is correct, so the warning is not displayed
with warnings.catch_warnings():
warnings.simplefilter("ignore")
plot_Russo(params, Russo_array, jj)
if params['save_output_Boolean'] == True:
if params['shuffle_Boolean'] == True:
np.savetxt("./Processing_Data/shuffled_posttraining_stim" + str(jj+1) + "_layer" + str(params['extracted_layer']) + "_Russo.csv", Russo_array, delimiter=',')
else:
np.savetxt("./Processing_Data/posttraining_stim" + str(jj+1) + "_layer" + str(params['extracted_layer']) + "_Russo.csv", Russo_array, delimiter=',')
return None
#Load spike ids and times
def load_spikes(stimuli_iter):
spike_ids = np.genfromtxt('./Processing_Data/output_spikes_posttraining_stim' + str(stimuli_iter+1) +'SpikeIDs.txt')
spike_times = np.genfromtxt('./Processing_Data/output_spikes_posttraining_stim' + str(stimuli_iter+1) +'SpikeTimes.txt')
return (spike_ids, spike_times)
#Within each stimulus presentation, shuffle the association between spike IDs and their spike times, so that spikes still occur at different times, but with different neurons
#This maintains each neuron's firing rate, but breaks any temporal associations between a particular neuron and a particular spike time
def shuffle_spikes(params, spike_ids, spike_times):
shuffled_ids = []
#Iterate through all time-windows; note the addition of 1, as the last presentation is still associated with a window
for ii in range(params['number_of_presentations']+1):
#Find the indices of spikes in the given window of interest, and use these indices to extract the neuron IDs that spiked in the window
temp_IDs = spike_ids[np.where((ii*params['duration_of_presentations'] < spike_times) & (spike_times <= (ii+1)*params['duration_of_presentations']))]
#Shuffle those IDs and append them
np.random.shuffle(temp_IDs)
shuffled_ids.extend(temp_IDs)
assert len(shuffled_ids) == len(spike_ids), "Size of spike ID array not preserved after shuffling."
return np.asarray(shuffled_ids)
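#Illustrative sanity check (not used by the pipeline; the toy spike data below are invented):
#shuffling only permutes IDs within each presentation window, so the multiset of IDs per window is preserved
def _shuffle_preserves_window_counts_example():
    toy_params = {'number_of_presentations': 2, 'duration_of_presentations': 0.2}
    toy_ids = np.array([1, 2, 3, 4])
    toy_times = np.array([0.05, 0.15, 0.25, 0.35])
    shuffled = shuffle_spikes(toy_params, toy_ids, toy_times)
    return sorted(shuffled[:2]) == [1, 2] and sorted(shuffled[2:]) == [3, 4]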
#Return extraction mask defining the spikes of interest
def extract_mask(params, spike_ids):
    #Return an array of indices for the neurons in the layer of interest
extracted_mask = np.where((params['excit_dim']*(params['extracted_layer']-1) < spike_ids) & (spike_ids <= params['extracted_layer']*params['excit_dim']))
return extracted_mask
#Extract and format spike ids and times of interest
def extract_spikes(spike_ids, spike_times, extracted_mask):
extracted_ids = np.take(spike_ids, extracted_mask) #An array of spike IDs, restricted to the layer of interest
extracted_times = np.take(spike_times, extracted_mask) #An array of spike times, restricted to the layer of interest
extracted_ids = np.reshape(extracted_ids, len(extracted_ids[0])) #Re-shape the array into a column array
extracted_ids = extracted_ids.astype(int) #Convert from Spike simulator's output (float)
return (extracted_ids, extracted_times)
#Initialize a NaN array with rows = number of unique neurons in the layer, and columns = number of spikes of the maximally active neuron
def initialize_Russo(params, max_spikes):
Russo_array = np.zeros([params['excit_dim'], max_spikes])
Russo_array[:, :] = np.nan
return Russo_array
#Iterate through each neuron of interest, inserting its spikes into the Russo-suitable array; if a neuron never spikes, insert a single random spike
def populate_Russo(params, extracted_ids, extracted_times, Russo_array):
for ii in range(0, params['excit_dim']):
        #Extract a binary mask containing the indices of when the neuron of interest has fired
temp_mask = np.where(extracted_ids == (params['excit_dim']*(params['extracted_layer'] - 1) + ii + 1))
#Use the mask to identify all the spike times associated with that neuron, and assign it to Russo_array
Russo_array[ii, 0:(np.size(np.take(extracted_times, temp_mask)))] = np.take(extracted_times, temp_mask)
if ((math.isnan(Russo_array[ii, 0]) == 1) and (params['add_Russo_random_spike'] == 1)): #If the first element is NaN, the entire row is (i.e. the neuron never spiked)
Russo_array[ii, 0] = np.random.random()*np.max(extracted_times) #Assigns the neuron a single spike, the time of which is sampled from a continuous uniform distribution
return(Russo_array)
def plot_Russo(params, Russo_array, stimuli_iter):
plt.figure(stimuli_iter)
warnings.warn("NaN_Comparison", RuntimeWarning)
for ii in range(0, params['excit_dim']):
        #Plot each neuron's spikes in turn; note the y-values are multiplied by (ii+1), so that the y-axis labels correspond to the neuron index (original simulation, beginning at 1), and not the Russo_array index (which begins at 0)
plt.scatter(Russo_array[ii,(Russo_array[ii, :]<params['max_plot_time'])], np.ones(len(Russo_array[ii,(Russo_array[ii, :]<params['max_plot_time'])]))*(ii+1), c='k', marker='.')
plt.show()
### Testing Functions ###
#Unit test for extract_mask function (extracting first layer)
def test_extract_mask_FirstLayer():
test_params = {'extracted_layer' : 1, 'excit_dim' : 5}
test_spike_ids = np.array([4, 9, 1, 3, 9, 2, 10, 7, 5, 6, 2, 8, 1, 1])
exp = np.array([0, 2, 3, 5, 8, 10, 12, 13])
exp = np.reshape(exp, [1, len(exp)])
obs = extract_mask(test_params, test_spike_ids)
assert np.all(obs == exp)
#Unit test for extract_mask function (extracting higher layers)
def test_extract_mask_HigherLayer():
test_params = {'extracted_layer' : 2, 'excit_dim' : 5}
test_spike_ids = np.array([4, 9, 1, 3, 9, 2, 10, 7, 5, 6, 2, 8, 1, 1])
exp = np.array([1, 4, 6, 7, 9, 11])
exp = np.reshape(exp, [1, len(exp)])
obs = extract_mask(test_params, test_spike_ids)
assert np.all(obs == exp)
#Unit test for extract_spikes function
def test_extract_spikes():
test_spike_ids = np.array([4, 9, 1, 3, 9, 2, 10, 7])
test_spike_times = np.array([0.04, 0.06, 0.9, 1.2, 1.8, 4.0, 5.9, 10.2])
test_extracted_mask = np.array([0, 2, 3, 5])
test_extracted_mask = np.reshape(test_extracted_mask, [1, len(test_extracted_mask)])
exp_ids = ([4, 1, 3, 2])
exp_times = ([0.04, 0.9, 1.2, 4.0])
(obs_ids, obs_times) = extract_spikes(test_spike_ids, test_spike_times, test_extracted_mask)
assert (np.all(exp_ids == obs_ids) and np.all(exp_times == obs_times))
#Unit test for populate Russo function when first layer is of interest
def test_populate_Russo_FirstLayer(params):
test_params = {'extracted_layer' : 1, 'excit_dim' : 5, 'add_Russo_random_spike' : params['add_Russo_random_spike']}
    test_extracted_ids = np.array([5, 1, 1, 3, 1, 2])
"""Handling of individual transducers and their directivities.
This module contains classes describing how individual transducer elements radiate sound,
e.g. waveforms and directivities.
This is also where the various spatial properties, e.g. derivatives, are implemented.
Most calculations in this module are fully vectorized, so the models can calculate
sound fields for any number of source positions and receiver positions at once.
.. autosummary::
:nosignatures:
TransducerModel
PointSource
PlaneWaveTransducer
CircularPiston
CircularRing
TransducerReflector
"""
import numpy as np
import logging
from scipy.special import j0, j1
from scipy.special import spherical_jn, spherical_yn, sph_harm
from .materials import air
from . import utils
logger = logging.getLogger(__name__)
class TransducerModel:
"""Base class for ultrasonic single frequency transducers.
Parameters
----------
freq : float, default 40 kHz
The resonant frequency of the transducer.
p0 : float, default 6 Pa
The sound pressure created at maximum amplitude at 1m distance, in Pa.
Note: This is not an rms value!
medium : Material
The medium in which the array is operating.
physical_size : float, default 10e-3
        The physical dimensions of the transducer. Mainly used for visualization
and some geometrical assumptions.
Attributes
----------
k : float
Wavenumber in the medium.
wavelength : float
Wavelength in the medium.
omega : float
Angular frequency.
freq : float
Wave frequency.
"""
_repr_fmt_spec = '{:%cls(freq=%freq, p0=%p0, medium=%mediumfull, physical_size=%physical_size)}'
_str_fmt_spec = '{:%cls(freq=%freq, p0=%p0, medium=%medium)}'
def __init__(self, freq=40e3, p0=6, medium=air, physical_size=10e-3):
self.medium = medium
self.freq = freq
self.p0 = p0
self.physical_size = physical_size
# The murata transducers are measured to 85 dB SPL at 1 V at 1 m, which corresponds to ~6 Pa at 20 V
# The datasheet specifies 120 dB SPL @ 0.3 m, which corresponds to ~6 Pa @ 1 m
def __format__(self, fmt_spec):
return fmt_spec.replace('%cls', self.__class__.__name__).replace('%freq', str(self.freq)).replace('%p0', str(self.p0)).replace('%mediumfull', repr(self.medium)).replace('%medium', str(self.medium)).replace('%physical_size', str(self.physical_size))
def __str__(self):
return self._str_fmt_spec.format(self)
def __repr__(self):
return self._repr_fmt_spec.format(self)
def _repr_pretty_(self, p, cycle):
p.text(str(self))
def __eq__(self, other):
return (
type(self) == type(other)
and np.allclose(self.p0, other.p0)
and np.allclose(self.omega, other.omega)
and np.allclose(self.k, other.k)
and self.medium == other.medium
and self.physical_size == other.physical_size
)
@property
def k(self):
return self.omega / self.medium.c
@k.setter
def k(self, value):
self._omega = value * self.medium.c
@property
def omega(self):
return self._omega
@omega.setter
def omega(self, value):
self._omega = value
@property
def freq(self):
return self.omega / 2 / np.pi
@freq.setter
def freq(self, value):
self.omega = value * 2 * np.pi
@property
def wavelength(self):
return 2 * np.pi / self.k
@wavelength.setter
def wavelength(self, value):
self.k = 2 * np.pi / value
def pressure(self, source_positions, source_normals, receiver_positions, **kwargs):
"""Calculate the complex sound pressure from the transducer.
Parameters
----------
source_positions : numpy.ndarray
The location of the transducer, as a (3, ...) shape array.
source_normals : numpy.ndarray
The look direction of the transducer, as a (3, ...) shape array.
receiver_positions : numpy.ndarray
The location(s) at which to evaluate the radiation, shape (3, ...).
The first dimension must have length 3 and represent the coordinates of the points.
Returns
-------
out : numpy.ndarray
The pressure at the locations, shape `source_positions.shape[1:] + receiver_positions.shape[1:]`.
"""
return self.pressure_derivs(source_positions=source_positions, source_normals=source_normals, receiver_positions=receiver_positions, orders=0, **kwargs)[0]
def pressure_derivs(self, source_positions, source_normals, receiver_positions, orders=3, **kwargs):
"""Calculate the spatial derivatives of the greens function.
Calculates Cartesian spatial derivatives of the pressure Green's function. Should be implemented by concrete subclasses.
Parameters
----------
source_positions : numpy.ndarray
The location of the transducer, as a (3, ...) shape array.
source_normals : numpy.ndarray
The look direction of the transducer, as a (3, ...) shape array.
receiver_positions : numpy.ndarray
The location(s) at which to evaluate the radiation, shape (3, ...).
The first dimension must have length 3 and represent the coordinates of the points.
orders : int
How many orders of derivatives to calculate. Currently three orders are supported.
Returns
-------
derivatives : numpy.ndarray
Array with the calculated derivatives. Has the shape `(M,) + source_positions.shape[1:] + receiver_positions.shape[1:]`.
where `M` is the number of spatial derivatives, see `num_spatial_derivatives` and `spatial_derivative_order`.
"""
raise NotImplementedError('Transducer model of type `{}` has not implemented cartesian pressure derivatives'.format(self.__class__.__name__))
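# Illustrative usage sketch (not part of the original module; the 40 kHz value below is simply the class default):
# the freq, k, and wavelength properties stay mutually consistent with the medium's speed of sound.
def _transducer_properties_example():
    t = TransducerModel(freq=40e3)
    return np.isclose(t.wavelength * t.freq, t.medium.c) and np.isclose(t.k * t.wavelength, 2 * np.pi)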
class PointSource(TransducerModel):
r"""Point source transducers.
    A point source is in this context defined as a spherically spreading wave,
optionally with a directivity. On its own this class defines a monopole,
but subclasses are free to change the directivity to other shapes.
The spherical spreading is defined as
.. math:: G(r) = {e^{ikr} \over r}
where :math:`r` is the distance from the source, and :math:`k` is the wavenumber of the wave.
"""
def directivity(self, source_positions, source_normals, receiver_positions):
"""Evaluate transducer directivity.
Subclasses will preferably implement this to create new directivity models.
Default implementation is omnidirectional sources.
Parameters
----------
source_positions : numpy.ndarray
The location of the transducer, as a (3, ...) shape array.
source_normals : numpy.ndarray
The look direction of the transducer, as a (3, ...) shape array.
receiver_positions : numpy.ndarray
The location(s) at which to evaluate the radiation, shape (3, ...).
The first dimension must have length 3 and represent the coordinates of the points.
Returns
-------
out : numpy.ndarray
The amplitude (and phase) of the directivity, shape `source_positions.shape[1:] + receiver_positions.shape[1:]`.
"""
return np.ones(np.asarray(source_positions).shape[1:2] + np.asarray(receiver_positions).shape[1:])
def pressure_derivs(self, source_positions, source_normals, receiver_positions, orders=3, **kwargs):
"""Calculate the spatial derivatives of the greens function.
This is the combination of the derivative of the spherical spreading, and
the derivatives of the directivity, including source strength.
Parameters
----------
source_positions : numpy.ndarray
The location of the transducer, as a (3, ...) shape array.
source_normals : numpy.ndarray
The look direction of the transducer, as a (3, ...) shape array.
receiver_positions : numpy.ndarray
The location(s) at which to evaluate the radiation, shape (3, ...).
The first dimension must have length 3 and represent the coordinates of the points.
orders : int
How many orders of derivatives to calculate. Currently three orders are supported.
Returns
-------
derivatives : numpy.ndarray
            Array with the calculated derivatives. Has the shape `(M,) + source_positions.shape[1:] + receiver_positions.shape[1:]`.
where `M` is the number of spatial derivatives, see `num_spatial_derivatives` and `spatial_derivative_order`.
"""
receiver_positions = np.asarray(receiver_positions)
if receiver_positions.shape[0] != 3:
raise ValueError('Incorrect shape of positions')
wavefront_derivatives = self.wavefront_derivatives(source_positions, receiver_positions, orders)
if type(self) == PointSource:
return wavefront_derivatives * self.p0
directivity_derivatives = self.directivity_derivatives(source_positions, source_normals, receiver_positions, orders)
derivatives = np.empty(wavefront_derivatives.shape, dtype=np.complex128)
derivatives[0] = wavefront_derivatives[0] * directivity_derivatives[0]
if orders > 0:
derivatives[1] = wavefront_derivatives[0] * directivity_derivatives[1] + directivity_derivatives[0] * wavefront_derivatives[1]
derivatives[2] = wavefront_derivatives[0] * directivity_derivatives[2] + directivity_derivatives[0] * wavefront_derivatives[2]
derivatives[3] = wavefront_derivatives[0] * directivity_derivatives[3] + directivity_derivatives[0] * wavefront_derivatives[3]
if orders > 1:
derivatives[4] = wavefront_derivatives[0] * directivity_derivatives[4] + directivity_derivatives[0] * wavefront_derivatives[4] + 2 * directivity_derivatives[1] * wavefront_derivatives[1]
derivatives[5] = wavefront_derivatives[0] * directivity_derivatives[5] + directivity_derivatives[0] * wavefront_derivatives[5] + 2 * directivity_derivatives[2] * wavefront_derivatives[2]
derivatives[6] = wavefront_derivatives[0] * directivity_derivatives[6] + directivity_derivatives[0] * wavefront_derivatives[6] + 2 * directivity_derivatives[3] * wavefront_derivatives[3]
derivatives[7] = wavefront_derivatives[0] * directivity_derivatives[7] + directivity_derivatives[0] * wavefront_derivatives[7] + wavefront_derivatives[1] * directivity_derivatives[2] + directivity_derivatives[1] * wavefront_derivatives[2]
derivatives[8] = wavefront_derivatives[0] * directivity_derivatives[8] + directivity_derivatives[0] * wavefront_derivatives[8] + wavefront_derivatives[1] * directivity_derivatives[3] + directivity_derivatives[1] * wavefront_derivatives[3]
derivatives[9] = wavefront_derivatives[0] * directivity_derivatives[9] + directivity_derivatives[0] * wavefront_derivatives[9] + wavefront_derivatives[2] * directivity_derivatives[3] + directivity_derivatives[2] * wavefront_derivatives[3]
if orders > 2:
derivatives[10] = wavefront_derivatives[0] * directivity_derivatives[10] + directivity_derivatives[0] * wavefront_derivatives[10] + 3 * (directivity_derivatives[4] * wavefront_derivatives[1] + wavefront_derivatives[4] * directivity_derivatives[1])
derivatives[11] = wavefront_derivatives[0] * directivity_derivatives[11] + directivity_derivatives[0] * wavefront_derivatives[11] + 3 * (directivity_derivatives[5] * wavefront_derivatives[2] + wavefront_derivatives[5] * directivity_derivatives[2])
derivatives[12] = wavefront_derivatives[0] * directivity_derivatives[12] + directivity_derivatives[0] * wavefront_derivatives[12] + 3 * (directivity_derivatives[6] * wavefront_derivatives[3] + wavefront_derivatives[6] * directivity_derivatives[3])
derivatives[13] = wavefront_derivatives[0] * directivity_derivatives[13] + directivity_derivatives[0] * wavefront_derivatives[13] + wavefront_derivatives[2] * directivity_derivatives[4] + directivity_derivatives[2] * wavefront_derivatives[4] + 2 * (wavefront_derivatives[1] * directivity_derivatives[7] + directivity_derivatives[1] * wavefront_derivatives[7])
derivatives[14] = wavefront_derivatives[0] * directivity_derivatives[14] + directivity_derivatives[0] * wavefront_derivatives[14] + wavefront_derivatives[3] * directivity_derivatives[4] + directivity_derivatives[3] * wavefront_derivatives[4] + 2 * (wavefront_derivatives[1] * directivity_derivatives[8] + directivity_derivatives[1] * wavefront_derivatives[8])
derivatives[15] = wavefront_derivatives[0] * directivity_derivatives[15] + directivity_derivatives[0] * wavefront_derivatives[15] + wavefront_derivatives[1] * directivity_derivatives[5] + directivity_derivatives[1] * wavefront_derivatives[5] + 2 * (wavefront_derivatives[2] * directivity_derivatives[7] + directivity_derivatives[2] * wavefront_derivatives[7])
derivatives[16] = wavefront_derivatives[0] * directivity_derivatives[16] + directivity_derivatives[0] * wavefront_derivatives[16] + wavefront_derivatives[3] * directivity_derivatives[5] + directivity_derivatives[3] * wavefront_derivatives[5] + 2 * (wavefront_derivatives[2] * directivity_derivatives[9] + directivity_derivatives[2] * wavefront_derivatives[9])
derivatives[17] = wavefront_derivatives[0] * directivity_derivatives[17] + directivity_derivatives[0] * wavefront_derivatives[17] + wavefront_derivatives[1] * directivity_derivatives[6] + directivity_derivatives[1] * wavefront_derivatives[6] + 2 * (wavefront_derivatives[3] * directivity_derivatives[8] + directivity_derivatives[3] * wavefront_derivatives[8])
derivatives[18] = wavefront_derivatives[0] * directivity_derivatives[18] + directivity_derivatives[0] * wavefront_derivatives[18] + wavefront_derivatives[2] * directivity_derivatives[6] + directivity_derivatives[2] * wavefront_derivatives[6] + 2 * (wavefront_derivatives[3] * directivity_derivatives[9] + directivity_derivatives[3] * wavefront_derivatives[9])
derivatives[19] = wavefront_derivatives[0] * directivity_derivatives[19] + wavefront_derivatives[19] * directivity_derivatives[0] + wavefront_derivatives[1] * directivity_derivatives[9] + wavefront_derivatives[2] * directivity_derivatives[8] + wavefront_derivatives[3] * directivity_derivatives[7] + directivity_derivatives[1] * wavefront_derivatives[9] + directivity_derivatives[2] * wavefront_derivatives[8] + directivity_derivatives[3] * wavefront_derivatives[7]
derivatives *= self.p0
return derivatives
def wavefront_derivatives(self, source_positions, receiver_positions, orders=3):
"""Calculate the spatial derivatives of the spherical spreading.
Parameters
----------
source_positions : numpy.ndarray
The location of the transducer, as a (3, ...) shape array.
receiver_positions : numpy.ndarray
The location(s) at which to evaluate the radiation, shape (3, ...).
The first dimension must have length 3 and represent the coordinates of the points.
orders : int
How many orders of derivatives to calculate. Currently three orders are supported.
Returns
-------
derivatives : ndarray
Array with the calculated derivatives. Has the shape `(M,) + source_positions.shape[1:] + receiver_positions.shape[1:]`.
where `M` is the number of spatial derivatives, see `num_spatial_derivatives` and `spatial_derivative_order`.
"""
source_positions = np.asarray(source_positions)
receiver_positions = np.asarray(receiver_positions)
if receiver_positions.shape[0] != 3:
raise ValueError('Incorrect shape of positions')
diff = receiver_positions.reshape((3,) + (1,) * (source_positions.ndim - 1) + receiver_positions.shape[1:]) - source_positions.reshape(source_positions.shape[:2] + (receiver_positions.ndim - 1) * (1,))
r = np.sum(diff**2, axis=0)**0.5
kr = self.k * r
jkr = 1j * kr
phase = np.exp(jkr)
derivatives = np.empty((utils.num_pressure_derivs[orders],) + r.shape, dtype=np.complex128)
derivatives[0] = phase / r
if orders > 0:
coeff = (jkr - 1) * phase / r**3
derivatives[1] = diff[0] * coeff
derivatives[2] = diff[1] * coeff
derivatives[3] = diff[2] * coeff
if orders > 1:
coeff = (3 - kr**2 - 3 * jkr) * phase / r**5
const = (jkr - 1) * phase / r**3
derivatives[4] = diff[0]**2 * coeff + const
derivatives[5] = diff[1]**2 * coeff + const
derivatives[6] = diff[2]**2 * coeff + const
derivatives[7] = diff[0] * diff[1] * coeff
derivatives[8] = diff[0] * diff[2] * coeff
derivatives[9] = diff[1] * diff[2] * coeff
if orders > 2:
const = (3 - 3 * jkr - kr**2) * phase / r**5
coeff = (-15 + 15 * jkr + 6 * kr**2 - 1j * kr**3) * phase / r**7
derivatives[10] = diff[0] * (3 * const + diff[0]**2 * coeff)
derivatives[11] = diff[1] * (3 * const + diff[1]**2 * coeff)
derivatives[12] = diff[2] * (3 * const + diff[2]**2 * coeff)
derivatives[13] = diff[1] * (const + diff[0]**2 * coeff)
derivatives[14] = diff[2] * (const + diff[0]**2 * coeff)
derivatives[15] = diff[0] * (const + diff[1]**2 * coeff)
derivatives[16] = diff[2] * (const + diff[1]**2 * coeff)
derivatives[17] = diff[0] * (const + diff[2]**2 * coeff)
derivatives[18] = diff[1] * (const + diff[2]**2 * coeff)
derivatives[19] = diff[0] * diff[1] * diff[2] * coeff
return derivatives
def directivity_derivatives(self, source_positions, source_normals, receiver_positions, orders=3):
"""Calculate the spatial derivatives of the directivity.
The default implementation uses finite difference stencils to evaluate the
derivatives. In principle this means that customized directivity models
        do not need to implement their own derivatives, but can do so for speed
and precision benefits.
Parameters
----------
source_positions : numpy.ndarray
The location of the transducer, as a (3, ...) shape array.
source_normals : numpy.ndarray
The look direction of the transducer, as a (3, ...) shape array.
receiver_positions : numpy.ndarray
The location(s) at which to evaluate the radiation, shape (3, ...).
The first dimension must have length 3 and represent the coordinates of the points.
orders : int
How many orders of derivatives to calculate. Currently three orders are supported.
Returns
-------
derivatives : numpy.ndarray
Array with the calculated derivatives. Has the shape `(M,) + source_positions.shape[1:] + receiver_positions.shape[1:]`.
where `M` is the number of spatial derivatives, see `num_spatial_derivatives` and `spatial_derivative_order`.
"""
source_positions = np.asarray(source_positions)
source_normals = np.asarray(source_normals)
receiver_positions = np.asarray(receiver_positions)
if receiver_positions.shape[0] != 3:
raise ValueError('Incorrect shape of positions')
finite_difference_coefficients = {'': (np.array([[0, 0, 0]]).T, np.array([1]))}
if orders > 0:
finite_difference_coefficients['x'] = (np.array([[1, 0, 0], [-1, 0, 0]]).T, np.array([0.5, -0.5]))
finite_difference_coefficients['y'] = (np.array([[0, 1, 0], [0, -1, 0]]).T, np.array([0.5, -0.5]))
finite_difference_coefficients['z'] = (np.array([[0, 0, 1], [0, 0, -1]]).T, np.array([0.5, -0.5]))
if orders > 1:
finite_difference_coefficients['xx'] = (np.array([[1, 0, 0], [0, 0, 0], [-1, 0, 0]]).T, np.array([1, -2, 1])) # Alt -- (np.array([[2, 0, 0], [0, 0, 0], [-2, 0, 0]]), [0.25, -0.5, 0.25])
finite_difference_coefficients['yy'] = (np.array([[0, 1, 0], [0, 0, 0], [0, -1, 0]]).T, np.array([1, -2, 1])) # Alt-- (np.array([[0, 2, 0], [0, 0, 0], [0, -2, 0]]), [0.25, -0.5, 0.25])
finite_difference_coefficients['zz'] = (np.array([[0, 0, 1], [0, 0, 0], [0, 0, -1]]).T, np.array([1, -2, 1])) # Alt -- (np.array([[0, 0, 2], [0, 0, 0], [0, 0, -2]]), [0.25, -0.5, 0.25])
            finite_difference_coefficients['xy'] = (np.array([[1, 1, 0], [-1, -1, 0], [1, -1, 0], [-1, 1, 0]]).T, np.array([0.25, 0.25, -0.25, -0.25]))
# -*- coding: utf-8 -*-
# Copyright 2018 <NAME> <<EMAIL>>
import scipy.optimize as opt
"""
Provides some useful mathematical functions which are not present in numpy.
"""
import numpy as np
def prod(x):
p = 1
for k in x:
p *= k
return p
def perm(x):
"""
permutation with repetition of n factors
x = [n1,n2,n3,...nk]
n = n1+n2+...+nk
return n!/(n1!*n2*...*nk!)
"""
n = sum(x)
num = list(range(2,n+1))
denom = list(range(2,x[0]+1))
for k in x[1:]:
denom += list(range(2,k+1))
    # iterate over a copy: removing items from a list while iterating over it skips elements
    for v in list(num):
        if v in denom:
            num.remove(v)
            denom.remove(v)
return prod(num)/prod(denom)
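# Illustrative example (not in the original module): perm([2, 1]) counts the distinct
# orderings of the multiset {A, A, B}, i.e. 3!/(2!*1!) == 3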
def closest_arg(array, value):
return np.argmin(np.abs(array-value))
def strictly_positify(x):
"""
Make the result strictly positive by setting the minimum value to
the lower allowed float value
"""
return np.fmax(x, np.finfo(x.dtype).eps)
def positify(x):
"""
Set to zero all negative values
"""
return np.fmax(0, x)
def clip01(x):
"""
clip data x between 0 and 1
"""
return np.fmax(np.fmin(x, 1), 0)
def fact(x):
"""
    Return the prime factors of an integer as a list.
Warning: This function is not a factorial!
"""
if x < 0 or type(x) is not int:
raise ValueError("input must be a positive integer")
if x < 2:
return x
f = []
i = 2
while True:
while x % i == 0:
f.append(i)
            x //= i  # integer division keeps x an int and avoids float precision issues
i += 1
if x == 1:
return f
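# Illustrative example (not in the original module): fact(12) returns the prime
# factors with multiplicity, i.e. [2, 2, 3]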
def moving_average(x, N):
assert len(x) > N
c = np.cumsum(x)
return (c[N:]-c[:-N])/N
def butter_lowpass(cutOff, fs, order=5):
import scipy.signal
nyq = 0.5 * fs
normalCutoff = cutOff / nyq
    # the cutoff is already normalised by the Nyquist frequency, so design a digital (not analog) filter
    b, a = scipy.signal.butter(order, normalCutoff, btype='low', analog=False)
return b, a
def butter_lowpass_filter(data, cutOff, fs, order=4):
import scipy.signal
b, a = butter_lowpass(cutOff, fs, order=order)
y = scipy.signal.lfilter(b, a, data)
return y
def Gauss(x, x0, s, amp=None, **kargs):
if 'A' in kargs:
from warnings import warn
warn("Parameter A is deprecated. Please use amp in order to set the amplitude!")
amp = kargs.pop('A')
elif 'Amp' in kargs:
from warnings import warn
warn("Parameter Amp is deprecated. Please use amp in order to set the amplitude!")
amp = kargs.pop('Amp')
R = np.exp(-(x-x0)**2/(2*s**2))
if amp is None:
R /= (s*np.sqrt(2*np.pi))
else:
R *= amp
R[s==0] = (x[s==0]==x0)*1.0
return R
def Lorentz(x, x0, gamma, amp=None, **kargs):
if 'A' in kargs:
from warnings import warn
warn("Parameter A is deprecated. Please use amp in order to set the amplitude!")
        amp = kargs.pop('A')
elif 'Amp' in kargs:
from warnings import warn
warn("Parameter Amp is deprecated. Please use amp in order to set the amplitude!")
amp = kargs.pop('Amp')
R = 1/((x-x0)**2+(.5*gamma)**2)
if amp is None:
return .5*gamma*R/np.pi
return amp*R*(.5*gamma)**2
def CDF(x,mu,sig, amp=1, lg=0, **kargs):
if 'Amp' in kargs:
from warnings import warn
warn("Parameter Amp is deprecated. Please use amp in order to set the amplitude!")
amp = kargs.pop('Amp')
from scipy.special import erf
g = sig*np.sqrt(2*np.log(2))
    # pseudo-Voigt CDF: Lorentzian part (arctan term, weight lg) plus Gaussian part (erf term, weight 1-lg)
    return amp*lg*(.5 + np.arctan2(x-mu, g)/np.pi) + amp*(1-lg)*.5*(1 + erf((x-mu)/(sig*np.sqrt(2))))
# model.distributions.py
# copyright 2021 <NAME>
import numpy as np
import pymc3 as pm
from scipy import stats, special
import theano.tensor as tt
from pymc3.distributions.dist_math import bound, logpow, alltrue_elemwise
from pymc3.distributions.continuous import assert_negative_support, PositiveContinuous
from pymc3.distributions.distribution import draw_values, generate_samples
from pymc3.theanof import floatX
RANDOM_SEED = 42
rng = np.random.default_rng(seed=RANDOM_SEED)
# NOTE hack to clip values away from {0, 1} for invcdfs
# Whilst value = {0, 1} is theoretically allowed, it seems to cause a
# numeric computational issue somewhere in tt.erfcinv which throws infs.
# This screws up the downstream, so clip slightly away from {0, 1}
CLIP_U_AWAY_FROM_ZERO_ONE_FOR_INVCDFS = 1e-15 #1e-18 too small
def boundzero_numpy(vals, *conditions):
""" Bound natural unit distribution params, return 0 for out-of-bounds
Copy from pymc.bound pymc3.distributions.dist_math.py
"""
return np.where(alltrue_elemwise(conditions), vals, 0.)
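# Illustrative example (not part of the original code): entries failing any condition are zeroed,
# e.g. boundzero_numpy(np.array([0.3, 0.7]), np.array([True, False])) -> array([0.3, 0.0])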
def boundzero_theano(vals, *conditions):
""" Bound natural unit distribution params, return 0 for out-of-bounds
Copy from pymc.bound pymc3.distributions.dist_math.py
"""
return tt.switch(alltrue_elemwise(conditions), vals, 0.)
def boundlog_numpy(vals, *conditions):
""" Bound log unit distribution params, return -inf for out-of-bounds
Copy from pymc.bound pymc3.distributions.dist_math.py
"""
return np.where(alltrue_elemwise(conditions), vals, -np.inf)
def logpow_numpy(x, m):
""" Copy from pymc3
Safe calc log(x**m) since m*log(x) will fail when m, x = 0.
"""
return np.where(x == 0, np.where(m == 0, 0.0, -np.inf), m * np.log(x))
class Gamma(pm.Gamma):
"""Inherit the pymc class, add cdf and invcdf """
def __init__(self):
raise NotImplementedError(
"""Consider that InvCDF is hard to calculate: even scipy uses C functions
Recommend use different dist in practice""")
class GammaNumpy():
"""Gamma PDF, CDF, InvCDF and logPDF, logCDF, logInvCDF
Manual implementations used in pymc3 custom distributions
Helpful to compare these to scipy to confirm my correct implementation
Ref: https://en.wikipedia.org/wiki/Gamma_distribution
Params: x > 0, u in [0, 1], a (shape) > 0, b (rate) > 0
"""
def __init__(self):
self.name = 'Gamma'
self.notation = {'notation': r'x \sim Gamma(\alpha, \beta)'}
self.dist_natural = {
'pdf': r'f(x \mid \alpha, \beta) = \frac{1}{\Gamma(\alpha)} \beta^{\alpha} x^{\alpha-1} e^{- \beta x}',
'cdf': r'F(x \mid \alpha, \beta) = \frac{1}{\Gamma(\alpha)} \gamma(\alpha, \beta x)',
'invcdf': r'F^{-1}(u \mid \alpha, \beta) = '}
self.dist_log = {
'logpdf': r'\log f(x \mid \alpha, \beta) = -\log \Gamma(\alpha) + \log \beta^{\alpha} + \log x^{\alpha-1} - \beta x',
'logcdf': r'\log F(x \mid \alpha, \beta) = -\log \Gamma(\alpha) + \log \gamma(\alpha, \beta x)',
'loginvcdf': r'\log F^{-1}(u \mid \alpha, \beta) = '}
self.conditions = {
'parameters': r'\alpha > 0 \, \text{(shape)}, \; \beta > 0 \, \text{(rate)}',
'support': r'x \in (0, \infty), \; u \sim \text{Uniform([0, 1])}'}
self.summary_stats = {
'mean': r'\frac{\alpha}{\beta}',
'mode': r'\frac{\alpha - 1}{\beta}, \; \text{for} \alpha \geq 1',
'variance': r'\frac{\alpha}{\beta^{2}}'
}
def pdf(self, x, a, b):
"""Gamma PDF
compare to https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L2595
"""
fn = (1 / special.gamma(a)) * np.power(b, a) * np.power(x, a-1) * np.exp(-b * x)
return boundzero_numpy(fn, a > 0, b > 0, x >= 0)
def cdf(self, x, a, b):
"""Gamma CDF:
where $\gamma(a, bx)$ is lower incomplete gamma function [0, lim)
compare to https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L2602
"""
# fn = (1 / special.gamma(a)) * special.gammainc(a, b * x)
fn = special.gammainc(a, b * x)
return boundzero_numpy(fn, a > 0, b > 0, x >= 0)
def invcdf(self, u, a, b):
"""Gamma Inverse CDF aka PPF:
compare to https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L2608
see sc.gammainc()
"""
raise NotImplementedError('TODO gamma inverse CDF')
def logpdf(self, x, a, b):
"""Gamma log PDF
compare to https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L2599
"""
fn = -special.gammaln(a) + logpow_numpy(b, a) + logpow_numpy(x, a-1) - b * x
return boundlog_numpy(fn, a > 0, b > 0, x > 0)
def logcdf(self, x, a, b):
"""Gamma log CDF:
where $\gamma(a, bx)$ is lower incomplete gamma function [0, lim)
compare to https://github.com/pymc-devs/pymc3/blob/41a25d561b3aa40c75039955bf071b9632064a66/pymc3/distributions/continuous.py#L2614
"""
        # scipy's gammainc is already regularised (divided by Gamma(a)), so log F = log(gammainc(a, b*x))
        return boundlog_numpy(np.log(special.gammainc(a, b * x)),
                              a > 0, b > 0, x > 0)
def loginvcdf(self, u, a, b):
"""Gamma log Inverse CDF aka log PPF:
see sc.gammaincinv()
"""
raise NotImplementedError('TODO gamma log inverse CDF')
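# Quick sanity-check sketch (not part of the original module): the manual Gamma pdf/cdf above
# should agree with scipy.stats.gamma, whose shape is a and whose scale is 1/b (rate parametrisation).
def _gamma_numpy_matches_scipy(a=2.0, b=3.0):
    x = np.linspace(0.1, 5, 50)
    gn = GammaNumpy()
    ok_pdf = np.allclose(gn.pdf(x, a, b), stats.gamma(a, scale=1. / b).pdf(x))
    ok_cdf = np.allclose(gn.cdf(x, a, b), stats.gamma(a, scale=1. / b).cdf(x))
    return ok_pdf and ok_cdf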
class Gumbel(pm.Gumbel):
"""Inherit the pymc class, add cdf, logcdf and invcdf, loginvcdf
Also clobber logp (!)
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, defaults=("mode",), **kwargs)
def logp(self, value):
"""
JS patch refactored code to align with other distributions
Calculate log-probability of Gumbel distribution at specified value.
z = (x - mu) / b
pdf = (1 / b) * exp(-z - exp(-z))
logpdf = -log(b) - z - exp(-z)
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the
log probabilities for multiple values are desired the values must
be provided in a numpy array or theano tensor
Returns
-------
TensorVariable
"""
mu = self.mu
beta = self.beta
z = (value - mu) / beta
logp = -tt.log(beta) - z - tt.exp(-z)
return bound(logp, beta > 0)
def logcdf(self, value):
"""
JS patch refactored code to align with other distributions
cdf = exp(-exp(-(X - mu) / b))
logcdf = -exp(-(X-mu)/b)
Compute the log of the cumulative distribution function for
Gumbel distribution at the specified value.
Parameters
----------
value: numeric
Value(s) for which log CDF is calculated. If the log CDF for
multiple values are desired the values must be provided in a
numpy array or theano tensor.
Returns
-------
TensorVariable
"""
beta = self.beta
mu = self.mu
logcdf = -tt.exp(-(value - mu)/beta)
return bound(logcdf, beta > 0)
def loginvcdf(self, value):
"""
JS new function
invcdf = mu - b * log(-log(u))
loginvcdf = log(mu) + log(1 - (b * log(-log(u))/mu))
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the
log probabilities for multiple values are desired the values must
be provided in a numpy array or theano tensor
Returns
-------
TensorVariable
"""
beta = self.beta
mu = self.mu
loginvcdf = tt.log(mu) + tt.log(1 - (beta * tt.log(-tt.log(value))/mu))
return bound(loginvcdf, beta > 0)
class InverseWeibull(PositiveContinuous):
r"""
Inverse Weibull log-likelihood, the reciprocal of the Weibull distribution,
also known as the Fréchet distribution, a special case of the generalized
extreme value distribution.
See scipy for reference
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.invweibull.html
https://github.com/scipy/scipy/blob/v1.6.0/scipy/stats/_continuous_distns.py
The pdf of this distribution is
.. math::
f(x \mid \alpha, s, m) =
        \frac{\alpha}{s} \; \left( \frac{x-m}{s} \right)^{-1-\alpha} \; e^{-\left( \frac{x-m}{s} \right)^{-\alpha}}
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
plt.style.use('seaborn-darkgrid')
x = np.linspace(0, 3, 500)
alphas = [1., 2., 3., 3.]
betas = [1., 1., 1., .5]
for a, b in zip(alphas, betas):
            pdf = st.invweibull.pdf(x, a, scale=b)
plt.plot(x, pdf, label=r'$\alpha$ = {}, $\beta$ = {}'.format(a, b))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.legend(loc=1)
plt.show()
======== ======================================================
    Support :math:`x \in (m, \infty)`
    Mean :math:`\begin{cases} m + s \Gamma\left(1 - \frac{1}{\alpha}\right) & \text{for } \alpha > 1 \\ \infty & \text{otherwise} \end{cases}`
    Variance :math:`\begin{cases} s^{2}\left(\Gamma\left(1 - \frac{2}{\alpha}\right) - \left(\Gamma\left(1 - \frac{1}{\alpha}\right)\right)^{2}\right) & \text{for } \alpha > 2 \\ \infty & \text{otherwise} \end{cases}`
======== ======================================================
Parameters
----------
alpha: float
Shape parameter (alpha > 0).
s: float
Scale parameter (s > 0), default = 1
## m: float
## Location parameter (mu in (-inf, inf)), default = 0
"""
def __init__(self, alpha=None, s=1., *args, **kwargs):
super().__init__(*args, defaults=("mode",), **kwargs)
self.alpha = alpha = tt.as_tensor_variable(floatX(alpha))
self.s = s = tt.as_tensor_variable(floatX(s))
self.mode = s * tt.power(alpha / (1. + alpha), 1. / alpha)
assert_negative_support(alpha, "alpha", "InverseWeibull")
assert_negative_support(s, "s", "InverseWeibull")
def _distr_parameters_for_repr(self):
return ["alpha", 's']
def random(self, point=None, size=None):
"""
Draw random values from InverseWeibull PDF distribution.
Parameters
----------
point: dict, optional
Dict of variable values on which random values are to be
conditioned (uses default point if not specified).
size: int, optional
Desired size of random sample (returns one sample if not
specified).
Returns
-------
array
"""
alpha, s = draw_values([self.alpha, self.s], point=point, size=size)
return generate_samples(stats.invweibull.rvs, c=alpha, scale=s, loc=0.,
dist_shape=self.shape, size=size)
def logp(self, value):
"""
Calculate log-probability of InverseWeibull distribution at specified value.
pdf: https://www.wolframalpha.com/input/?i=%28a%2Fs%29+*+%28x%2Fs%29**%28-1-a%29+*+exp%28-%28x%2Fs%29**-a%29
alt form according to WA: a e^(-(s/x)^a) s^a x^(-1 - a)
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or theano tensor
Returns
-------
TensorVariable
"""
alpha = self.alpha
s = self.s
return bound(
(
tt.log(alpha) -
tt.log(s) +
logpow(s / value, 1. + alpha) -
tt.power(s / value, alpha) # this term grossly dominates if alpha >> 2
),
value > 0.,
alpha > 0.,
s > 0.
)
def cdf(self, value):
"""InverseWeibull CDF"""
alpha = self.alpha
s = self.s
fn = tt.exp(-tt.power(value / s, -alpha))
return boundzero_theano(fn, alpha > 0, s > 0, value > 0)
def logcdf(self, value):
"""InverseWeibull log CDF
ref: ? manually calced and confirmed vs scipy
"""
alpha = self.alpha
s = self.s
fn = -tt.power(value / s, -alpha)
return bound(fn, alpha > 0, s > 0, value > 0)
def invcdf(self, value):
"""InverseWeibull Inverse CDF aka PPF"""
alpha = self.alpha
s = self.s
value = tt.clip(value, CLIP_U_AWAY_FROM_ZERO_ONE_FOR_INVCDFS,
1-CLIP_U_AWAY_FROM_ZERO_ONE_FOR_INVCDFS)
fn = s * tt.power(-tt.log(value), -1. / alpha)
return boundzero_theano(fn, alpha > 0, s > 0, value >= 0, value <= 1)
def loginvcdf(self, value):
"""InverseWeibull log Inverse CDF aka log PPF
ref: ? manually calced and confirmed vs scipy
"""
alpha = self.alpha
s = self.s
fn = tt.log(s) - (1./ alpha ) * tt.log(-tt.log(value))
return bound(fn, alpha > 0, s > 0, value >= 0, value <= 1)
class InverseWeibullNumpy():
"""Inverse Weibull PDF, CDF, InvCDF and logPDF, logCDF, logInvCDF
Manual implementations potentially used if needed in pymc3 custom distributions
Helpful to compare these to scipy to confirm my correct implementation
NOTE: I'm lazy and have set m=0 throughout: this suits my usecase anyhow
Ref: https://en.wikipedia.org/wiki/Fréchet_distribution
Ref: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.invweibull.html?highlight=inverse%20weibull
Params: alpha (shape) > 0, s (scale) > 0, m (location of minimum) = 0
Support: x > 0, u in [0, 1]
"""
def __init__(self):
self.name = 'InverseWeibull'
self.notation = {'notation': r'x \sim InverseWeibull(\alpha, s, m=0)'}
self.dist_natural = {
'pdf': r"""f(x \mid \alpha, s, m=0) = \frac{\alpha}{s} \;
\left( \frac{x}{s} \right)^{-1-\alpha} \;
\exp \left( -\left( \frac{x}{s} \right)^{-\alpha} \right)""",
'cdf': r'F(x \mid \alpha, s, m=0) = \exp \left( -\left( \frac{x}{s} \right)^{-\alpha} \right)',
'invcdf': r"""F^{-1}(u \mid \alpha, s, m=0) = s \log(u)^{-\frac{1}{\alpha}}"""}
self.dist_log = {
'logpdf': r"""\log f(x \mid \alpha, s, m=0) = \log{\alpha} - (1+\alpha)\log{x} +
\alpha \log{s} - \left( \frac{x}{s} \right)^{-\alpha}""",
'logcdf': r'\log F(x \mid \alpha, s, m=0) = - \left( \frac{x}{s} \right)^{-\alpha}',
'loginvcdf': r'\log F^{-1}(u \mid \alpha, s, m=0) = \log(s) - \frac{1}{\alpha} * \log(-\log(u))'}
self.conditions = {
'parameters': r"""\alpha > 0 \, \text{(shape)}, \;
s > 0 \, \text{(scale, default } s=1 \text{)}, \;
m \in (-\infty, \infty) \, \text{(location of minimum, default } m=0 \text{)}""",
'support': r'x \in (m, \infty), \; u \sim \text{Uniform([0, 1])}'}
self.summary_stats = {
'mean': r"""
\begin{cases}
m + s \Gamma \left( 1 - \frac{1}{\alpha} \right) & \text{for } \alpha > 1 \\
\infty & \text{otherwise} \\
\end{cases}""",
'mode': r'm + s \left( \frac{\alpha}{1+\alpha} \right)^{1/\alpha}',
'variance': r"""
\begin{cases}
s^{2} \left( \Gamma \left( 1-\frac{2}{\alpha} \right) -
\left( \Gamma \left( 1-\frac{1}{\alpha} \right) \right)^{2}
\right) & \text{for } \alpha > 2 \\
\infty & \text{otherwise}
\end{cases}"""
}
def pdf(self, x, a, s):
"""InverseWeibull PDF
ref: https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L3919
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = (
(a/s) *
np.power(x/s, -1.-a) *
np.exp(-np.power(x/s, -a))
)
return boundzero_numpy(fn, a > 0, s > 0, x > 0)
def cdf(self, x, a, s):
"""InverseWeibull CDF
ref: https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L3926
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = np.exp(-np.power(x/s, -a))
return boundzero_numpy(fn, a > 0, s > 0, x > 0)
def invcdf(self, u, a, s):
"""InverseWeibull Inverse CDF aka PPF:
ref: https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L3930
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = s * np.power(-np.log(u), -1./a)
return boundzero_numpy(fn, a > 0, s > 0, u >= 0, u <= 1)
def logpdf(self, x, a, s):
"""InverseWeibull log PDF
ref: ? manually calced and confirmed vs scipy
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = (
np.log(a) - np.log(s) +
logpow_numpy(x/s, -1.-a) -
np.power(x/s, -a) # this term grossly dominates if a >> 2
)
return boundlog_numpy(fn, a > 0, s > 0, x >= 0)
def logcdf(self, x, a, s):
"""InverseWeibull log CDF
ref: ? manually calced and confirmed vs scipy
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = -np.power(x/s, -a)
return boundlog_numpy(fn, a > 0, s > 0, x >= 0)
def loginvcdf(self, u, a, s):
"""InverseWeibull log Inverse CDF aka log PPF
ref: ? manually calced and confirmed vs scipy
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = np.log(s) - (1./a) * np.log(-np.log(u))
return boundlog_numpy(fn, a > 0, s > 0, u >= 0, u <= 1)
class ZeroInflatedInverseWeibull(PositiveContinuous):
r"""
ZeroInflatedInvserseWeibull log-likelihood
    WIP! Mixture model to allow for observations dominated by zeros, such as severity (sev) data
also see
+ McElreath 2014, http://xcelab.net/rmpubs/Mcelreath%20Koster%202014.pdf,
https://github.com/rmcelreath/mcelreath-koster-human-nature-2014
+ Jones 2013, https://royalsocietypublishing.org/doi/10.1098/rspb.2013.1210
+ https://stackoverflow.com/questions/42409761/pymc3-nuts-has-difficulty-sampling-from-a-hierarchical-zero-inflated-gamma-mode
The pmf of this distribution is
.. math::
f(x \mid \psi, \alpha, s) = \left\{
\begin{array}{l}
(1 - \psi), & \text{if } x = 0 \\
\psi \, \text{InverseWeibull}(\alpha, s), & \text{if } x > 0
\end{array}
\right.
======== ==========================
Support :math:`x \in \mathbb{N}_0`
Mean :math:`\psi \, \text{InverseWeibull}(\mu, \sigma)`
Variance :math: TODO
======== ==========================
Parameters
----------
psi: float
Expected proportion of InverseWeibull variates (0 <= psi <= 1)
alpha: float
s: float
"""
def __init__(self, psi, alpha, s, *args, **kwargs):
super().__init__(*args, defaults=("mode",), **kwargs)
self.psi = psi = tt.as_tensor_variable(floatX(psi))
self.alpha = alpha = tt.as_tensor_variable(floatX(alpha))
self.s = s = tt.as_tensor_variable(floatX(s))
self.invweibull = InverseWeibull.dist(alpha=alpha, s=s)
# TODO
#self.mean = self.psi * self.invweibull.mean
self.mode = self.psi * self.invweibull.mode
assert_negative_support(alpha, "alpha", "ZeroInflatedInverseWeibull")
assert_negative_support(s, "s", "ZeroInflatedInverseWeibull")
# def _random(self, psi, size=None):
# """Note by definition any rvs_ from invweibull that are zero will
# correctly remain zero, covering the case x = 0"""
# rvs_ = self.invweibull.random(size=size)
# return rvs_ * psi
def _random(self, psi, size=None):
"""Inputs are numpy arrays"""
rvs_ = self.invweibull.random(size=size)
pi = stats.binom(n=np.repeat([1], len(psi)), p=psi).rvs(len(psi))
return rvs_ * pi
def random(self, point=None, size=None):
"""
Draw random values from ZeroInflatedInverseWeibull PDF distribution.
Parameters
----------
point: dict, optional
Dict of variable values on which random values are to be
conditioned (uses default point if not specified).
size: int, optional
Desired size of random sample (returns one sample if not
specified).
Returns
-------
array
"""
psi, alpha, s = draw_values([self.psi, self.alpha, self.s],
point=point, size=size)
return generate_samples(self._random, psi,
dist_shape=self.shape, size=size)
def logp(self, value):
"""LogPDF"""
psi = self.psi
logp_ = tt.switch(tt.neq(value, 0), # or use tt.gt(value, 0), dunno which faster
tt.log(psi) + self.invweibull.logp(value),
tt.log1p(-psi))
return bound(logp_, value >=0, psi > 0, psi < 1)
def cdf(self, value):
"""CDF"""
psi = self.psi
cdf_ = (1. - psi) * 1 + psi * self.invweibull.cdf(value)
return boundzero_theano(cdf_, value >=0, psi > 0, psi < 1)
def invcdf(self, value):
"""InvCDF aka PPF"""
psi = self.psi
invcdf_ = self.invweibull.invcdf((value + psi - 1) / psi)
return boundzero_theano(invcdf_, value>=0, value<=1, psi > 0, psi < 1)
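# Illustrative numpy-only sketch (not part of the original module) of the zero-inflation idea
# documented above: a Bernoulli(psi) mask zeroes out a fraction (1 - psi) of the inverse-Weibull
# (Frechet) draws. The parameter values below are arbitrary assumptions for demonstration.
def _zero_inflated_invweibull_sketch(psi=0.7, alpha=3., s=1., size=1000):
    sketch_rng = np.random.default_rng(RANDOM_SEED)
    mask = sketch_rng.random(size) < psi
    draws = stats.invweibull.rvs(c=alpha, scale=s, size=size)
    return draws * mask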
class ZeroInflatedInverseWeibullNumpy():
"""Zero-inflated Inverse Weibull PDF, CDF, InvCDF and logPDF, logCDF, logInvCDF
Manual implementations potentially used if needed in pymc3 custom distributions
    Helpful to compare these to scipy to confirm the implementation, though this zero-inflated case seems rarely needed
NOTE: I'm lazy and have set m=0 throughout: this suits my usecase anyhow
Ref: https://en.wikipedia.org/wiki/Fréchet_distribution
Ref: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.invweibull.html?highlight=inverse%20weibull
Params: 0 < psi < 1 (prop invweibull), alpha (shape) > 0, s (scale) > 0, m (location of minimum) = 0
Support: x > 0, u in [0, 1]
"""
def __init__(self):
self.name = 'InverseWeibull'
self.notation = {'notation': r'x \sim InverseWeibull(\alpha, s, m=0)'}
self.dist_natural = {
'pdf': r"""f(x \mid \alpha, s, m=0) = \frac{\alpha}{s} \;
\left( \frac{x}{s} \right)^{-1-\alpha} \;
\exp \left( -\left( \frac{x}{s} \right)^{-\alpha} \right)""",
'cdf': r'F(x \mid \alpha, s, m=0) = \exp \left( -\left( \frac{x}{s} \right)^{-\alpha} \right)',
'invcdf': r"""F^{-1}(u \mid \alpha, s, m=0) = s \log(u)^{-\frac{1}{\alpha}}"""}
self.dist_log = {
'logpdf': r"""\log f(x \mid \alpha, s, m=0) = \log{\alpha} - (1+\alpha)\log{x} +
\alpha \log{s} - \left( \frac{x}{s} \right)^{-\alpha}""",
'logcdf': r'\log F(x \mid \alpha, s, m=0) = - \left( \frac{x}{s} \right)^{-\alpha}',
'loginvcdf': r'\log F^{-1}(u \mid \alpha, s, m=0) = \log(s) - \frac{1}{\alpha} * \log(-\log(u))'}
self.conditions = {
'parameters': r"""\alpha > 0 \, \text{(shape)}, \;
s > 0 \, \text{(scale, default } s=1 \text{)}, \;
m \in (-\infty, \infty) \, \text{(location of minimum, default } m=0 \text{)}""",
'support': r'x \in (m, \infty), \; u \sim \text{Uniform([0, 1])}'}
self.summary_stats = {
'mean': r"""
\begin{cases}
m + s \Gamma \left( 1 - \frac{1}{\alpha} \right) & \text{for } \alpha > 1 \\
\infty & \text{otherwise} \\
\end{cases}""",
'mode': r'm + s \left( \frac{\alpha}{1+\alpha} \right)^{1/\alpha}',
'variance': r"""
\begin{cases}
s^{2} \left( \Gamma \left( 1-\frac{2}{\alpha} \right) -
\left( \Gamma \left( 1-\frac{1}{\alpha} \right) \right)^{2}
\right) & \text{for } \alpha > 2 \\
\infty & \text{otherwise}
\end{cases}"""
}
def pdf(self, x, a, s):
"""InverseWeibull PDF
ref: https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L3919
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = (
(a/s) *
np.power(x/s, -1.-a) *
np.exp(-np.power(x/s, -a))
)
return boundzero_numpy(fn, a > 0, s > 0, x > 0)
def cdf(self, x, a, s):
"""InverseWeibull CDF
ref: https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L3926
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = np.exp(-np.power(x/s, -a))
return boundzero_numpy(fn, a > 0, s > 0, x > 0)
def invcdf(self, u, a, s):
"""InverseWeibull Inverse CDF aka PPF:
ref: https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L3930
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = s * np.power(-np.log(u), -1./a)
return boundzero_numpy(fn, a > 0, s > 0, u >= 0, u <= 1)
def logpdf(self, x, a, s):
"""InverseWeibull log PDF
ref: ? manually calced and confirmed vs scipy
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = (
np.log(a) - np.log(s) +
logpow_numpy(x/s, -1.-a) -
np.power(x/s, -a) # this term grossly dominates if a >> 2
)
return boundlog_numpy(fn, a > 0, s > 0, x >= 0)
def logcdf(self, x, a, s):
"""InverseWeibull log CDF
ref: ? manually calced and confirmed vs scipy
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = -np.power(x/s, -a)
return boundlog_numpy(fn, a > 0, s > 0, x >= 0)
def loginvcdf(self, u, a, s):
"""InverseWeibull log Inverse CDF aka log PPF
ref: ? manually calced and confirmed vs scipy
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = np.log(s) - (1./a) * np.log(-np.log(u))
return boundlog_numpy(fn, a > 0, s > 0, u >= 0, u <= 1)
class Kumaraswamy(pm.Kumaraswamy):
"""Inherit the pymc class, add cdf, logcdf and invcdf, loginvcdf"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def logcdf(self, value):
"""
JS new function
cdf = 1 - (1 - X**a)**b
logcdf = log(1) + log(1 - ((1 - X**a)**b / 1)) = log(1 - (1 - X**a)**b)
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the
log probabilities for multiple values are desired the values must
be provided in a numpy array or theano tensor
Returns
-------
TensorVariable
"""
a = self.a
b = self.b
logcdf = tt.log(1 - (1 - value ** a) ** b)
return bound(logcdf, value >= 0, value <= 1, a > 0, b > 0)
def loginvcdf(self, value):
"""
JS new function
invcdf = (1 - (1-u) ** (1/b)) ** (1/a)
loginvcdf = (1/a) * np.log(1 - (1-u)**(1/b))
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the
log probabilities for multiple values are desired the values must
be provided in a numpy array or theano tensor
Returns
-------
TensorVariable
"""
a = self.a
b = self.b
loginvcdf = (1/a) * tt.log(1 - (1-value)**(1/b))
return bound(loginvcdf, value >= 0, value <= 1, a > 0, b > 0)
class Lognormal(pm.Lognormal):
""" Inherit the pymc class, add cdf and invcdf """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def cdf(self, value):
"""Lognormal CDF"""
mu = self.mu
sigma = self.sigma
z = (tt.log(value) - mu) / sigma
fn = .5 * tt.erfc( -z / tt.sqrt(2.))
# convenience alt use pymc3's invprobit: # fn = pm.math.invprobit(z)
return boundzero_theano(fn, sigma > 0, value > 0)
def invcdf(self, value):
"""Lognormal Inverse CDF aka PPF"""
mu = self.mu
sigma = self.sigma
# value = tt.clip(value, CLIP_U_AWAY_FROM_ZERO_ONE_FOR_INVCDFS, 1-CLIP_U_AWAY_FROM_ZERO_ONE_FOR_INVCDFS)
fn = tt.exp(mu - sigma * tt.sqrt(2) * tt.erfcinv(2 * value))
return boundzero_theano(fn, sigma > 0, value >= 0, value <= 1)
class LognormalNumpy():
"""Lognormal PDF, CDF, InvCDF and logPDF, logCDF, logInvCDF
Manual implementations potentially used if needed in pymc3 custom distributions
Helpful to compare these to scipy to confirm my correct implementation
Ref: https://en.wikipedia.org/wiki/Log-normal_distribution
Ref: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.lognorm.html?highlight=lognorm#scipy.stats.lognorm
Params: mu (location) > 0, sigma (variance) > 0
Support: x > 0, u in [0, 1],
"""
def __init__(self):
self.name = 'Lognormal'
self.notation = {'notation': r'x \sim Lognormal(\mu, \sigma)'}
self.dist_natural = {
'pdf': r"""f(x \mid \mu, \sigma) = \frac{1}{x \sigma \sqrt{2 \pi}} \exp \left( -{ \frac{(\log{x} - \mu)^{2}}{2 \sigma^{2}}} \right)
= \frac{1}{x \sigma \sqrt{2 \pi}} \exp - \left(\frac{\log{x}-\mu}{\sigma \sqrt{2}} \right)^{2}""",
'cdf': r"""F(x \mid \mu, \sigma) = \frac{1}{2} \left[ 1 + \text{erf} \left(\frac{\log{x}-\mu}{\sigma \sqrt{2}} \right) \right]
= \frac{1}{2} \text{erfc} \left( \frac{-\log{x} -\mu}{\sigma \sqrt{2}} \right)""",
'invcdf': r"""F^{-1}(u \mid \mu, \sigma) = \exp \left( \mu + \sigma * \text{normal_invcdf}(u) \right)
= \exp \left( \mu - \sigma \sqrt{2} \text{erfcinv}(2u) \right)"""}
self.dist_log = {
'logpdf': r'\log f(x \mid \mu, \sigma) = - \frac{1}{2 \sigma^2} \log{(x-\mu)^{2}} + \frac{1}{2} \log{\frac{1}{2 \pi \sigma^{2}}} -\log{x}',
'logcdf': r'\log F(x \mid \mu, \sigma) = \log \left[\frac{1}{2} \text{erfc} \left( \frac{\log{(x)} -\mu}{\sigma \sqrt{2}} \right) \right]',
'loginvcdf': r'\log F^{-1}(u \mid \mu, \sigma) = \mu - \sigma \sqrt{2} \text{erfcinv}(2u)'}
self.conditions = {
'parameters': r'\mu \in (-\infty, \infty) \, \text{(location)}, \; \sigma > 0 \, \text{(std. dev.)}',
'support': r'x \in (0, \infty), \; u \sim \text{Uniform([0, 1])}'}
self.summary_stats = {
'mean': r'\exp \left( \mu +\frac{\sigma^{2}}{2} \right)',
'median': r'\exp ( \mu )',
'mode': r'\exp ( \mu - \sigma^{2} )',
'variance': r'[\exp (\sigma^{2}) - 1] \exp (2 \mu + \sigma^{2})'
}
def pdf(self, x, mu, sigma):
"""Lognormal PDF
ref: https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L5050
"""
mu = np.array(mu).astype(np.float) #, casting='no')
sigma = np.array(sigma).astype(np.float) #, casting='no')
fn = ((1 / (x * sigma * np.sqrt(2 * np.pi))) *
np.exp( -np.power( (np.log(x) - mu) / (sigma * np.sqrt(2)), 2) ))
return boundzero_numpy(fn, sigma > 0, x > 0)
def cdf(self, x, mu, sigma):
"""Lognormal CDF
ref: https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L5057
"""
mu = np.array(mu).astype(np.float) #, casting='no')
sigma = np.array(sigma).astype(np.float) #, casting='no')
z = (np.log(x) - mu) / sigma
fn = .5 * special.erfc( -z / np.sqrt(2))
return boundzero_numpy(fn, sigma > 0, x > 0)
def invcdf(self, u, mu, sigma):
"""Lognormal Inverse CDF aka PPF:
ref: https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L5063
"""
mu = np.array(mu).astype(np.float) #, casting='no')
sigma = np.array(sigma).astype(np.float) #, casting='no')
# u = np.maximum(np.minimum(u, 1-CLIP_U_AWAY_FROM_ZERO_ONE_FOR_INVCDFS), CLIP_U_AWAY_FROM_ZERO_ONE_FOR_INVCDFS)
fn = np.exp(mu - sigma * np.sqrt(2) * special.erfcinv(2 * u))
return boundzero_numpy(fn, sigma > 0, u >= 0, u <= 1)
def logpdf(self, x, mu, sigma):
"""Lognormal log PDF
ref: https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L5054
ref: https://github.com/pymc-devs/pymc3/blob/41a25d561b3aa40c75039955bf071b9632064a66/pymc3/distributions/continuous.py#L1887
"""
mu = np.array(mu).astype(np.float) #, casting='no')
sigma = np.array(sigma).astype(np.float) #, casting='no')
fn = - np.power(np.log(x)-mu,2) / (2 * np.power(sigma, 2)) + .5 * np.log(1 / (2 * np.pi * np.power(sigma, 2))) - np.log(x)
return boundlog_numpy(fn, sigma > 0, x > 0)
def logcdf(self, x, mu, sigma):
"""Lognormal log CDF
ref: https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L5060
ref: https://github.com/pymc-devs/pymc3/blob/41a25d561b3aa40c75039955bf071b9632064a66/pymc3/distributions/continuous.py#L1913
"""
mu = np.array(mu).astype(np.float) #, casting='no')
sigma = np.array(sigma).astype(np.float) #, casting='no')
fn = np.log(self.cdf(x, mu, sigma))
return boundlog_numpy(fn, sigma > 0, x > 0)
def loginvcdf(self, u, mu, sigma):
"""Lognormal log Inverse CDF aka log PPF
ref: ?
"""
mu = np.array(mu).astype(np.float) #, casting='no')
        sigma = np.array(sigma).astype(np.float)  #, casting='no')
        fn = mu - sigma * np.sqrt(2) * special.erfcinv(2 * u)
        return boundlog_numpy(fn, sigma > 0, u >= 0, u <= 1)
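# Illustrative self-check (not part of the original module): compare the manual
# LognormalNumpy implementation above against scipy.stats.lognorm, which uses the
# parameterization s=sigma, scale=exp(mu). The helper name below is hypothetical.
def _check_lognormal_against_scipy(mu=0.5, sigma=1.2):
    from scipy import stats
    d = LognormalNumpy()
    ref = stats.lognorm(s=sigma, scale=np.exp(mu))
    x = np.linspace(0.01, 10.0, 50)
    u = np.linspace(0.01, 0.99, 50)
    assert np.allclose(d.pdf(x, mu, sigma), ref.pdf(x))
    assert np.allclose(d.cdf(x, mu, sigma), ref.cdf(x))
    assert np.allclose(d.invcdf(u, mu, sigma), ref.ppf(u))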
import numpy as np
from model.model_geometry import node_distance
from model.constant_variables import (
D_rate_literature,
a_eta,
b_eta,
eta_0,
c_eta,
T_fus,
g,
rho_i,
pl1,
pl2,
)
def settling_vel(T, nz, coord, phi, SetVel, v_opt, viscosity):
"""
computes settling velocity, its spatial derivative and vertical stress
Arguments
-------------
T temperature [K]
nz number of computational nodes [-]
    coord       mesh coordinates of computational nodes in the snowpack [m]
phi ice volume fraction [-]
SetVel settling active: 'Y'; settling inactive: 'N'
Returns
--------------
v settling velocity for each computational node in the snowpack
v_dz spatial derivative of the settling velocity
sigma vertical stress at each computational node in the snowpack
"""
dz = node_distance(coord, nz)
if SetVel == "N":
v = np.zeros(nz) # [m s-1]
v_dz = np.zeros(nz) # [s-1]
sigma = sigma_cont_croc(dz, phi, nz, v_opt) # [Pa m-2]
elif SetVel == "Y":
D_coeff = np.zeros(nz) # Deformation rate coefficient [s-1]
if v_opt == "continuous":
# many computational nodes approx. continuous
eta = choose_viscosity(T, phi, viscosity, dz, nz)
sigma = sigma_cont_croc(dz, phi, nz, v_opt)
(v, v_dz) = velocity(sigma, eta, dz, nz, viscosity)
elif v_opt == "layer_based":
# 2 layer case with 3 computational nodes
            # mimics the layer-based scheme
            # only works with model geometry geom='layer_based0.5m_2Layer'
if nz != 3:
raise IndexError(
"For layer_based velocity only 3 computational nodes are allowed"
)
eta = choose_viscosity(T, phi, viscosity, dz, nz)
sigma = sigma_cont_croc(dz, phi, nz, v_opt)
(v, v_dz) = velocity(sigma, eta, dz, nz, viscosity)
elif v_opt == "polynom":
# linearly increasing with snow height
sigma = sigma_cont_croc(dz, phi, nz, v_opt)
D_coeff = -np.ones(nz) * D_rate_literature # deformation rate coefficient
D_rate = D_coeff # [1/s] Deformation rate
v = D_rate * coord # [m/s] settlement velocity
v_dz = D_rate
elif v_opt == "const":
# spatially constant settling velocity
v = -np.ones(nz) * D_rate_literature
v_dz = np.zeros(nz)
sigma = sigma_cont_croc(dz, phi, nz, v_opt)
elif v_opt == "phi_dependent":
# as found in firn models
v = np.zeros(nz) # [m s-1]
sigma = sigma_cont_croc(dz, phi, nz, v_opt)
phi_max = (0.4 - 0.9) / coord[-1] * coord + 0.9 # 0.25
restrict = 1 - phi / phi_max
D_coeff = -np.ones(nz) * D_rate_literature
            D_rate = D_coeff * restrict  # deformation rate
v_dz = D_rate.copy()
D_rate[0] = 0 # Deformation rate at bottom = 0
v[1:] = np.cumsum(D_rate[:-1] * dz[:]) # local settling velocity
v[0] = 0
else:
raise ValueError("Input for settling velocity v_opt not available")
else:
raise ValueError("Either N or Y allowed as input for SetVel")
return v, v_dz, sigma
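# Illustrative usage sketch (not part of the original module): build a small
# uniform mesh and evaluate the spatially constant settling option. Assumes the
# module's remaining helpers (e.g. sigma_cont_croc referenced above) exist as in
# the full file; the function name here is hypothetical.
def _example_settling_velocity(nz=11):
    coord = np.linspace(0.0, 0.5, nz)  # [m] node coordinates, bottom to top
    T = np.full(nz, 263.0)             # [K] isothermal snowpack
    phi = np.full(nz, 0.2)             # [-] ice volume fraction
    v, v_dz, sigma = settling_vel(T, nz, coord, phi, "Y", "const", "eta_constant_n1")
    return v, v_dz, sigma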
def choose_viscosity(T, phi, viscosity, dz, nz):
"""
    computes snow viscosity based on the chosen viscosity method (see Readme)
"""
T_const = 263
phi_const = 0.1125
eta = np.zeros_like(T)
restrict = (
np.exp(pl1 * phi - pl2) + 1
) # power law to restrict ice volume growth to <0.95
if viscosity == "eta_constant_n1":
# constant viscosity for linear stress strain relation, Glen's flow law n=1
etatest1 = (
eta_0
* rho_i
* phi_const
/ c_eta
* np.exp(a_eta * (T_fus - T_const) + b_eta * rho_i * phi_const)
)
        # apply power law to restrict ice volume growth to <0.95
eta = etatest1 * restrict
elif viscosity == "eta_phi": # visocosity controllfed by ice volume fraction
eta = (
eta_0
* rho_i
* phi
/ c_eta
* np.exp(a_eta * (T_fus - T_const) + b_eta * rho_i * phi)
)
elif viscosity == "eta_T": # visocosity controlled by temperature
eta = (
eta_0
* rho_i
* phi_const
/ c_eta
* np.exp(a_eta * (T_fus - T) + b_eta * rho_i * phi_const)
)
elif (
viscosity == "eta_phiT"
    ):  # viscosity controlled by ice volume fraction and temperature
eta = (
eta_0
* rho_i
* phi
/ c_eta
* np.exp(a_eta * (T_fus - T) + b_eta * rho_i * phi)
)
elif viscosity == "eta_constant_n3":
# non-linear stress strain rate relation, Glens flow law n=3
rho_eff = np.ones(nz)
rho_eff[0] = 150
x1 = 0.5
nz1 = int(x1 * nz)
nz2 = nz
for i in range(nz1 - 1):
rho_eff[i] = 150
rho_eff[nz1 - 1] = 131.25
rho_eff[nz1] = 112.5
rho_eff[nz1 + 1] = 93.75
rho_eff[nz1 + 2 : nz2] = 75
sigma = np.zeros(nz)
sigma_Dz = np.zeros(nz)
sigma_Dz[:-1] = g * phi[:-1] * rho_i * dz[:]
        sigma_Dz[-1] = 0  # no stress at the highest node: interface with atmosphere, no overburden snow mass
        sigma = np.cumsum(sigma_Dz[::-1])[::-1]  # overburden stress accumulated from the top down
"""
Created on Mon Aug 25 13:17:03 2014
@author: anthony
"""
import time
from multiprocessing import Pool
import matplotlib.pyplot as plt
import numpy as np
import scipy.interpolate as interp
from .cp_tools import cp_loglikelihood
from .cp_tools import cp_loglikelihood_proj
from .cp_tools import cp_model
from .cp_tools import mas2rad
from .cp_tools import project_cps
from .cp_tools import rad2mas
def phase_binary_flux(u, v, wavel, p, return_cvis=False):
"""Calculate the phases observed by an array on a binary star
----------------------------------------------------------------
p: 3-component vector (+2 optional), the binary "parameters":
- p[0] = sep (mas)
- p[1] = PA (deg) E of N.
- p[2] = flux (primary is assumed to be 1)
optional:
- p[2:] = contrast ratio for several wavelengths that we want
to calculate the cps over
- u,v: baseline coordinates (meters)
- wavel: wavelength (meters)
----------------------------------------------------------------"""
p = np.array(p)
# relative locations
th = (p[1] + 90.0) * np.pi / 180.0
ddec = mas2rad(p[0] * np.sin(th))
dra = -mas2rad(p[0] * np.cos(th))
# decompose into two "luminosities"
# but first, a little trick so this works whether
# p is a single value or a list of contrasts
spec = p[2:]
if len(spec) == 1:
spec = spec[0]
l2 = spec
l1 = 1 - l2
# phase-factor
output_shape = list(u.shape)
    output_shape[-1] = np.size(wavel)
# Distributed under the MIT License.
# See LICENSE.txt for details.
import numpy as np
def dg_package_data_u(u, flux_u, velocity_field, normal_covector,
mesh_velocity, normal_dot_mesh_velocity):
return u
def dg_package_data_normal_dot_flux_u(u, flux_u, velocity_field,
normal_covector, mesh_velocity,
normal_dot_mesh_velocity):
return np.einsum("i,i", flux_u, normal_covector)
def dg_package_data_abs_char_speed(u, flux_u, velocity_field, normal_covector,
mesh_velocity, normal_dot_mesh_velocity):
    normal_dot_velocity = np.einsum("i,i", velocity_field, normal_covector)
    if normal_dot_mesh_velocity is None:
        return np.abs(normal_dot_velocity)
    return np.abs(normal_dot_velocity - normal_dot_mesh_velocity)
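# Illustrative check (not part of the original test module): with a constant
# velocity field the packaged normal-dot-flux reduces to u * (v . n) and, with no
# moving mesh, the characteristic speed to |v . n|. The helper name is hypothetical.
def _example_package_data():
    u = 2.0
    velocity_field = np.array([1.0, 0.5, 0.0])
    normal_covector = np.array([0.0, 1.0, 0.0])
    flux_u = u * velocity_field
    ndf = dg_package_data_normal_dot_flux_u(u, flux_u, velocity_field,
                                            normal_covector, None, None)
    speed = dg_package_data_abs_char_speed(u, flux_u, velocity_field,
                                           normal_covector, None, None)
    return ndf, speed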
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import operator
import re
from copy import copy as copy_obj
from numbers import Integral
from typing import Type, Sequence
import numpy as np
import pandas as pd
from pandas._libs import lib
from pandas.api.indexers import check_array_indexer
from pandas.api.types import (
pandas_dtype,
is_scalar,
is_array_like,
is_string_dtype,
is_list_like,
)
from pandas.api.extensions import (
ExtensionArray,
ExtensionDtype,
register_extension_dtype,
)
from pandas.arrays import StringArray as StringArrayBase
from pandas.core import ops
from pandas.core.algorithms import take
from pandas.compat import set_function_name
try:
from pandas._libs.arrays import NDArrayBacked
except ImportError:
NDArrayBacked = None
try:
import pyarrow as pa
pa_null = pa.NULL
except ImportError: # pragma: no cover
pa = None
pa_null = None
from ..config import options
from ..core import is_kernel_mode
from ..lib.version import parse as parse_version
from ..utils import tokenize
_use_bool_any_all = parse_version(pd.__version__) >= parse_version("1.3.0")
class ArrowDtype(ExtensionDtype):
@property
def arrow_type(self): # pragma: no cover
raise NotImplementedError
def __from_arrow__(self, array):
return self.construct_array_type()(array)
@register_extension_dtype
class ArrowStringDtype(ArrowDtype):
"""
Extension dtype for arrow string data.
.. warning::
ArrowStringDtype is considered experimental. The implementation and
parts of the API may change without warning.
In particular, ArrowStringDtype.na_value may change to no longer be
``numpy.nan``.
Attributes
----------
None
Methods
-------
None
Examples
--------
>>> import mars.dataframe as md
>>> md.ArrowStringDtype()
ArrowStringDtype
"""
type = str
kind = "U"
name = "Arrow[string]"
na_value = pa_null
@classmethod
def construct_from_string(cls, string):
if string == cls.name:
return cls()
else:
raise TypeError(f"Cannot construct a '{cls}' from '{string}'")
@classmethod
def construct_array_type(cls) -> "Type[ArrowStringArray]":
return ArrowStringArray
@property
def arrow_type(self):
return pa.string()
@register_extension_dtype
class ArrowStringDtypeAlias(ArrowStringDtype):
name = "arrow_string" # register an alias name for compatibility
class ArrowListDtypeType(type):
"""
the type of ArrowListDtype, this metaclass determines subclass ability
"""
pass
class ArrowListDtype(ArrowDtype):
_metadata = ("_value_type",)
def __init__(self, dtype):
if isinstance(dtype, type(self)):
dtype = dtype.value_type
if pa and isinstance(dtype, pa.DataType):
dtype = dtype.to_pandas_dtype()
dtype = pandas_dtype(dtype)
if is_string_dtype(dtype) and not isinstance(dtype, ArrowStringDtype):
# convert string dtype to arrow string dtype
dtype = ArrowStringDtype()
self._value_type = dtype
@property
def value_type(self):
return self._value_type
@property
def kind(self):
return "O"
@property
def type(self):
return ArrowListDtypeType
@property
def name(self):
return f"Arrow[List[{self.value_type.name}]]"
@property
def arrow_type(self):
if isinstance(self._value_type, ArrowDtype):
arrow_subdtype = self._value_type.arrow_type
else:
arrow_subdtype = pa.from_numpy_dtype(self._value_type)
return pa.list_(arrow_subdtype)
def __repr__(self) -> str:
return self.name
@classmethod
def construct_array_type(cls) -> "Type[ArrowListArray]":
return ArrowListArray
@classmethod
def construct_from_string(cls, string):
msg = f"Cannot construct a 'ArrowListDtype' from '{string}'"
xpr = re.compile(r"Arrow\[List\[(?P<value_type>[^,]*)\]\]$")
m = xpr.match(string)
if m:
value_type = m.groupdict()["value_type"]
return ArrowListDtype(value_type)
else:
raise TypeError(msg)
@classmethod
def is_dtype(cls, dtype) -> bool:
dtype = getattr(dtype, "dtype", dtype)
if isinstance(dtype, str):
try:
cls.construct_from_string(dtype)
except TypeError:
return False
else:
return True
else:
return isinstance(dtype, cls)
def __hash__(self):
return super().__hash__()
def __eq__(self, other):
if not isinstance(other, ArrowListDtype):
return False
value_type = self._value_type
other_value_type = other._value_type
try:
return value_type == other_value_type
except TypeError:
# cannot compare numpy dtype and extension dtype
return other_value_type == value_type
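# Illustrative usage (not part of the original module): an ArrowListDtype can be
# built from a numpy dtype and parsed back from its string name; the helper name
# below is hypothetical.
def _example_arrow_list_dtype():
    dt = ArrowListDtype(np.int64)
    assert dt.name == "Arrow[List[int64]]"
    assert ArrowListDtype.construct_from_string(dt.name) == dt
    assert ArrowListDtype.is_dtype(dt.name)
    return dt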
class ArrowArray(ExtensionArray):
_arrow_type = None
def __init__(self, values, dtype: ArrowDtype = None, copy=False):
pandas_only = self._pandas_only()
if pa is not None and not pandas_only:
self._init_by_arrow(values, dtype=dtype, copy=copy)
elif not is_kernel_mode():
# not in kernel mode, allow to use numpy handle data
# just for infer dtypes purpose
self._init_by_numpy(values, dtype=dtype, copy=copy)
else:
raise ImportError(
"Cannot create ArrowArray " "when `pyarrow` not installed"
)
# for test purpose
self._force_use_pandas = pandas_only
def _init_by_arrow(self, values, dtype: ArrowDtype = None, copy=False):
if isinstance(values, (pd.Index, pd.Series)):
# for pandas Index and Series,
# convert to PandasArray
values = values.array
if isinstance(values, type(self)):
arrow_array = values._arrow_array
elif isinstance(values, ExtensionArray):
# if come from pandas object like index,
# convert to pandas StringArray first,
# validation will be done in construct
arrow_array = pa.chunked_array([pa.array(values, from_pandas=True)])
elif isinstance(values, pa.ChunkedArray):
arrow_array = values
elif isinstance(values, pa.Array):
arrow_array = pa.chunked_array([values])
else:
arrow_array = pa.chunked_array([pa.array(values, type=dtype.arrow_type)])
if copy:
arrow_array = copy_obj(arrow_array)
self._use_arrow = True
self._arrow_array = arrow_array
if NDArrayBacked is not None and isinstance(self, NDArrayBacked):
NDArrayBacked.__init__(self, np.array([]), dtype)
else:
self._dtype = dtype
def _init_by_numpy(self, values, dtype: ArrowDtype = None, copy=False):
self._use_arrow = False
        ndarray = np.array(values, copy=copy)
"""
This module contains the `PostProcessor` class.
It contains all advanced postprocessing functionalities that require Python 3.x packages like NumPy and Matplotlib.
"""
from __future__ import absolute_import # noreorder
import math
import os
import time
import warnings
from pyaedt.generic.general_methods import is_ironpython
from pyaedt.generic.general_methods import pyaedt_function_handler
from pyaedt.generic.plot import ModelPlotter
from pyaedt.modules.PostProcessor import PostProcessor as Post
if not is_ironpython:
try:
import numpy as np
except ImportError:
warnings.warn(
"The NumPy module is required to run some functionalities of PostProcess.\n"
"Install with \n\npip install numpy\n\nRequires CPython."
)
try:
from IPython.display import Image
ipython_available = True
except ImportError:
warnings.warn(
"The Ipython module is required to run some functionalities of PostProcess.\n"
"Install with \n\npip install ipython\n\nRequires CPython."
)
try:
import matplotlib.pyplot as plt
except ImportError:
warnings.warn(
"The Matplotlib module is required to run some functionalities of PostProcess.\n"
"Install with \n\npip install matplotlib\n\nRequires CPython."
)
except:
pass
class PostProcessor(Post):
"""Contains advanced postprocessing functionalities that require Python 3.x packages like NumPy and Matplotlib.
Parameters
----------
app :
Inherited parent object.
Examples
--------
Basic usage demonstrated with an HFSS, Maxwell, or any other design:
>>> from pyaedt import Hfss
>>> aedtapp = Hfss()
>>> post = aedtapp.post
"""
def __init__(self, app):
Post.__init__(self, app)
@pyaedt_function_handler()
def nb_display(self, show_axis=True, show_grid=True, show_ruler=True):
"""Show the Jupyter Notebook display.
.. note::
.assign_curvature_extraction Jupyter Notebook is not supported by IronPython.
Parameters
----------
show_axis : bool, optional
Whether to show the axes. The default is ``True``.
show_grid : bool, optional
Whether to show the grid. The default is ``True``.
show_ruler : bool, optional
Whether to show the ruler. The default is ``True``.
Returns
-------
:class:`IPython.core.display.Image`
Jupyter notebook image.
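
        Examples
        --------
        Illustrative call inside a Jupyter notebook (``aedtapp`` as in the class example above):
        >>> aedtapp.post.nb_display(show_grid=False)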
"""
file_name = self.export_model_picture(show_axis=show_axis, show_grid=show_grid, show_ruler=show_ruler)
return Image(file_name, width=500)
@pyaedt_function_handler()
def get_efields_data(self, setup_sweep_name="", ff_setup="Infinite Sphere1", freq="All"):
"""Compute Etheta and EPhi.
.. warning::
This method requires NumPy to be installed on your machine.
Parameters
----------
setup_sweep_name : str, optional
Name of the setup for computing the report. The default is ``""``, in
which case the nominal adaptive is applied.
ff_setup : str, optional
Far field setup. The default is ``"Infinite Sphere1"``.
freq : str, optional
The default is ``"All"``.
Returns
-------
        dict
            Dictionary keyed by source name, each entry containing
            ``[theta_range, phi_range, Etheta, Ephi]``.
"""
if not setup_sweep_name:
setup_sweep_name = self._app.nominal_adaptive
results_dict = {}
all_sources = self.post_osolution.GetAllSources()
# assuming only 1 mode
all_sources_with_modes = [s + ":1" for s in all_sources]
for n, source in enumerate(all_sources_with_modes):
edit_sources_ctxt = [["IncludePortPostProcessing:=", False, "SpecifySystemPower:=", False]]
for m, each in enumerate(all_sources_with_modes):
if n == m: # set only 1 source to 1W, all the rest to 0
mag = 1
else:
mag = 0
phase = 0
edit_sources_ctxt.append(
["Name:=", "{}".format(each), "Magnitude:=", "{}W".format(mag), "Phase:=", "{}deg".format(phase)]
)
self.post_osolution.EditSources(edit_sources_ctxt)
ctxt = ["Context:=", ff_setup]
sweeps = ["Theta:=", ["All"], "Phi:=", ["All"], "Freq:=", [freq]]
trace_name = "rETheta"
solnData = self.get_far_field_data(
setup_sweep_name=setup_sweep_name, domain=ff_setup, expression=trace_name
)
data = solnData.nominal_variation
theta_vals = np.degrees(np.array(data.GetSweepValues("Theta")))
phi_vals = np.degrees(np.array(data.GetSweepValues("Phi")))
# phi is outer loop
theta_unique = np.unique(theta_vals)
phi_unique = np.unique(phi_vals)
theta_range = np.linspace(np.min(theta_vals), np.max(theta_vals), np.size(theta_unique))
phi_range = np.linspace(np.min(phi_vals), np.max(phi_vals), np.size(phi_unique))
real_theta = np.array(data.GetRealDataValues(trace_name))
imag_theta = np.array(data.GetImagDataValues(trace_name))
trace_name = "rEPhi"
solnData = self.get_far_field_data(
setup_sweep_name=setup_sweep_name, domain=ff_setup, expression=trace_name
)
data = solnData.nominal_variation
real_phi = np.array(data.GetRealDataValues(trace_name))
imag_phi = np.array(data.GetImagDataValues(trace_name))
Etheta = np.vectorize(complex)(real_theta, imag_theta)
Ephi = np.vectorize(complex)(real_phi, imag_phi)
source_name_without_mode = source.replace(":1", "")
results_dict[source_name_without_mode] = [theta_range, phi_range, Etheta, Ephi]
return results_dict
@pyaedt_function_handler()
def ff_sum_with_delta_phase(self, ff_data, xphase=0, yphase=0):
"""Generate a far field sum with a delta phase.
Parameters
----------
ff_data :
xphase : float, optional
Phase in the X-axis direction. The default is ``0``.
yphase : float, optional
Phase in the Y-axis direction. The default is ``0``.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
array_size = [4, 4]
loc_offset = 2
rETheta = ff_data[2]
rEPhi = ff_data[3]
        weight = np.zeros((array_size[0], array_size[1]), dtype=complex)
        mag = np.ones((array_size[0], array_size[1]))
        for m in range(array_size[0]):
            for n in range(array_size[1]):
                mag_val = mag[m][n]
                ang = np.radians(xphase * m) + np.radians(yphase * n)
                weight[m][n] = np.sqrt(mag_val) * np.exp(1j * ang)
return True
@pyaedt_function_handler()
def plot_model_obj(
self,
objects=None,
show=True,
export_path=None,
plot_as_separate_objects=True,
plot_air_objects=False,
force_opacity_value=None,
clean_files=False,
):
"""Plot the model or a substet of objects.
Parameters
----------
objects : list, optional
Optional list of objects to plot. If `None` all objects will be exported.
show : bool, optional
Show the plot after generation or simply return the
generated Class for more customization before plot.
export_path : str, optional
If available, an image is saved to file. If `None` no image will be saved.
plot_as_separate_objects : bool, optional
Plot each object separately. It may require more time to export from AEDT.
plot_air_objects : bool, optional
Plot also air and vacuum objects.
force_opacity_value : float, optional
Opacity value between 0 and 1 to be applied to all model.
If `None` aedt opacity will be applied to each object.
clean_files : bool, optional
            Clean created files after the plot. The cache is maintained in the returned model object.
Returns
-------
:class:`pyaedt.generic.plot.ModelPlotter`
Model Object.
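
        Examples
        --------
        Illustrative usage (the application object and exported file name are placeholders):
        >>> from pyaedt import Hfss
        >>> aedtapp = Hfss()
        >>> model = aedtapp.post.plot_model_obj(show=False, export_path="model_picture.png")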
"""
assert self._app._aedt_version >= "2021.2", self.logger.error("Object is supported from AEDT 2021 R2.")
files = self.export_model_obj(
obj_list=objects,
export_as_single_objects=plot_as_separate_objects,
air_objects=plot_air_objects,
)
if not files:
self.logger.warning("No Objects exported. Try other options or include Air objects.")
return False
model = ModelPlotter()
for file in files:
if force_opacity_value:
model.add_object(file[0], file[1], force_opacity_value, self.modeler.model_units)
else:
model.add_object(file[0], file[1], file[2], self.modeler.model_units)
if not show:
model.off_screen = True
if export_path:
model.plot(export_path)
elif show:
model.plot()
if clean_files:
model.clean_cache_and_files(clean_cache=False)
return model
@pyaedt_function_handler()
def plot_field_from_fieldplot(
self,
plotname,
project_path="",
meshplot=False,
imageformat="jpg",
view="isometric",
plot_label="Temperature",
plot_folder=None,
show=True,
scale_min=None,
scale_max=None,
):
"""Export a field plot to an image file (JPG or PNG) using Python Plotly.
.. note::
The Plotly module rebuilds the mesh and the overlap fields on the mesh.
Parameters
----------
plotname : str
Name of the field plot to export.
project_path : str, optional
Path for saving the image file. The default is ``""``.
meshplot : bool, optional
Whether to create and plot the mesh over the fields. The
default is ``False``.
imageformat : str, optional
Format of the image file. Options are ``"jpg"``,
``"png"``, ``"svg"``, and ``"webp"``. The default is
``"jpg"``.
view : str, optional
View to export. Options are ``isometric``, ``top``, ``front``,
            ``left``, and ``all``. The default is ``"isometric"``. If ``"all"``, all views are exported.
plot_label : str, optional
Type of the plot. The default is ``"Temperature"``.
plot_folder : str, optional
Plot folder to update before exporting the field.
The default is ``None``, in which case all plot
folders are updated.
show : bool, optional
Export Image without plotting on UI.
scale_min : float, optional
Fix the Scale Minimum value.
scale_max : float, optional
Fix the Scale Maximum value.
Returns
-------
:class:`pyaedt.generic.plot.ModelPlotter`
Model Object.
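
        Examples
        --------
        Illustrative usage, assuming a field plot named ``"Mag_E1"`` already exists in the design:
        >>> model = aedtapp.post.plot_field_from_fieldplot(
        ...     "Mag_E1", imageformat="jpg", view="isometric", show=False)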
"""
if not plot_folder:
self.ofieldsreporter.UpdateAllFieldsPlots()
else:
self.ofieldsreporter.UpdateQuantityFieldsPlots(plot_folder)
start = time.time()
file_to_add = self.export_field_plot(plotname, self._app.working_directory)
models = None
if not file_to_add:
return False
else:
if self._app._aedt_version >= "2021.2":
models = self.export_model_obj(export_as_single_objects=True, air_objects=False)
model = ModelPlotter()
model.off_screen = not show
if file_to_add:
model.add_field_from_file(file_to_add, coordinate_units=self.modeler.model_units, show_edges=meshplot)
if plot_label:
model.fields[0].label = plot_label
if models:
for m in models:
model.add_object(m[0], m[1], m[2])
model.view = view
if scale_min and scale_max:
model.range_min = scale_min
model.range_max = scale_max
if show or project_path:
model.plot(os.path.join(project_path, self._app.project_name + "." + imageformat))
model.clean_cache_and_files(clean_cache=False)
return model
@pyaedt_function_handler()
def animate_fields_from_aedtplt(
self,
plotname,
plot_folder=None,
meshplot=False,
variation_variable="Phi",
variation_list=["0deg"],
project_path="",
export_gif=False,
show=True,
):
"""Generate a field plot to an image file (JPG or PNG) using PyVista.
.. note::
The PyVista module rebuilds the mesh and the overlap fields on the mesh.
Parameters
----------
plotname : str
Name of the plot or the name of the object.
plot_folder : str, optional
Name of the folder in which the plot resides. The default
is ``None``.
variation_variable : str, optional
Variable to vary. The default is ``"Phi"``.
variation_list : list, optional
List of variation values with units. The default is
``["0deg"]``.
project_path : str, optional
Path for the export. The default is ``""`` which export file in working_directory.
meshplot : bool, optional
The default is ``False``. Valid from Version 2021.2.
export_gif : bool, optional
The default is ``False``.
        show : bool, optional
Generate the animation without showing an interactive plot. The default is ``True``.
Returns
-------
:class:`pyaedt.generic.plot.ModelPlotter`
Model Object.
"""
if not plot_folder:
self.ofieldsreporter.UpdateAllFieldsPlots()
else:
self.ofieldsreporter.UpdateQuantityFieldsPlots(plot_folder)
models_to_add = []
if meshplot:
if self._app._aedt_version >= "2021.2":
models_to_add = self.export_model_obj(export_as_single_objects=True, air_objects=False)
fields_to_add = []
if not project_path:
project_path = self._app.working_directory
for el in variation_list:
self._app._odesign.ChangeProperty(
[
"NAME:AllTabs",
[
"NAME:FieldsPostProcessorTab",
["NAME:PropServers", "FieldsReporter:" + plotname],
["NAME:ChangedProps", ["NAME:" + variation_variable, "Value:=", el]],
],
]
)
fields_to_add.append(
self.export_field_plot(plotname, project_path, plotname + variation_variable + str(el))
)
model = ModelPlotter()
model.off_screen = not show
if models_to_add:
for m in models_to_add:
model.add_object(m[0], cad_color=m[1], opacity=m[2])
if fields_to_add:
model.add_frames_from_file(fields_to_add)
if export_gif:
model.gif_file = os.path.join(self._app.working_directory, self._app.project_name + ".gif")
if show or export_gif:
model.animate()
model.clean_cache_and_files(clean_cache=False)
return model
@pyaedt_function_handler()
def animate_fields_from_aedtplt_2(
self,
quantityname,
object_list,
plottype,
meshplot=False,
setup_name=None,
intrinsic_dict={},
variation_variable="Phi",
variation_list=["0deg"],
project_path="",
export_gif=False,
show=True,
zoom=None,
):
"""Generate a field plot to an animated gif file using PyVista.
.. note::
The PyVista module rebuilds the mesh and the overlap fields on the mesh.
This method creates the plot and exports it.
It is an alternative to the method :func:`animate_fields_from_aedtplt`,
which uses an existing plot.
Parameters
----------
quantityname : str
Name of the plot or the name of the object.
object_list : list, optional
            List of objects to create the field plot on.
plottype : str
Type of the plot. Options are ``"Surface"``, ``"Volume"``, and
``"CutPlane"``.
meshplot : bool, optional
The default is ``False``.
setup_name : str, optional
Name of the setup (sweep) to use for the export. The default is
``None``.
intrinsic_dict : dict, optional
Intrinsic dictionary that is needed for the export.
The default is ``{}``.
variation_variable : str, optional
Variable to vary. The default is ``"Phi"``.
variation_list : list, option
List of variation values with units. The default is
``["0deg"]``.
project_path : str, optional
Path for the export. The default is ``""`` which export file in working_directory.
export_gif : bool, optional
Whether to export to a GIF file. The default is ``False``,
in which case the plot is exported to a JPG file.
show : bool, optional
Generate the animation without showing an interactive plot. The default is ``True``.
zoom : float, optional
Zoom factor.
Returns
-------
:class:`pyaedt.generic.plot.ModelPlotter`
Model Object.
"""
if not project_path:
project_path = self._app.working_directory
models_to_add = []
if meshplot:
if self._app._aedt_version >= "2021.2":
models_to_add = self.export_model_obj(export_as_single_objects=True, air_objects=False)
v = 0
fields_to_add = []
for el in variation_list:
intrinsic_dict[variation_variable] = el
if plottype == "Surface":
plotf = self.create_fieldplot_surface(object_list, quantityname, setup_name, intrinsic_dict)
elif plottype == "Volume":
plotf = self.create_fieldplot_volume(object_list, quantityname, setup_name, intrinsic_dict)
else:
plotf = self.create_fieldplot_cutplane(object_list, quantityname, setup_name, intrinsic_dict)
if plotf:
file_to_add = self.export_field_plot(plotf.name, project_path, plotf.name + str(v))
if file_to_add:
fields_to_add.append(file_to_add)
plotf.delete()
v += 1
model = ModelPlotter()
model.off_screen = not show
if models_to_add:
for m in models_to_add:
model.add_object(m[0], cad_color=m[1], opacity=m[2])
if fields_to_add:
model.add_frames_from_file(fields_to_add)
if export_gif:
model.gif_file = os.path.join(self._app.working_directory, self._app.project_name + ".gif")
if zoom:
model.zoom = zoom
if show or export_gif:
model.animate()
model.clean_cache_and_files(clean_cache=False)
return model
@pyaedt_function_handler()
def far_field_plot(self, ff_data, x=0, y=0, qty="rETotal", dB=True, array_size=[4, 4]):
"""Generate a far field plot.
Parameters
----------
ff_data :
x : float, optional
The default is ``0``.
y : float, optional
The default is ``0``.
qty : str, optional
The default is ``"rETotal"``.
dB : bool, optional
The default is ``True``.
array_size : list
List for the array size. The default is ``[4, 4]``.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
loc_offset = 2 # if array index is not starting at [1,1]
xphase = float(y)
yphase = float(x)
array_shape = (array_size[0], array_size[1])
weight = np.zeros(array_shape, dtype=complex)
mag = np.ones(array_shape, dtype="object")
port_names_arranged = np.chararray(array_shape)
all_ports = ff_data.keys()
w_dict = {}
# calculate weights based off of progressive phase shift
port_name = []
for m in range(array_shape[0]):
for n in range(array_shape[1]):
mag_val = mag[m][n]
ang = np.radians(xphase * m) + np.radians(yphase * n)
weight[m][n] = np.sqrt(mag_val) * np.exp(1j * ang)
current_index_str = "[" + str(m + 1 + loc_offset) + "," + str(n + 1 + loc_offset) + "]"
port_name = [y for y in all_ports if current_index_str in y]
w_dict[port_name[0]] = weight[m][n]
length_of_ff_data = len(ff_data[port_name[0]][2])
array_shape = (len(w_dict), length_of_ff_data)
rEtheta_fields = np.zeros(array_shape, dtype=complex)
rEphi_fields = np.zeros(array_shape, dtype=complex)
w = np.zeros((1, array_shape[0]), dtype=complex)
# create port mapping
Ntheta = 0
Nphi = 0
for n, port in enumerate(ff_data.keys()):
re_theta = ff_data[port][2]
re_phi = ff_data[port][3]
re_theta = re_theta * w_dict[port]
w[0][n] = w_dict[port]
re_phi = re_phi * w_dict[port]
rEtheta_fields[n] = re_theta
rEphi_fields[n] = re_phi
theta_range = ff_data[port][0]
phi_range = ff_data[port][1]
            theta = [int(np.min(theta_range)), int(np.max(theta_range)), np.size(theta_range)]
import os,warnings
warnings.filterwarnings("ignore")
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
import tensorflow.contrib.slim as slim
import scipy.io as sio
from sklearn.utils import shuffle
from util import gpusession,nzr,print_n_txt,remove_file_if_exists
import matplotlib.pyplot as plt
class mlp_reg_class(object):
def __init__(self,_name='mlp_reg',_x_dim=1,_y_dim=1,_h_dims=[64, 64],_actv=tf.nn.tanh,_bn=slim.batch_norm,
_l2_reg_coef=1e-5,_GPU_ID=0,_L1_LOSS=False,_ROBUST_LOSS=False,_LEAKY_ROBUST_LOSS=False,_VERBOSE=True):
self.name = _name
self.x_dim = _x_dim
self.y_dim = _y_dim
self.h_dims = _h_dims
self.actv = _actv
self.bn = _bn
self.l2_reg_coef = _l2_reg_coef
self.GPU_ID = _GPU_ID
self.L1_LOSS = _L1_LOSS
self.ROBUST_LOSS = _ROBUST_LOSS
self.LEAKY_ROBUST_LOSS = _LEAKY_ROBUST_LOSS
self.VERBOSE = _VERBOSE
if _GPU_ID < 0: # with CPU only (no GPU)
# Build model
self.build_model()
# Build graph
self.build_graph()
# Check params
self.check_params()
else: # with GPU
with tf.device('/device:GPU:%d' % (self.GPU_ID)):
# Build model
self.build_model()
# Build graph
self.build_graph()
# Check params
self.check_params()
def build_model(self):
self.x = tf.placeholder(dtype=tf.float32,shape=[None,self.x_dim]) # Input [N x xdim]
self.y = tf.placeholder(dtype=tf.float32,shape=[None,self.y_dim]) # Output [N x ydim]
self.kp = tf.placeholder(dtype=tf.float32,shape=[]) # Keep probability
self.lr = tf.placeholder(dtype=tf.float32,shape=[]) # Learning rate
self.is_training = tf.placeholder(dtype=tf.bool,shape=[]) # Training flag
self.fully_init = tf.random_normal_initializer(stddev=0.01)
self.bias_init = tf.constant_initializer(0.)
self.bn_init = {'beta': tf.constant_initializer(0.),
'gamma': tf.random_normal_initializer(1., 0.01)}
self.bn_params = {'is_training':self.is_training,'decay':0.9,'epsilon':1e-5,
'param_initializers':self.bn_init,'updates_collections':None}
# Build graph
with tf.variable_scope(self.name,reuse=False) as scope:
with slim.arg_scope([slim.fully_connected]
,activation_fn=self.actv
,weights_initializer=self.fully_init
,biases_initializer=self.bias_init
,normalizer_fn=self.bn,normalizer_params=self.bn_params
,weights_regularizer=None):
_net = self.x # Input [N x xdim]
for h_idx in range(len(self.h_dims)): # Loop over hidden layers
_h_dim = self.h_dims[h_idx]
_net = slim.fully_connected(_net,_h_dim,scope='lin'+str(h_idx))
_net = slim.dropout(_net,keep_prob=self.kp,is_training=self.is_training
,scope='dr'+str(h_idx))
self.feat = _net # Feature [N x Q]
self.out = slim.fully_connected(self.feat,self.y_dim,activation_fn=None
,scope='out') # [N x D]
def build_graph(self):
# fitting loss
if self.L1_LOSS: # L1 loss
self._loss_fit = tf.reduce_sum(tf.abs(self.out-self.y),axis=1) # [N x 1]
self.loss_fit = tf.reduce_mean(self._loss_fit) # [1]
elif self.ROBUST_LOSS: # Tukey biweight loss
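            # Tukey's biweight (bisquare) loss: rho(r) = (c^2/6) * (1 - (1 - (r/c)^2)^3)
            # for |r| <= c and rho(r) = c^2/6 otherwise, so residuals beyond the cutoff c
            # contribute a constant penalty and outliers stop driving the gradient.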
USE_MAD = False
self.residuals = self.out-self.y # [N x 1]
if USE_MAD:
median = tf.contrib.distributions.percentile(self.residuals, 50.0)
temp = tf.abs(self.residuals-median)
mad = tf.contrib.distributions.percentile(temp, 50.0)
b = 1.4826 # 1.4826
self.r_mad = self.residuals/b/mad
else:
self.r_mad = self.residuals
c = 1 # 4.6851
self.condition = tf.less(tf.abs(self.r_mad),c)
self._loss_fit = tf.where(self.condition,
c*c/6*(1-tf.pow((1-tf.pow(self.r_mad/c,2)),3)),
c*c/6*tf.ones_like(self.r_mad)) # [N x 1]
self.loss_fit = tf.reduce_mean(self._loss_fit) # [1]
        elif self.LEAKY_ROBUST_LOSS: # Leaky Tukey biweight loss
USE_MAD = False
self.residuals = self.out-self.y # [N x 1]
if USE_MAD:
median = tf.contrib.distributions.percentile(self.residuals, 50.0)
temp = tf.abs(self.residuals-median)
mad = tf.contrib.distributions.percentile(temp, 50.0)
b = 1.4826 # 1.4826
self.r_mad = self.residuals/b/mad
else:
self.r_mad = self.residuals
c = 1 # 4.6851
self.condition = tf.less(tf.abs(self.r_mad),c)
leaky_rate = 0.1 # 0.1
self._loss_fit = tf.where(self.condition,
c*c/6*(1-tf.pow((1-tf.pow(self.r_mad/c,2)),3)),
leaky_rate*(tf.abs(self.r_mad)-c) + c*c/6) # [N x 1]
self.loss_fit = tf.reduce_mean(self._loss_fit) # [1]
else: # ordinary L2 loss
self._loss_fit = tf.reduce_sum(tf.pow(self.out-self.y,2),axis=1) # [N x 1]
self.loss_fit = tf.reduce_mean(self._loss_fit) # [1]
# Weight decay
_t_vars = tf.trainable_variables()
self.c_vars = [var for var in _t_vars if '%s/'%(self.name) in var.name]
self.l2_reg = self.l2_reg_coef*tf.reduce_sum(tf.stack([tf.nn.l2_loss(v) for v in self.c_vars])) # [1]
self.loss_total = self.loss_fit + self.l2_reg # [1]
# Optimizer
USE_ADAM = False
if USE_ADAM:
self.optm = tf.train.AdamOptimizer(learning_rate=self.lr,beta1=0.9,beta2=0.999
,epsilon=1e-8).minimize(self.loss_total)
else:
self.optm = tf.train.MomentumOptimizer(learning_rate=self.lr
,momentum=0.5).minimize(self.loss_total)
def check_params(self):
_g_vars = tf.global_variables()
self.g_vars = [var for var in _g_vars if '%s/'%(self.name) in var.name]
if self.VERBOSE:
print ("==== Global Variables ====")
for i in range(len(self.g_vars)):
w_name = self.g_vars[i].name
w_shape = self.g_vars[i].get_shape().as_list()
if self.VERBOSE:
print (" [%02d] Name:[%s] Shape:[%s]" % (i,w_name,w_shape))
def sampler(self,_sess,_x):
outVal = _sess.run(self.out,feed_dict={self.x:_x,self.kp:1.0,self.is_training:False})
return outVal
def save2npz(self,_sess,_save_name=None):
""" Save name """
if _save_name==None:
_save_name='net/net_%s.npz'%(self.name)
""" Get global variables """
self.g_wnames,self.g_wvals,self.g_wshapes = [],[],[]
for i in range(len(self.g_vars)):
curr_wname = self.g_vars[i].name
curr_wvar = [v for v in tf.global_variables() if v.name==curr_wname][0]
curr_wval = _sess.run(curr_wvar)
curr_wval_sqz = curr_wval.squeeze()
self.g_wnames.append(curr_wname)
self.g_wvals.append(curr_wval_sqz)
self.g_wshapes.append(curr_wval.shape)
""" Save """
np.savez(_save_name,g_wnames=self.g_wnames,g_wvals=self.g_wvals,g_wshapes=self.g_wshapes)
if self.VERBOSE:
print ("[%s] saved. Size is [%.4f]MB" %
(_save_name,os.path.getsize(_save_name)/1000./1000.))
def restore_from_npz(self,_sess,_loadname=None):
if _loadname==None:
_loadname='net/net_%s_final.npz'%(self.name)
l = np.load(_loadname)
g_wnames = l['g_wnames']
g_wvals = l['g_wvals']
g_wshapes = l['g_wshapes']
for widx,wname in enumerate(g_wnames):
curr_wvar = [v for v in tf.global_variables() if v.name==wname][0]
_sess.run(tf.assign(curr_wvar,g_wvals[widx].reshape(g_wshapes[widx])))
if self.VERBOSE:
print ("Weight restored from [%s] Size is [%.4f]MB" %
(_loadname,os.path.getsize(_loadname)/1000./1000.))
def save2mat_from_npz(self,_x_train='',_y_train='',_save_name=None,_npz_path=None):
# Save weights to mat file so that MATLAB can use it.
if _npz_path == None:
_npz_path = 'net/net_%s.npz'%(self.name)
l = np.load(_npz_path)
g_wnames = l['g_wnames']
g_wvals = l['g_wvals']
g_wshapes = l['g_wshapes']
D = {}
for w_idx,w_name in enumerate(g_wnames):
curr_name = w_name.replace(':0','')
curr_name = curr_name.replace(self.name+'/','')
curr_name = curr_name.replace('/','_')
curr_val = g_wvals[w_idx].reshape(g_wshapes[w_idx])
D[curr_name] = curr_val
# Save train data
if _x_train!='': D['x_train'] = _x_train
if _y_train!='': D['y_train'] = _y_train
# Save dictionary D to the mat file
if _save_name == None:
_save_name = 'net/net_%s.mat'%(self.name)
sio.savemat(_save_name,D)
if self.VERBOSE:
print ("[%s] saved. Size is [%.4f]MB" %
(_save_name,os.path.getsize(_save_name)/1000./1000.))
def train(self,_sess,_x_train,_y_train,_lr=1e-3,_batch_size=512,_max_epoch=1e4,_kp=1.0,
_LR_SCHEDULE=True,_PRINT_EVERY=20,_PLOT_EVERY=20,
_SAVE_TXT=True,_SAVE_BEST_NET=True,_SAVE_FINAL=True,_REMOVE_PREVS=True,
_x_dim4plot=0,_x_name4plot=None):
self.x_dim4plot = _x_dim4plot
self.x_name4plot = _x_name4plot
# Remove existing files
if _REMOVE_PREVS:
remove_file_if_exists('net/net_%s_best.npz'%(self.name),_VERBOSE=self.VERBOSE)
remove_file_if_exists('net/net_%s_best.mat'%(self.name),_VERBOSE=self.VERBOSE)
remove_file_if_exists('net/net_%s_final.npz'%(self.name),_VERBOSE=self.VERBOSE)
remove_file_if_exists('net/net_%s_final.mat'%(self.name),_VERBOSE=self.VERBOSE)
remove_file_if_exists('res/res_%s.txt'%(self.name),_VERBOSE=self.VERBOSE)
# Reference training data
x_train,y_train = _x_train,_y_train
if len(np.shape(y_train)) == 1: # if y is a vector
y_train = np.reshape(y_train,newshape=[-1,1]) # make it rank two
self.nzr_x,self.nzr_y = nzr(x_train),nzr(y_train) # get normalizer
# Iterate
if _PRINT_EVERY == 0: print_period = 0
else: print_period = _max_epoch//_PRINT_EVERY
if _PLOT_EVERY == 0: plot_period = 0
else: plot_period = _max_epoch//_PLOT_EVERY
max_iter = max(x_train.shape[0]//_batch_size, 1)
best_loss_val = np.inf
if _SAVE_TXT:
txt_name = ('res/res_%s.txt'%(self.name));f = open(txt_name,'w') # Open txt file
print_n_txt(_f=f,_chars='Text: '+txt_name,_DO_PRINT=self.VERBOSE)
for epoch in range((int)(_max_epoch)+1): # For every epoch
x_train,y_train = shuffle(x_train,y_train)
nzd_x_train,nzd_y_train = self.nzr_x.get_nzdval(x_train),self.nzr_y.get_nzdval(y_train)
for iter in range(max_iter): # For every iteration
start,end = iter*_batch_size,(iter+1)*_batch_size
if _LR_SCHEDULE:
if epoch < 0.5*_max_epoch:
lr_use = _lr
elif epoch < 0.75*_max_epoch:
lr_use = _lr/5.
else:
lr_use = _lr/10.
else:
lr_use = _lr
feeds = {self.x:nzd_x_train[start:end,:],self.y:nzd_y_train[start:end,:]
,self.kp:_kp,self.lr:lr_use,self.is_training:True}
# Optimize
_sess.run(self.optm,feeds)
# Track the Best result
BEST_FLAG = False
check_period = _max_epoch//100
if (epoch % check_period)==0:
# Feed total dataset
feeds = {self.x:nzd_x_train,self.y:nzd_y_train,self.kp:1.0,self.is_training:False}
opers = [self.loss_total,self.loss_fit,self.l2_reg]
loss_val,loss_fit,l2_reg = _sess.run(opers,feeds)
if (loss_val < best_loss_val) & (epoch >= 3):
best_loss_val = loss_val
BEST_FLAG = True
if _SAVE_BEST_NET: # Save the current best model
if self.VERBOSE:
print ("Epoch:[%d] saving current network (best loss:[%.3f])"%(epoch,best_loss_val))
self.save2npz(_sess,_save_name='net/net_%s_best.npz'%(self.name))
self.save2mat_from_npz(_x_train=x_train,_y_train=y_train,
_save_name='net/net_%s_best.mat'%(self.name),
_npz_path='net/net_%s_best.npz'%(self.name))
# Print current result
if (print_period!=0) and ((epoch%print_period)==0 or (epoch==(_max_epoch-1))): # Print
feeds = {self.x:nzd_x_train,self.y:nzd_y_train,self.kp:1.0,self.is_training:False}
opers = [self.loss_total,self.loss_fit,self.l2_reg]
loss_val,loss_fit,l2_reg = _sess.run(opers,feeds)
if _SAVE_TXT:
str_temp = ("[%d/%d] loss:%.3f(fit:%.3f+l2:%.3f) bestLoss:%.3f"
%(epoch,_max_epoch,loss_val,loss_fit,l2_reg,best_loss_val))
print_n_txt(_f=f,_chars=str_temp,_DO_PRINT=self.VERBOSE)
else:
if self.VERBOSE | True :
print ("[%d/%d] loss:%.3f(fit:%.3f+l2:%.3f) bestLoss:%.3f"
%(epoch,_max_epoch,loss_val,loss_fit,l2_reg,best_loss_val))
# Plot current result
if (plot_period!=0) and ((epoch%plot_period)==0 or (epoch==(_max_epoch-1))): # Plot
# Get loss vals
feeds = {self.x:nzd_x_train,self.y:nzd_y_train,self.kp:1.0,self.is_training:False}
opers = [self.loss_total,self.loss_fit,self.l2_reg]
loss_val,loss_fit,l2_reg = _sess.run(opers,feeds)
# Output
nzd_y_test = self.sampler(_sess=_sess,_x=nzd_x_train)
y_pred = self.nzr_y.get_orgval(nzd_y_test)[:,0]
# Plot one dimensions of both input and output
                x_plot,y_plot = x_train[:,self.x_dim4plot],y_train[:,0] # Training data
plt.figure(figsize=(8,4))
# plt.axis([np.min(x_plot),np.max(x_plot),np.min(y_plot)-0.1,np.max(y_plot)+0.1])
h_tr,=plt.plot(x_plot,y_plot,'k.') # Plot training data
h_pr,=plt.plot(x_plot,y_pred,'b.') # Plot prediction
plt.title("[%d/%d] name:[%s] loss_val:[%.3e]"%(epoch,_max_epoch,self.name,loss_val),fontsize=13);
plt.legend([h_tr,h_pr],['Train data','Predictions'],fontsize=13,loc='upper left')
if self.x_name4plot != None:
plt.xlabel(self.x_name4plot,fontsize=13)
plt.show()
# Save final results
if _SAVE_FINAL:
self.save2npz(_sess,_save_name='net/net_%s_final.npz'%(self.name))
self.save2mat_from_npz(_x_train=x_train,_y_train=y_train,
_save_name='net/net_%s_final.mat'%(self.name),
_npz_path='net/net_%s_final.npz'%(self.name))
if self.VERBOSE:
print ("Train done.")
def test(self,_sess,_x_train,_y_train,_x_test=None,_y_test=None,
_title_str4data=None,_title_str4test=None,
_PLOT_TRAIN=False,_PLOT_TEST=False,_SAVE_FIG=False,
_x_dim4plot=0,_x_name4plot=None):
self.x_dim4plot = _x_dim4plot
self.x_name4plot = _x_name4plot
self.nzr_x,self.nzr_y = nzr(_x_train),nzr(_y_train) # get normalizer
# Get normalizer
if len(np.shape(_y_train)) == 1: # if y is a vector
_y_train = np.reshape(_y_train,newshape=[-1,1]) # make it rank two
            self.nzr_x,self.nzr_y = nzr(_x_train),nzr(_y_train) # get normalizer
self.nzr_x.mu, self.nzr_x.std = 0,1
self.nzr_y.mu, self.nzr_y.std = 0,1
else:
self.nzr_x,self.nzr_y = nzr(_x_train),nzr(_y_train) # get normalizer
# Plot train data and predictions
if _PLOT_TRAIN:
if len(np.shape(_y_train)) == 1: # if y is a vector
_y_train = np.reshape(_y_train,newshape=[-1,1]) # make it rank two
            x_train4plot,y_train4plot = _x_train[:,self.x_dim4plot],_y_train[:,0] # training data
nzd_y_pred = self.sampler(_sess=_sess,_x=self.nzr_x.get_nzdval(_x_train))
y_pred_train = self.nzr_y.get_orgval(nzd_y_pred)[:,0]
plt.figure(figsize=(8,4))
plt.axis([np.min(x_train4plot),np.max(x_train4plot),np.min(y_train4plot)-0.1,np.max(y_train4plot)+0.1])
h_tr,=plt.plot(x_train4plot,y_train4plot,'k.') # plot train data
h_pr,=plt.plot(x_train4plot,y_pred_train,'b.') # plot prediction for train data
plt.legend([h_tr,h_pr],['Train data','Train predictions'],fontsize=13,loc='upper left')
if self.x_name4plot != None:
plt.xlabel(self.x_name4plot,fontsize=13)
plt.ylabel('Output',fontsize=13)
if _title_str4data != None:
plt.title(_title_str4data,fontsize=15);
if _SAVE_FIG:
plt.savefig('fig/fig_%s_data.png'%(self.name))
plt.show()
# Plot test data and predictions
if len(np.shape(_y_train)) == 1: # if y is a vector
_y_train = np.reshape(_y_train,newshape=[-1,1]) # make it rank two
if len(np.shape(_y_test)) == 1: # if y is a vector
_y_test = np.reshape(_y_test,newshape=[-1,1]) # make it rank two
        x_data4plot,y_data4plot = _x_train[:,self.x_dim4plot],_y_train[:,0] # training data
x_test4plot,y_test4plot = _x_test[:,self.x_dim4plot],_y_test[:,0] # test data
nzd_y_test = self.sampler(_sess=_sess,_x=self.nzr_x.get_nzdval(_x_test))
y_pred_test = self.nzr_y.get_orgval(nzd_y_test)[:,0]
if _PLOT_TEST:
fig = plt.figure(figsize=(8,4))
            plt.axis([np.min(x_data4plot),np.max(x_data4plot),np.min(y_data4plot)-0.1,np.max(y_data4plot)+0.1])
# ======================================================================
# Author: TrungNT
# ======================================================================
from __future__ import print_function, division
from .. import logger
from ..trainer import _data, _task, trainer
from ..dataset import dataset
from ..model import model
from .. import tensor
import unittest
import os
from collections import defaultdict
import numpy as np
import h5py
# ===========================================================================
# Main Tests
# ===========================================================================
def model_func():
import lasagne
l = lasagne.layers.InputLayer(shape=(None, 10))
l = lasagne.layers.DenseLayer(l, num_units=64)
l = lasagne.layers.DenseLayer(l, num_units=3,
nonlinearity=lasagne.nonlinearities.linear)
return l
class ModelTest(unittest.TestCase):
def setUp(self):
logger.set_enable(False)
f = h5py.File('tmp.h5', 'w')
f['X_train'] = np.random.rand(1024, 10)
f['y_train'] = np.random.rand(1024, 3)
f['X_test'] = np.random.rand(512, 10)
f['y_test'] = np.random.rand(512, 3)
f['X_valid'] = np.random.rand(512, 10)
f['y_valid'] = np.random.rand(512, 3)
def tearDown(self):
logger.set_enable(True)
if os.path.exists('tmp.h5'):
os.remove('tmp.h5')
def test_data(self):
ds = dataset('tmp.h5', 'r')
d = _data()
d.set(['X_train', 'y_train', ds['X_valid'], ds['y_valid'],
np.random.rand(1024, 13)])
self.assertEqual(len(d._batches), 3)
d.set_dataset(ds)
self.assertEqual(len(d._batches), 5)
it = d.create_iter(32, 0., 1., shuffle=True, seed=13, mode=0)
count = defaultdict(int)
for a, b, c, d, e in it:
count['a'] += a.shape[0]
count['b'] += b.shape[0]
count['c'] += c.shape[0]
count['d'] += d.shape[0]
count['e'] += e.shape[0]
self.assertEqual(count.values(), [512] * 5)
self.assertEqual(a.shape[1], 10)
self.assertEqual(b.shape[1], 3)
self.assertEqual(e.shape[1], 13)
def test_task(self):
ds = dataset('tmp.h5', 'r')
global niter
niter = 0
def task_func(*X):
global nargs, niter
nargs = len(X)
niter += 1
t = _task('task', task_func, _data(ds).set(['X_train', ds['y_train']]),
epoch=2, p=1., seed=13)
t.set_iter(128, 0., 1., shuffle=True, mode=0)
run_it = t.run_iter()
while run_it.next() is not None:
pass
self.assertEqual(nargs, 2)
self.assertEqual(niter, 16)
t._epoch = float('inf') # infinite run
niter = 0
run_it = t.run_iter()
for i in xrange(1000):
run_it.next()
self.assertEqual(niter, 1000)
def test_training(self):
import lasagne
# ====== create model ====== #
m = model()
m.set_model(model_func)
f_cost = m.create_cost(
lambda y_pred, y_true: tensor.mean(tensor.square(y_pred - y_true), axis=-1))
f_update = m.create_updates(
lambda y_pred, y_true: tensor.mean(tensor.square(y_pred - y_true), axis=-1),
lasagne.updates.rmsprop)
# ====== create trainer ====== #
global i, j, k
i, j, k = 0, 0, 0
def train_func(*X):
global i
i += 1
# print('Train', i)
def valid_func(*X):
global j
j += 1
# print('Valid', j)
def test_func(*X):
global k
k += 1
flag = True
def task_start_end(trainer):
self.assertEqual(
trainer.task == 'train' or trainer.task == 'test' or
trainer.task == 'realtrain' or trainer.task == 'realtest', True)
def batch_start(trainer):
global flag
# print(trainer.task, trainer.iter)
if trainer.task == 'train' and train.iter == 100 and flag:
trainer.restart()
flag = False
elif trainer.task == 'test' and train.iter == 50:
trainer.stop()
train = trainer()
train.set_callback(
batch_start=batch_start, task_start=task_start_end,
task_end=task_start_end)
train.add_data('valid', ['X_valid', 'y_valid'])
train.add_data('test', ['X_test', 'y_test'])
train.add_task('train', train_func, ['X_train', 'y_train'], 'tmp.h5',
epoch=2, seed=13)
train.add_subtask(valid_func, 'valid', freq=0.58)
train.add_subtask(test_func, 'test', single_run=True, epoch=-1, p=0.1)
train.add_task('test', test_func, 'test')
while not train.step(): pass
self.assertEqual(train.run(), True)
self.assertEqual(train.step(), True)
self.assertEqual(i, 16) # 2 epochs, 8 iter each
self.assertEqual(j, 12) # 3 epochs, 4 iter each
self.assertEqual(k, 4) # 10% activated
# ====== Main training ====== #
def batch_end(trainer):
pass
def epoch_end(trainer):
if trainer.task == 'realtrain_subtask[0]':
print('Valid:', np.mean(trainer.output))
elif trainer.task == 'realtrain':
                print('Train:', np.mean(trainer.output))
# (c) 2012 Massachusetts Institute of Technology. All Rights Reserved
# Code written by: <NAME> (<EMAIL>)
"""
This is a basic toolbox to perform high-resolution analysis of a Hi-C data.
By high resolution we meen that there are more than 10000-20000 bins per genome
- otherwise, binnedData is easily capable of the analysis.
It can perform basic filtering (poor coverage regions), and iterative correction at any resolution.
The main advantage of this class is that it supports both in memory and HDD storage,
and for HDD storage it supports both sparse and dense matrix logic.
In fact, sparse logic with HDF5-based storage in memory is already good (default settings).
.. note::
This class loads data, saved by saveByChromosomeHeatmap method of fragmentHiC.
It is not designed to load heatmaps saved by "saveHeatmap" class, because those
can fit in memory, and should be analyzed by a more powerful binnedData class.
Class structure
---------------
Class defaultMatrix implements a wrapper around a 2D numpy.array matrix. This
class defines methods that will be applied to a Hi-C map between two
chromosomes. This class should serve as a template and backup for other Hi-C map
storage classes which will be inhereted from this class.
Class h5dictMatrix implements a subclass of a defaultMatrix, where the matrix is
in fact stored on the HDD. It leaves all other methods intact, as all other
methods use only self.setData() and self.getData(). Therefore overriding getter
and setter is enough to move matrix storage from memory to an h5dict.
Use defaultMatrix when the speed is critical, if you're working with cis data only.
Class h5dictSparseMatrix implements a subclass of defaultMatrix, where all the
methods were overridden to operate in a sparse logic. It stores a Hi-C map as
three arrays: X coordinate, Y coordinate and Value. Therefore, for sparse
matrices all overridden methods will be much faster than the default, as they will
bypass calling self.getData and creating a huge matrix in memory. It is
suggested to use h5dictMatrix for cis maps and h5dictSparseMatrix for trans
(between-chromosomal) maps.
Class HiResHiC can load a by-chromosome Hi-C map, with either cis only,
or cis and trans matrices. It can then perform iterative correction of the
map and some other filtering. Unlike binnedData, it does not support
multiple datasets at once, or complicated filtering, or PCA. It will support by-
chromosome domain finder from cis reads at some point as we converge on the
ideal algorithm to do this.
On my machine with 32GB RAM it was successfully used to perform IC of a human
Hi-C at 10kb resolution. It took it 20 minutes to load the data, 4 mins to
remove poor bins, and couple hours to perform IC, about 10 minutes per pass. I
also note that the data was in fact stored in memory for doing that, and it
never used more than 16GB of RAM... in fact, creating this dataset used more,
but this will be optimized later.
"""
from mirnylib.genome import Genome
import numpy as np
import warnings
from mirnylib.h5dict import h5dict
from mirnylib import numutils
from mirnylib.systemutils import setExceptionHook
from scipy.stats.stats import spearmanr
from hiclib import hicShared
setExceptionHook()
class defaultMatrix(object):
"""
This is a template object which stores matrix in memory.
Alternatively, matrix can be stored in an h5dict, either normally
or in a sparse mode.
    All the methods should first be implemented here using getData() and setData().
    Then they should be translated to the sparse subclasses of this class.
"""
def __init__(self, data=None, dictToSave=None, key=""):
"""
Initializes the object that stores the Hi-C matrix.
Parameters
----------
data : 2D matrix in any format
Hi-C matrix between two chromosomes
dictToSave : dict, h5dict or any other dict-like structure
Dict to store actual data. Should be h5dict for high resolution analysis.
Is not needed for defaultMatrix, will be used by it's subclasses only.
key : str or anything
A key, unique for each pair of chromosomes, used to identify dataset
in the dictToSave. Is provided by HiResHiC.
"""
self._h5dict = dictToSave
self._key = repr(key)
if data is not None:
self.setData(data)
def getData(self):
"Returns a matrix in a dense format (NxM array)"
return self.data.copy()
def setData(self, data):
"""Accepts the matrix to store. Here, just puts it in RAM.
For further subclasses this will need to convert the
matrix to the sparse form.
"""
data = np.asarray(data, dtype=np.float64)
assert len(data.shape) == 2
self.data = data
def getSumX(self):
"returns sum of all values along the first (0) axis, i.e. sum of all rows"
return np.sum(self.getData(), axis=0)
def getSumY(self):
"returns sum of all values along the second (1) axis, i.e. sum of all columns"
return np.sum(self.getData(), axis=1)
def getSums(self):
"returns getSumX and getSumY without calling getData() twice"
data = self.getData()
sumX = np.sum(data, axis=0)
sumY = np.sum(data, axis=1)
return (sumX, sumY)
def clearRows(self, rows):
"""Sets to 0 certain rows and columns
Parameters
----------
rows : tuple of two arrays (rows, columns)
Two arrays which hold indices of rows and colums to be removed
"""
rowsX, rowsY = rows
data = self.getData()
data[rowsX, :] = 0
data[:, rowsY] = 0
self.setData(data)
def divideByVectorX(self, vectorX):
"""
Divides each row by correspoinding value from vectorX
"""
vectorX[vectorX == 0] = 1
data = self.getData()
data /= vectorX[None, :]
self.setData(data)
def divideByVectorY(self, vectorY):
"""
Divides each column by correspoinding value from vectorY
"""
vectorY[vectorY == 0] = 1
data = self.getData()
data /= vectorY[:, None]
self.setData(data)
def divideByVectors(self, vectors):
"""
Divides each row and column by correspoinding
value from vectors[0] and vectors[1]
Does it without calling getData twice!
"""
vecX = vectors[0]
vecY = vectors[1]
vecX[vecX == 0] = 1
vecY[vecY == 0] = 1
data = self.getData()
assert data.shape[1] == len(vecX)
assert data.shape[0] == len(vecY)
data /= vecX[None, :]
data /= vecY[:, None]
self.setData(data)
@property
def shape(self):
"""Returns shape of the data.
Should be overridden in subclasses
not to load the data every time! """
return self.getData().shape
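# Illustrative usage sketch (added for demonstration; not part of the original
# library). It shows the intended calling pattern on a made-up toy matrix.
def _example_defaultMatrix():
    m = defaultMatrix(np.array([[1., 2.], [3., 4.]]))
    sumX, sumY = m.getSums()           # column sums and row sums
    m.divideByVectors((sumX, sumY))    # one normalization step, as IC would do
    return m.getData()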
class h5dictMatrix(defaultMatrix):
"""
Changes the storage from memory to h5dict, keeping the matrix
in the dense (regular) format.
Just overrides getData, setData and shape to use h5dict.
"""
def getData(self):
data = self._h5dict[self._key]
return data
def setData(self, data):
data = np.asarray(data, dtype=np.float64)
self.savedShape = data.shape
self._h5dict[self._key] = data
@property
def shape(self):
return self.savedShape
class h5dictSparseMatrix(defaultMatrix):
"""
Changes the storage from memory to h5dict,
and changes matrix to sparse.
All methods from defaultMatrix are overridden here!
"""
def __init__(self, data=None, dictToSave=None, key=""):
self._h5dict = dictToSave
self._key = repr(key)
self._keyx = self._key + "x"
self._keyy = self._key + "y"
self._keyv = self._key + "v"
if data is not None:
self.setData(data)
@property
def _X(self):
return self._h5dict[self._keyx]
@_X.setter
def _X(self, data):
self._h5dict[self._keyx] = data
@property
def _Y(self):
return self._h5dict[self._keyy]
@_Y.setter
def _Y(self, data):
self._h5dict[self._keyy] = data
@property
def _V(self):
return self._h5dict[self._keyv]
@_V.setter
def _V(self, data):
self._h5dict[self._keyv] = data
@property
def shape(self):
return self.savedShape
def getData(self):
data = np.zeros(self.savedShape)
data[self._X, self._Y] = self._V
return data
def setData(self, data):
x, y = np.nonzero(data)
self.savedShape = data.shape
values = data[x, y]
values = np.asarray(values, np.float64)
self._X = x
self._Y = y
self._V = values
def getSumX(self):
return np.bincount(self._Y, weights=self._V, minlength=self.shape[1])
def getSumY(self):
return np.bincount(self._X, weights=self._V, minlength=self.shape[0])
def getSums(self):
X, Y, V = self._X, self._Y, self._V
s1 = np.bincount(Y, weights=V, minlength=self.shape[1])
s2 = np.bincount(X, weights=V, minlength=self.shape[0])
return (s1, s2)
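# Illustrative note (added): for a toy matrix [[0, 2], [3, 0]] the stored COO
# triplets are X=[0, 1], Y=[1, 0], V=[2, 3], and
#   np.bincount(Y, weights=V, minlength=2) -> [3., 2.]   (column sums)
#   np.bincount(X, weights=V, minlength=2) -> [2., 3.]   (row sums)
# which reproduces the dense getSumX()/getSumY() results.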
def divideByVectorX(self, vecX):
vecX[vecX == 0] = 1
self._V = self._V / vecX[self._Y]
def divideByVectorY(self, vecY):
vecY[vecY == 0] = 1
self._V = self._V / vecY[self._X]
def divideByVectors(self, vecs):
vecX = vecs[0]
vecY = vecs[1]
V = self._V
V /= vecX[self._Y]
V /= vecY[self._X]
self._V = V
def clearRows(self, rows):
rowsX, rowsY = rows
indexX = np.ones(self.shape[0], bool)
import numpy as np
import scipy.linalg as splinalg
from numba import vectorize, guvectorize, float32, float64
NUMBA_COMPILATION_TARGET = 'parallel'
def invsqrt(x):
"""Convenience function to compute the inverse square root of a scalar or a square matrix."""
if hasattr(x, 'shape'):
return np.linalg.inv(splinalg.sqrtm(x))
return 1. / np.sqrt(x)
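# Example (illustrative only): invsqrt accepts scalars and square matrices.
#   invsqrt(4.0)                                         # -> 0.5
#   A = np.diag([4.0, 9.0])
#   invsqrt(A)                                           # -> approx. diag(0.5, 1/3)
#   np.allclose(invsqrt(A) @ A @ invsqrt(A), np.eye(2))  # -> True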
def bilinear_interpolate(im, x, y):
im = np.atleast_3d(im)
import numpy as np
import matplotlib.pyplot as plt
from mayavi import mlab
# Produce some nice data.
n_mer, n_long = 6, 11
pi = np.pi
dphi = pi/1000.0
phi = np.arange(0.0, 2*pi + 0.5*dphi, dphi, 'd')
mu = phi*n_mer
x = np.cos(mu)*(1+np.cos(n_long*mu/n_mer)*0.5)
y = np.sin(mu)*(1+np.cos(n_long*mu/n_mer)*0.5)
z = np.sin(n_long*mu/n_mer)*0.5
# View it.
l = mlab.quiver3d(x, y, z, x, y, z, colormap='Spectral')
mlab.axes()
# Now animate the data.
@mlab.animate
def anim():
ms = l.mlab_source
for i in range(100):
x = np.cos(mu)
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 25 14:06:09 2020
@author: u6265553
"""
'''This script generates the inverse transformation functions required for
moving-lens shift-scale calibration, aligning the synthetic images generated by
Blender to a reference image so that a fully in-focus image can be obtained by
image stacking. It calculates the shift and scaling required to align the
images and builds the corresponding transformation functions.'''
import glob
import numpy as np
import cv2
from skimage import restoration
from scipy.signal import convolve2d as conv2
from scipy import misc, optimize, special
from matplotlib import pylab as plt
pixelsize=4e-6
#focal length of the lens in meter
f_lens=65e-3
#focal length of the lens in pixel
f_lens_pixel=f_lens/pixelsize
forward_translation=-0.021
backward_translation=0.019
average_camera_distance=0.15
max_camera_distance=average_camera_distance+backward_translation
f_camera=average_camera_distance*f_lens/(average_camera_distance-f_lens)
#focal length of the camera in pixel
f_camera_pixel=f_camera/pixelsize
num_of_img=64
#linear displacement of the camera
del_d=(backward_translation-forward_translation)/num_of_img #in meter
del_d_pixel=del_d/pixelsize #in pixel
d_pixel=max_camera_distance/pixelsize
d_list=np.zeros([num_of_img])
for i in range(num_of_img):
d_list[i]=d_pixel-i*del_d_pixel
d_ref_pixel=d_list[63]
scaling=np.zeros([num_of_img])
shift_x=np.zeros([num_of_img])
shift_y=np.zeros([num_of_img])
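# Illustrative sketch (an assumption, not taken from the original script): under
# a simple pinhole model, an object imaged from distance d appears scaled by
# d_ref / d relative to the reference view, so the scaling array declared above
# could be filled as follows; the shift arrays would then follow from the offset
# of each view's image centre.
for i in range(num_of_img):
    scaling[i] = d_ref_pixel / d_list[i]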
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import shutil
import sys
import os
import xml
import cv2
import math
import random
import matplotlib.pyplot as plot
from scipy import misc
import argparse
from time import sleep
gender_content = ('male', 'female')
glasses_content = ('wearing glasses', 'not wearing glasses')
sampleTextSetFile = ["training.txt", "testing.txt"]
root_dir = "../../../dataset/facedata/mtfl/"
margin = 32
labelDir = 'label'
cropImgdir = 'cropImg'
if not os.path.isdir(root_dir+ cropImgdir):
os.mkdir(root_dir+ cropImgdir)
def extractTrainLabfile(split_file):
mainSetFile = open(root_dir+ "ImageSets/Main/" +str(split_file.split("/")[-1]), "w")
print(mainSetFile)
with open(split_file, 'r') as label_file:
while True:
img_file_info = label_file.readline().split(' ')
if len(img_file_info) <= 2:
break
img_filename = img_file_info[0]
img_filename = img_filename.replace('\\', '/')
img_file =root_dir+ "JPEGImages/" + img_filename
source_img = cv2.imread(img_file)
assert source_img.shape[2]==3
fullImg = os.path.abspath(img_file) + '\n'
if 1:
print("##################################")
print("imgfile path: ", img_file)
print("imgfile_name: ", img_filename)
print ("anno img file: ", "annoImage/"+str(img_file.split("/")[-1]))
xmin = int(img_file_info[1])
xmax = int(img_file_info[2])
ymin = int(img_file_info[3])
ymax = int(img_file_info[4])
x1 = float(img_file_info[5])
y1 = float(img_file_info[6])
x2 = float(img_file_info[7])
y2 = float(img_file_info[8])
x3 = float(img_file_info[9])
y3 = float(img_file_info[10])
x4 = float(img_file_info[11])
y4 = float(img_file_info[12])
x5 = float(img_file_info[13])
y5 = float(img_file_info[14])
if len(img_file_info) == 19:
gender = img_file_info[15]
glass = img_file_info[17]
elif len(img_file_info) == 17:
gender = img_file_info[15]
glass = img_file_info[16]
x =[x1, x2, x3, x4, x5]
x_arrary = np.array(x)
x_max = x_arrary[np.argmax(x_arrary)]
x_min = x_arrary[np.argmin(x_arrary)]
y =[y1, y2, y3, y4, y5]
y_arrary = np.array(y)
y_max = y_arrary[np.argmax(y_arrary)]
y_min = y_arrary[np.argmin(y_arrary)]
img = cv2.cvtColor(source_img, cv2.COLOR_BGR2RGB)
img_size = np.asarray(img.shape)[0:2]
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(xmin-margin/2, 0)
bb[1] = np.maximum(ymin-margin/2, 0)
bb[2] = np.minimum(xmax + margin/2, img_size[1])
bb[3] = np.minimum(ymax + margin/2, img_size[0])
cropped = img[bb[1]:bb[3],bb[0]:bb[2],:]
cropped = cv2.cvtColor(cropped, cv2.COLOR_RGB2BGR)
cv2.imwrite(root_dir+"annoImage/"+str(img_file.split("/")[-1]), cropped)
x11 = x1 - bb[0]
x22 = x2 - bb[0]
x33 = x3 - bb[0]
x44 = x4 - bb[0]
x55 = x5 - bb[0]
y11 = y1 - bb[1]
y22 = y2 - bb[1]
y33 = y3 - bb[1]
y44 = y4 - bb[1]
y55 = y5 - bb[1]
x_a = [x11, x22, x33, x44, x55]
x_crop = np.array(x_a).astype(np.int32)
y_a = [y11, y22, y33, y44, y55]
y_crop = np.array(y_a)
import os
import nibabel as nib
import numpy as np
import torch
import utils.common_utils as common_utils
import utils.data_utils as du
import torch.nn.functional as F
import shot_batch_sampler as SB
def dice_score_binary(vol_output, ground_truth, no_samples=10, phase='train'):
ground_truth = ground_truth.type(torch.FloatTensor)
vol_output = vol_output.type(torch.FloatTensor)
if phase == 'train':
samples = np.random.choice(len(vol_output), no_samples)
vol_output, ground_truth = vol_output[samples], ground_truth[samples]
inter = 2 * torch.sum(torch.mul(ground_truth, vol_output))
union = torch.sum(ground_truth) + torch.sum(vol_output) + 0.0001
return torch.div(inter, union)
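# Worked example (illustrative, eval mode so no random subsampling):
#   pred = torch.tensor([1., 1., 0., 0.]); gt = torch.tensor([1., 0., 0., 0.])
#   dice_score_binary(pred, gt, phase='eval')  # 2*1 / (1 + 2 + 0.0001) ~= 0.667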
def dice_confusion_matrix(vol_output, ground_truth, num_classes, no_samples=10, mode='train'):
dice_cm = torch.zeros(num_classes, num_classes)
if mode == 'train':
samples = np.random.choice(len(vol_output), no_samples)
vol_output, ground_truth = vol_output[samples], ground_truth[samples]
for i in range(num_classes):
GT = (ground_truth == i).float()
for j in range(num_classes):
Pred = (vol_output == j).float()
inter = torch.sum(torch.mul(GT, Pred))
union = torch.sum(GT) + torch.sum(Pred) + 0.0001
dice_cm[i, j] = 2 * torch.div(inter, union)
avg_dice = torch.mean(torch.diagflat(dice_cm))
return avg_dice, dice_cm
def get_range(volume):
batch, _, _ = volume.size()
slice_with_class = torch.sum(volume.view(batch, -1), dim=1) > 10
index = slice_with_class[:-1] - slice_with_class[1:] > 0
seq = torch.Tensor(range(batch - 1))
range_index = seq[index].type(torch.LongTensor)
return range_index
def dice_score_perclass(vol_output, ground_truth, num_classes, no_samples=10, mode='train'):
dice_perclass = torch.zeros(num_classes)
if mode == 'train':
samples = np.random.choice(len(vol_output), no_samples)
vol_output, ground_truth = vol_output[samples], ground_truth[samples]
for i in range(num_classes):
GT = (ground_truth == i).float()
Pred = (vol_output == i).float()
inter = torch.sum(torch.mul(GT, Pred))
union = torch.sum(GT) + torch.sum(Pred) + 0.0001
dice_perclass[i] = (2 * torch.div(inter, union))
return dice_perclass
def binarize_label(volume, groud_truth, class_label):
groud_truth = (groud_truth == class_label).type(torch.FloatTensor)
batch, _, _ = groud_truth.size()
slice_with_class = torch.sum(groud_truth.view(batch, -1), dim=1) > 10
index = slice_with_class[:-1] - slice_with_class[1:] > 0
seq = torch.Tensor(range(batch - 1))
range_index = seq[index].type(torch.LongTensor)
groud_truth = groud_truth[slice_with_class]
volume = volume[slice_with_class]
condition_input = torch.cat((volume, groud_truth.unsqueeze(1)), dim=1)
return condition_input, range_index.cpu().numpy()
def evaluate_dice_score(model_path,
num_classes,
query_labels,
data_dir,
query_txt_file,
support_txt_file,
remap_config,
orientation,
prediction_path, device=0, logWriter=None, mode='eval', fold=None):
print("**Starting evaluation. Please check tensorboard for plots if a logWriter is provided in arguments**")
print("Loading model => " + model_path)
batch_size = 20
Num_support = 10
with open(query_txt_file) as file_handle:
volumes_query = file_handle.read().splitlines()
# with open(support_txt_file) as file_handle:
# volumes_support = file_handle.read().splitlines()
model = torch.load(model_path)
cuda_available = torch.cuda.is_available()
if cuda_available:
torch.cuda.empty_cache()
model.cuda(device)
model.eval()
common_utils.create_if_not(prediction_path)
print("Evaluating now... " + fold)
query_file_paths = du.load_file_paths(data_dir, data_dir, query_txt_file)
support_file_paths = du.load_file_paths(data_dir, data_dir, support_txt_file)
with torch.no_grad():
all_query_dice_score_list = []
for query_label in query_labels:
volume_dice_score_list = []
# Loading support
support_volume, support_labelmap, _, _ = du.load_and_preprocess(support_file_paths[0],
orientation=orientation,
remap_config=remap_config)
support_volume = support_volume if len(support_volume.shape) == 4 else support_volume[:, np.newaxis, :,
:]
support_volume, support_labelmap = torch.tensor(support_volume).type(torch.FloatTensor), \
torch.tensor(support_labelmap).type(torch.LongTensor)
support_volume, range_index = binarize_label(support_volume, support_labelmap, query_label)
# slice_gap_support = int(np.ceil(len(support_volume) / Num_support))
#
# support_slice_indexes = [i for i in range(0, len(support_volume), slice_gap_support)]
#
# if len(support_slice_indexes) < Num_support:
# support_slice_indexes.append(len(support_volume) - 1)
for vol_idx, file_path in enumerate(query_file_paths):
query_volume, query_labelmap, _, _ = du.load_and_preprocess(file_path,
orientation=orientation,
remap_config=remap_config)
query_volume = query_volume if len(query_volume.shape) == 4 else query_volume[:, np.newaxis, :, :]
query_volume, query_labelmap = torch.tensor(query_volume).type(torch.FloatTensor), \
torch.tensor(query_labelmap).type(torch.LongTensor)
query_labelmap = query_labelmap == query_label
range_query = get_range(query_labelmap)
query_volume = query_volume[range_query[0]: range_query[1] + 1]
query_labelmap = query_labelmap[range_query[0]: range_query[1] + 1]
dice_per_slice = []
vol_output = []
for i, query_slice in enumerate(query_volume):
query_batch_x = query_slice.unsqueeze(0)
max_dice = -1.0
max_output = None
for j in range(0, len(support_volume), 10):
support_slice = support_volume[j]
support_batch_x = support_slice.unsqueeze(0)
if cuda_available:
query_batch_x = query_batch_x.cuda(device)
support_batch_x = support_batch_x.cuda(device)
weights = model.conditioner(support_batch_x)
out = model.segmentor(query_batch_x, weights)
_, batch_output = torch.max(F.softmax(out, dim=1), dim=1)
slice_dice_score = dice_score_binary(batch_output,
query_labelmap[i].cuda(device), phase=mode)
dice_per_slice.append(slice_dice_score.item())
if slice_dice_score.item() >= max_dice:
max_dice = slice_dice_score.item()
max_output = batch_output
# dice_per_slice.append(max_dice)
vol_output.append(max_output)
vol_output = torch.cat(vol_output)
volume_dice_score = dice_score_binary(vol_output, query_labelmap.cuda(device), phase=mode)
volume_dice_score_list.append(volume_dice_score)
print(volume_dice_score)
dice_score_arr = np.asarray(volume_dice_score_list)
avg_dice_score = np.median(dice_score_arr)
print('Query Label -> ' + str(query_label) + ' ' + str(avg_dice_score))
all_query_dice_score_list.append(avg_dice_score)
print("DONE")
return np.mean(all_query_dice_score_list)
def evaluate_dice_score_2view(model1_path,
model2_path,
num_classes,
query_labels,
data_dir,
query_txt_file,
support_txt_file,
remap_config,
orientation1,
prediction_path, device=0, logWriter=None, mode='eval', fold=None):
print("**Starting evaluation. Please check tensorboard for plots if a logWriter is provided in arguments**")
print("Loading model => " + model1_path + " and " + model2_path)
batch_size = 10
with open(query_txt_file) as file_handle:
volumes_query = file_handle.read().splitlines()
# with open(support_txt_file) as file_handle:
# volumes_support = file_handle.read().splitlines()
model1 = torch.load(model1_path)
model2 = torch.load(model2_path)
cuda_available = torch.cuda.is_available()
if cuda_available:
torch.cuda.empty_cache()
model1.cuda(device)
model2.cuda(device)
model1.eval()
model2.eval()
common_utils.create_if_not(prediction_path)
print("Evaluating now... " + fold)
query_file_paths = du.load_file_paths(data_dir, data_dir, query_txt_file)
support_file_paths = du.load_file_paths(data_dir, data_dir, support_txt_file)
with torch.no_grad():
all_query_dice_score_list = []
for query_label in query_labels:
volume_dice_score_list = []
for vol_idx, file_path in enumerate(support_file_paths):
# Loading support
support_volume1, support_labelmap1, _, _ = du.load_and_preprocess(file_path,
orientation=orientation1,
remap_config=remap_config)
support_volume2, support_labelmap2 = support_volume1.transpose((1, 2, 0)), support_labelmap1.transpose(
(1, 2, 0))
support_volume1 = support_volume1 if len(support_volume1.shape) == 4 else support_volume1[:, np.newaxis,
:, :]
support_volume2 = support_volume2 if len(support_volume2.shape) == 4 else support_volume2[:, np.newaxis,
:, :]
support_volume1, support_labelmap1 = torch.tensor(support_volume1).type(
torch.FloatTensor), torch.tensor(
support_labelmap1).type(torch.LongTensor)
support_volume2, support_labelmap2 = torch.tensor(support_volume2).type(
torch.FloatTensor), torch.tensor(
support_labelmap2).type(torch.LongTensor)
support_volume1 = binarize_label(support_volume1, support_labelmap1, query_label)
support_volume2 = binarize_label(support_volume2, support_labelmap2, query_label)
for vol_idx, file_path in enumerate(query_file_paths):
query_volume1, query_labelmap1, _, _ = du.load_and_preprocess(file_path,
orientation=orientation1,
remap_config=remap_config)
query_volume2, query_labelmap2 = query_volume1.transpose((1, 2, 0)), query_labelmap1.transpose(
(1, 2, 0))
query_volume1 = query_volume1 if len(query_volume1.shape) == 4 else query_volume1[:, np.newaxis, :, :]
query_volume2 = query_volume2 if len(query_volume2.shape) == 4 else query_volume2[:, np.newaxis, :, :]
query_volume1, query_labelmap1 = torch.tensor(query_volume1).type(torch.FloatTensor), torch.tensor(
query_labelmap1).type(torch.LongTensor)
query_volume2, query_labelmap2 = torch.tensor(query_volume2).type(torch.FloatTensor), torch.tensor(
query_labelmap2).type(torch.LongTensor)
query_labelmap1 = query_labelmap1 == query_label
query_labelmap2 = query_labelmap2 == query_label
# Evaluate for orientation 1
support_batch_x = []
k = 2
volume_prediction1 = []
for i in range(0, len(query_volume1), batch_size):
query_batch_x = query_volume1[i: i + batch_size]
if k % 2 == 0:
support_batch_x = support_volume1[i: i + batch_size]
sz = query_batch_x.size()
support_batch_x = support_batch_x[batch_size - 1].repeat(sz[0], 1, 1, 1)
k += 1
if cuda_available:
query_batch_x = query_batch_x.cuda(device)
support_batch_x = support_batch_x.cuda(device)
weights = model1.conditioner(support_batch_x)
out = model1.segmentor(query_batch_x, weights)
# _, batch_output = torch.max(F.softmax(out, dim=1), dim=1)
volume_prediction1.append(out)
# Evaluate for orientation 2
support_batch_x = []
k = 2
volume_prediction2 = []
for i in range(0, len(query_volume2), batch_size):
query_batch_x = query_volume2[i: i + batch_size]
if k % 2 == 0:
support_batch_x = support_volume2[i: i + batch_size]
sz = query_batch_x.size()
support_batch_x = support_batch_x[batch_size - 1].repeat(sz[0], 1, 1, 1)
k += 1
if cuda_available:
query_batch_x = query_batch_x.cuda(device)
support_batch_x = support_batch_x.cuda(device)
weights = model2.conditioner(support_batch_x)
out = model2.segmentor(query_batch_x, weights)
volume_prediction2.append(out)
volume_prediction1 = torch.cat(volume_prediction1)
volume_prediction2 = torch.cat(volume_prediction2)
volume_prediction = 0.5 * volume_prediction1 + 0.5 * volume_prediction2.permute(3, 1, 0, 2)
_, batch_output = torch.max(F.softmax(volume_prediction, dim=1), dim=1)
volume_dice_score = dice_score_binary(batch_output, query_labelmap1.cuda(device), phase=mode)
batch_output = (batch_output.cpu().numpy()).astype('float32')
nifti_img = nib.MGHImage(np.squeeze(batch_output), np.eye(4))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Nov 27 2019
@author: changyuchang
"""
import numpy as np
from functools import partial
def no_selection(community_function):
"""
Direct well-to-well transfer without selection
"""
n_wells = len(community_function)
return np.eye(n_wells)
# Make selection algorithms with similar names, using partial functions
## Select top n%
def temp_select_top(community_function, p):
n_wells = len(community_function)
sorted_community_function = np.sort(community_function)
cut_off = sorted_community_function[int(np.floor(len(community_function)*(1-p)))]
winner_index = np.where(community_function >= cut_off)[0][::-1]
transfer_matrix = np.zeros((n_wells,n_wells))
t_new = range(n_wells) # New wells
t_old = list(winner_index) * (int(np.ceil(1/p) + 1)) # Old wells
for i in range(n_wells):
transfer_matrix[t_new[i], t_old[i]] = 1
return transfer_matrix
for i in [10, 15, 16, 20, 25, 28, 30, 33, 40, 50, 60]:
globals()['select_top%spercent' %i] = partial(temp_select_top, p = i/100)
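# Illustrative example (not part of the original script): for
# community_function = [0.1, 0.9, 0.5, 0.3] and p = 0.5 the cut-off is 0.5,
# the winning wells are [2, 1], and temp_select_top returns the transfer matrix
#   [[0, 0, 1, 0],
#    [0, 1, 0, 0],
#    [0, 0, 1, 0],
#    [0, 1, 0, 0]]
# i.e. each new well (row) is seeded from one of the top-p old wells (column).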
## Select top n% control
def temp_select_top_control(community_function, p):
n_wells = len(community_function)
randomized_community_function = community_function.copy()
np.random.shuffle(randomized_community_function)
sorted_community_function = np.sort(randomized_community_function)
cut_off = sorted_community_function[int(np.floor(len(randomized_community_function)*(1-p)))]
winner_index = np.where(randomized_community_function >= cut_off)[0][::-1]
transfer_matrix = np.zeros((n_wells,n_wells))
t_new = range(n_wells) # New wells
t_old = list(winner_index) * (int(np.ceil(1/p)+1)) # Old wells
for i in range(n_wells):
transfer_matrix[t_new[i], t_old[i]] = 1
return transfer_matrix
for i in [10, 15, 16, 20, 25, 28, 30, 33, 40, 50, 60]:
globals()['select_top%spercent_control' %i] = partial(temp_select_top_control, p = i/100)
## Pooling
def temp_pool_top(community_function, p):
n_wells = len(community_function)
sorted_community_function = np.sort(community_function)
cut_off = sorted_community_function[int(np.floor(len(community_function)*(1-p)))]
winner_index = np.where(community_function >= cut_off)[0][::-1]
transfer_matrix = np.zeros((n_wells,n_wells))
transfer_matrix[:, list(winner_index)] = 1
return transfer_matrix
for i in [10, 15, 16, 20, 25, 28, 30, 33, 40, 50, 60]:
globals()['pool_top%spercent' %i] = partial(temp_pool_top, p = i/100)
## Pooling control
def temp_pool_top_control(community_function, p):
n_wells = len(community_function)
randomized_community_function = community_function.copy()
np.random.shuffle(randomized_community_function)
sorted_community_function = np.sort(randomized_community_function)
cut_off = sorted_community_function[int(np.floor(len(randomized_community_function)*(1-p)))]
winner_index = np.where(randomized_community_function >= cut_off)[0][::-1]
transfer_matrix = np.zeros((n_wells,n_wells))
transfer_matrix[:, winner_index] = 1
return transfer_matrix
for i in [10, 15, 16, 20, 25, 28, 30, 33, 40, 50, 60]:
globals()['pool_top%spercent_control' %i] = partial(temp_pool_top_control, p = i/100)
# Sub-lineage algorithms
def Arora2019(community_function, n_rep = 3):
"""
Arora2019
Sub-divide the wells of the plate into lines, where each 'line' consists of n_rep communities.
Each round, the highest-function member of a line is used to colonize the next n_rep wells of that line.
"""
n_wells = len(community_function)
n_lines = int(np.ceil(n_wells/n_rep)) #Number of lines
transfer_matrix = np.zeros((n_wells,n_wells))
import threading
import time
import numpy as np
import open3d as o3d
import transforms3d
from t3d import t3d
import os
from pathlib import Path
from PIL import Image, ImageFont, ImageDraw
from matplotlib import font_manager
LOCK = threading.Lock()
OPENCV_2_OPENGL = np.array(
[
[1, 0, 0, 0],
[0, -1, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, 1],
]
)
def get_font():
font = font_manager.findfont(font_manager.FontProperties(family=['sans-serif']))
return font
def text_3d(text, pos, font, camera_pose=None, density=10):
"""
Generate a 3D text point cloud used for visualization.
:param text: content of the text
:param pos: 3D xyz position of the text upper left corner
:param font: a PIL ImageFont instance used to render the text
:param camera_pose: optional 4x4 camera pose used to orient the text towards the viewer
:param density: controls the point density of the generated point cloud
:return: o3d.geometry.PointCloud object
"""
font_dim = font.getsize(text)
img = Image.new('RGB', font_dim, color=(255, 255, 255))
draw = ImageDraw.Draw(img)
draw.text((0, 0), text, font=font, fill=(0, 0, 0))
img = np.asarray(img)
img_mask = img[:, :, 0] < 128
indices = np.indices([*img.shape[0:2], 1])[:, img_mask, 0].reshape(3, -1).T
pcd = o3d.geometry.PointCloud()
pcd.colors = o3d.utility.Vector3dVector(img[img_mask, :].astype(float) / 255.0)
pcd.points = o3d.utility.Vector3dVector(indices / 100.0 / density)
pos = t3d.apply_T(OPENCV_2_OPENGL, np.array([pos]))[0]
if camera_pose is not None:
tf = np.matmul(np.linalg.inv(camera_pose[:3, :3]), transforms3d.euler.euler2mat(*np.radians([0, 0, 90])))
tf = np.matmul(tf, transforms3d.euler.euler2mat(*np.radians([180, 0, 0])))
pcd.rotate(tf)
pcd.translate(pos)
return pcd
def draw_camera(pose):
height = 0.15
width = 0.2
depth = 0.2
points = np.array(
[
[0, 0, 0],
[-width, height, depth],
[width, height, depth],
[width, -height, depth],
[-width, -height, depth],
]
).reshape(-1, 3)
lines = np.array(
[
[0, 1],
[1, 2],
[2, 3],
[3, 4],
[4, 1],
[0, 1],
[0, 2],
[0, 3],
[0, 4],
]
)
line_set = o3d.geometry.LineSet()
line_set.points = o3d.utility.Vector3dVector(points)
line_set.lines = o3d.utility.Vector2iVector(lines)
line_set.paint_uniform_color([0, 0, 0])
line_set.transform(np.matmul(pose, OPENCV_2_OPENGL))
return line_set
def o3d_frame(transform=np.identity(4), colour=None):
frame_mesh = o3d.geometry.TriangleMesh.create_coordinate_frame()
if colour is not None:
frame_mesh.paint_uniform_color(colour)
frame_mesh.transform(transform)
frame_mesh.transform(OPENCV_2_OPENGL)
return frame_mesh
def optical_frame(transform, pose_2_cam=np.identity(4)):
return o3d_frame(np.matmul(transform, pose_2_cam))
def o3d_pointcloud(points, colour=(1, 0, 0)):
pointcloud_marker = o3d.geometry.PointCloud()
if points is not None:
pointcloud_marker.points = o3d.utility.Vector3dVector(
t3d.apply_T(OPENCV_2_OPENGL, points)
)
pointcloud_marker.paint_uniform_color(colour)
return pointcloud_marker
def draw_trajectory(trajectory):
line_set = o3d.geometry.LineSet()
line_set.points = o3d.utility.Vector3dVector(
t3d.apply_T(OPENCV_2_OPENGL, trajectory[:, 1:4])
)
indices = np.arange(len(trajectory))
line_set.lines = o3d.utility.Vector2iVector(
[(a, b) for a, b in zip(indices, indices[1:])]
)
return line_set
class Visualiser:
def __init__(self, vo_function, *args, **kwargs):
"""Tool for visualising frames/poses and pointclouds
Args:
vo_function: main function to run the vo algorithm
*args: arguments to pass to vo_function
"""
self.running = True
self.vis = o3d.visualization.Visualizer()
self.vis.create_window()
self.frames = dict()
self.frame_markers = dict()
self.text = dict()
self.pointclouds = dict()
self.pointcloud_markers = dict()
self.vis.add_geometry(o3d_frame())
self.font = ImageFont.truetype(get_font(), 18 * 10)
# threading.Thread(target=vo_function, args=[*args, self]).start()
kwargs["visualiser"] = self
threading.Thread(target=vo_function, args=args, kwargs=kwargs).start()
def add_frame(self, id_, pose, colour):
"""Adds a frame to be visualised
Args:
id_: ID of the frame so it can be looked up/modified later
pose: pose of the frame
colour: colour of the frame specified as (r, g, b) [0-1]
"""
self.frames[id_] = pose
self.text[id_] = text_3d(id_, pose[:3, 3] + np.array([0.1, 0.1, 0.1]), self.font)
self.vis.add_geometry(self.text[id_])
self.frame_markers[id_] = o3d_frame(pose, colour)
self.vis.add_geometry(self.frame_markers[id_])
def add_pointcloud(self, id_, points, colour):
self.pointclouds[id_] = points
self.pointcloud_markers[id_] = (o3d_pointcloud(points, colour), colour)
self.vis.add_geometry(self.pointcloud_markers[id_][0])
def update_frame(self, id_, pose):
"""Update the pose of the frame. Should be used within the vo function.
Args:
id_: id of the frame to update
pose: pose to set the frame to
"""
self.frames[id_] = pose
def _update_frame_markers(self):
"""Updates the visualisation so the markers reflect the poses
specified by update_frame().
Should be called from within visualiser.run
"""
for id_, pose in self.frames.items():
if id_ in self.frame_markers:
self.frame_markers[id_].vertices = optical_frame(pose).vertices
self.vis.update_geometry(self.frame_markers[id_])
new_text = text_3d(id_, pose[:3, 3] + np.array([0.1, 0.1, 0.1]), self.font, self._camera_view_pose())
self.text[id_].points = new_text.points
self.vis.update_geometry(self.text[id_])
def set_start_view(self, zoom=4):
"""Sets the direction of the camera/viewport"""
view_ctl = self.vis.get_view_control()
view_ctl.set_up([1, 0, 0])
view_ctl.set_front([0, 0, -1])
view_ctl.set_lookat([1, 0, 0])
view_ctl.set_zoom(zoom)
view_ctl.set_constant_z_far(20)
def _update_pointcloud_marker(self):
"""Updates the markers for the point clouds. Should be called from visualiser.run.
Should be called after the markers have been added.
"""
for id_, points in self.pointclouds.items():
if id_ in self.pointcloud_markers:
self.pointcloud_markers[id_][0].points = o3d_pointcloud(self.pointclouds[id_]).points
self.pointcloud_markers[id_][0].paint_uniform_color(self.pointcloud_markers[id_][1])
self.vis.update_geometry(self.pointcloud_markers[id_][0])
def _camera_view_pose(self):
return self.vis.get_view_control().convert_to_pinhole_camera_parameters().extrinsic
def run(self):
"""Start the visualiser"""
while self.running:
self._update_frame_markers()
self._update_pointcloud_marker()
self.vis.update_renderer()
self.vis.poll_events()
time.sleep(0.05)
self.vis.destroy_window()
def stop(self):
"""Stop the visualiser"""
self.running = False
def run(visualiser):
"""Example run function"""
T_wc = np.identity(4)
T_wc[:3, 3] = np.array([1, 1.5, 2])
## IMPORTS ##
import logging # Logging: provides a set of convenience functions for simple logging usage
import time # Time: provides various time-related functions
import numpy as np # NumPy: the fundamental package for scientific computing with Python
import cv2 # OpenCV: usage ranges from interactive art, to mines inspection, stitching maps on the web or through advanced robotics
from sklearn.cluster import MiniBatchKMeans # scikit-learn: Simple and efficient tools for data mining and data analysis
## CLASSES ##
class DataExtractor:
# __init__
# --------
# Initializes an instance of PhotoExtractor
def __init__(self, aMinThresh, aMaxThresh, bMinThresh, bMaxThresh):
# Creates logger
self.logger = logging.getLogger("DataExtractor")
# Stores variables
self.aMinThresh = aMinThresh
self.aMaxThresh = aMaxThresh
self.bMinThresh = bMinThresh
self.bMaxThresh = bMaxThresh
# extract
# --------
# Starts the extraction on the chosen frame
def extract(self, frame):
self.logger.info("started ...")
start = time.time()
# Background Extraction
self.logger.info("starting background extraction ...")
processedFrame = self.backgroundExtraction(frame)
self.logger.info("background extraction ended")
# Sample Detection and Data Extraction
self.logger.info("starting sample detection ...")
data = self.sampleDetection(processedFrame)
self.logger.info("sample detection ended")
end = time.time()
self.logger.info("ended, took {}s".format(end - start))
return data
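# Usage sketch (illustrative; the threshold values and image path below are
# made-up assumptions, not taken from the original project):
#   extractor = DataExtractor(aMinThresh=120, aMaxThresh=140,
#                             bMinThresh=120, bMaxThresh=140)
#   frame = cv2.imread("sample_frame.png")
#   data = extractor.extract(frame)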
def checkMaskResult(self, frame):
# Converting frame to Lab and splitting it
frameLab = cv2.cvtColor(frame, cv2.COLOR_BGR2LAB)
_, a, b = cv2.split(frameLab)
# Formatting the components that are being used, a and b, and removing 0s
a = a.flatten()
b = b.flatten()
# Extract min and max from components
aMin = np.amin(a)
aMax = np.amax(a)
bMin = np.amin(b)
bMax = np.amax(b)
# Verify if the extracted data is ok according to the threshold values
if aMin >= self.aMinThresh and aMax <= self.aMaxThresh and bMin >= self.bMinThresh and bMax <= self.bMaxThresh:
return False
else:
return True
def backgroundExtraction(self, frame):
# Storing frame dimensions
width = len(frame[0])
height = len(frame)
# Converts frame to L*a*b* color space
frameLab = cv2.cvtColor(frame, cv2.COLOR_BGR2LAB)
# Splits frame into L*, a* and b*, but stores only sample b*
_, _, frameB = cv2.split(frameLab)
# Reshaping frame b* into a list of pixels
reshapedFrame = frameB.reshape((frameB.shape[0] * frameB.shape[1], 1))
# Creating K-Means object
clt = MiniBatchKMeans(n_clusters = 2, random_state = 5)
# Calculating K-Means
self.logger.info("starting k-means clustering ...")
clt.fit(reshapedFrame)
self.logger.info("k-means clustering ended")
labels = clt.labels_
# Turning K-Means results into a mask
mask = np.uint8(labels).reshape((height, width))
# Noise removal
self.logger.info("starting noise removal ...")
kernel = np.ones((3,3), np.uint8)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations = 3)
self.logger.info("noise removal ended")
# Applying mask to original image
kMeansFrame = cv2.bitwise_and(frame, frame, mask = mask)
# Verifying if the mask needs inversal
if self.checkMaskResult(kMeansFrame) == True:
# Inverting mask and applying to original image
mask = 1 - mask
kMeansFrame = cv2.bitwise_and(frame, frame, mask = mask)
return kMeansFrame
def sampleDetection(self, frame):
# Storing frame dimensions
width = len(frame[0])
height = len(frame)
# Creates gray frame
grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Finding its contours
self.logger.info("starting contour finding ...")
im2, contours, hierarchy = cv2.findContours(grayFrame.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
self.logger.info("contour finding ended")
# For each contour, store its area
contourAreas = []
for contour in contours:
contourAreas.append( cv2.contourArea(contour) )
contourAreas = np.array(contourAreas)
#!/usr/bin/python3
"""
Program Name: enf_analysis.py
Created By: <NAME>
Description:
Program designed to extract ENF traces from audio files.
"""
# Import Required Libraries
import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
import scipy
from scipy.io import wavfile
import scipy.signal
from datetime import datetime
import time
import tqdm
# Global Variables
enf_freq = 50
low_freq = enf_freq - 1
high_freq = enf_freq + 1
def fir_bandpass(data, fs, lowpass, highpass, usr = 1, dsr = 1):
"""
Function Name: fir_bandpass
Description:
Make an FIR bandpass filter using the firwin and upfirdn
functions from scipy.signal.
Input(s):
data - data to filter.
fs - sampling rate.
lowpass - low frequency cutoff.
highpass - high frequency cutoff.
usr - upsample rate for upfirdn (optional. default = 1).
dsr - downsample rate for upfirdn (optional. default = 1).
Return(s):
y - filtered data.
"""
y = np.array([])
nyq = fs / 2
h_nyq = nyq
if (h_nyq % 2) == 0:
h_nyq += 1
h_low = lowpass / (nyq * 1.0)
h_high = highpass / (nyq * 1.0)
h = scipy.signal.firwin(fs+1, [h_low, h_high], pass_zero=False)
y = scipy.signal.upfirdn(h,data)
return y
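# Example (illustrative, not from the original script): isolate the band around
# 50 Hz from a noisy signal sampled at 1 kHz.
#   fs = 1000
#   t = np.arange(fs) / fs
#   sig = np.sin(2 * np.pi * 50 * t) + 0.5 * np.random.randn(fs)
#   filtered = fir_bandpass(sig, fs, 49, 51)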
def butter_bandpass(lowcut, highcut, nyq, order=None):
"""
Function Name: butter_bandpass
Description:
Function to setup butterworth bandpass filter and
return the proper coefficients.
Input(s):
lowcut - low cutoff frequency
highcut - high cutoff frequency
nyq - nyquist rate (sample_rate / 2)
order - filter order (optional. default = 2)
Return(s):
b , a - filter coefficients
"""
# Check If Optional Arg Is None
if order is None:
order = 2
# Set Bandpass Frequencies
low = lowcut / nyq
high = highcut / nyq
# Determine Coefficients For Filter Setup
b, a = scipy.signal.butter(order, [low, high], btype='band')
return b, a
def butter_bandpass_filter(data, lowcut, highcut, nyq, order=None):
"""
Function Name: butter_bandpass_filter
Description:
Function to setup and filter data using a butterworth
bandpass filter.
Input(s):
data - data to filter
lowcut - low cutoff frequency
highcut - high cutoff frequency
nyq - nyquist rate (sample_rate / 2)
order - order of filter (optional. default = 2)
Return(s):
y - filtered data
"""
# Check If Optional Arg Is None
if order is None:
order = 2
# Get Coefficients And Filter Signal
b, a = butter_bandpass(lowcut, highcut, nyq, order=order)
y = scipy.signal.lfilter(b, a, data)
# Return Filtered Data
return y
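# Example (illustrative): the same band isolated with a 2nd-order Butterworth
# filter; nyq is half the sampling rate.
#   y = butter_bandpass_filter(sig, 49, 51, fs / 2.0, order=2)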
# Main Function
def main():
global enf_freq, low_freq, high_freq
showFirst = False
# Set Filename For Analysis
filename = "pc.wav"
#filename = "RR.wav"
print("-"*50)
fname_inp = input("[] Please Enter Filename [default = pc.wav]: ")
if not(fname_inp == ""):
filename = fname_inp
enf_inp = input("[] Please Input ENF Frequency [default = 50]: ")
if not(enf_inp == ""):
enf_freq = int(enf_inp)
harmonic = 1
upsample_order = 5
dnsample_order = 5
harmonic_inp = input("[] Please Enter Desired Harmonic [default = 1]: ")
if not(harmonic_inp == ""):
harmonic = int(harmonic_inp)
showFirst_inp = input("[] Show First STFT Window (y/n)? ")
showFirst_inp = showFirst_inp.lower()
if (showFirst_inp == "y"):
showFirst = True
elif ((showFirst_inp == "n") or (showFirst_inp == "")):
showFirst = False
else:
print(f"[!] Incorrect Input {showFirst_inp}. Defaulting to False")
showFirst = False
print("-"*50)
print(f"[+] Beginning Analysis [{filename}]")
try:
# Get Data & Sample Rate From File
sr, data = wavfile.read(filename)
data, sr = librosa.load(filename, sr=sr)
except Exception as e:
print("[!] Something Went Wrong Reading Audio File <{filename}> ... Exiting")
return
# Set Nyquist Rate (Sample Rate / 2)
nyq = int(sr / 2.0)
# Determine Time-Length And Set Axis For Plotting
time_len = (len(data) / (sr * 1.0))
x_ax = np.linspace(0, time_len, len(data))
# set frame size to .2 seconds
if time_len >= 1:
f_size = int((len(data) / time_len) * 0.2)
else:
f_size = int(len(data) / 50)
# Take FFT Of Data
fft_data = np.fft.fft(data)
fft_data = abs(fft_data * np.conj(fft_data))
x_ax_fft = np.linspace(0, sr, len(fft_data))
# Only Take 1st Half Of FFT Data To Avoid Mirroring
fft_data = fft_data[:nyq]
x_ax_fft = x_ax_fft[:nyq]
# Plot Unfiltered Data & FFT Of Data
plt.figure()
plt.subplot(211)
plt.title(f"Raw Data: {filename}")
plt.ylabel("Amplitude")
plt.xlabel("Time")
plt.plot(x_ax,data)
plt.subplot(212)
plt.title(f"FFT Of {filename}")
plt.ylabel("Magnitude")
plt.xlabel("Frequency (Hz)")
plt.plot(x_ax_fft,fft_data)
plt.tight_layout()
plt.show()
# Filter Data Using Bandpass With Low = 49 and High = 51 (or harmonic -- the multiplier)
filt_data = butter_bandpass_filter(np.append(data,np.zeros(data.size * 9)), low_freq*harmonic, high_freq*harmonic, nyq, order=2)
# Take FFT Of Filtered Data
fft_filt_data = np.fft.fft(filt_data)
fft_filt_data = abs(fft_filt_data * np.conj(fft_filt_data))
x_ax_fft_f = np.linspace(0, sr, len(fft_filt_data))
# Only Take 1st Half Of FFT To Prevent Mirroring
fft_filt_data = fft_filt_data[:nyq]
f_filtd_freq = np.fft.fftfreq(fft_filt_data.size, d = 2./sr)
import numpy as np
from matplotlib.patches import Polygon
from feastruct.post.post import ScalarResult
class BoundaryCondition:
"""Parent class for supports and loads.
Provides an init method for the creation of boundary conditions.
:cvar node: The node object at which the boundary condition acts
:vartype node: :class:`~feastruct.fea.node.Node`
:cvar float val: The value of the boundary condition
:cvar int dof: The degree of freedom about which the boundary condition acts
"""
def __init__(self, node, val, dof):
"""Inits the BoundaryCondition class.
:param node: The node object at which the boundary condition acts
:type node: :class:`~feastruct.fea.node.Node`
:param float val: The value of the boundary condition
:param int dof: The degree of freedom about which the boundary condition acts
"""
# assign the node object, value and dof of the boundary condition
self.node = node
self.val = val
self.dof = dof
def get_gdof(self):
"""Returns the global degree of freedom number for the boundary condition.
:returns: Global degree of freedom number
:rtype: int
"""
return self.node.dofs[self.dof].global_dof_num
class NodalSupport(BoundaryCondition):
"""Class for a dirichlet boundary condition acting at a node.
Provides methods for the FEA solver and post-processing.
:cvar node: The node object at which the boundary condition acts
:vartype node: :class:`~feastruct.fea.node.Node`
:cvar float val: The value of the boundary condition
:cvar int dof: The degree of freedom about which the boundary condition acts
:cvar reactions: A list of reaction objects
:vartype reactions: list[:class:`~feastruct.post.post.ScalarResult`]
"""
def __init__(self, node, val, dof):
"""inits the NodalSupport class.
:param node: The node object at which the boundary condition acts
:type node: :class:`~feastruct.fea.node.Node`
:param float val: The value of the boundary condition
:param int dof: The degree of freedom about which the boundary condition acts
"""
# initialise the parent class
super().__init__(node, val, dof)
# initialise the nodal reaction results
self.reactions = []
def apply_support(self, K, f_ext):
"""Applies the nodal support.
The stiffness matrix and external force vector are modified to apply the dirichlet boundary
condition to enforce the displacement at the chosen degree of freedom to be equal to the
specified value.
:param K: Global stiffness matrix of size *(N x N)*
:type K: :class:`numpy.ndarray`
:param f_ext: Global external force vector of size *N*
:type f_ext: :class:`numpy.ndarray`
"""
# get gdof number for the support
gdof = self.node.dofs[self.dof].global_dof_num
# modify stiffness matrix and f_ext
K[gdof, :] = 0
K[gdof, gdof] = 1
f_ext[gdof] = self.val
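# Illustrative example (added): for a 3-DOF system where this support acts on
# global DOF 1 with val = 0.0, apply_support() zeroes row 1 of K, sets
# K[1, 1] = 1 and f_ext[1] = 0.0, so solving K u = f_ext enforces u[1] = 0.0
# exactly.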
def get_reaction(self, analysis_case):
"""Gets the reaction force result corresponding to analysis_case.
:param analysis_case: Analysis case
:type analysis_case: :class:`~feastruct.fea.cases.AnalysisCase`
:returns: Reaction force at the node
:rtype: float
"""
# loop through reactions
for reaction in self.reactions:
if reaction.analysis_case == analysis_case:
return reaction.result
def save_reaction(self, f, analysis_case):
"""Saves the reaction force corresponding to analysis_case.
:param float f: Reaction force at the node
:param analysis_case: Analysis case
:type analysis_case: :class:`~feastruct.fea.cases.AnalysisCase`
"""
# check to see if there is already a reaction for the current analysis_case
for reaction in self.reactions:
if reaction.analysis_case == analysis_case:
reaction.result = f
return
# if there isn't already a reaction for the current analysis_case
self.reactions.append(ScalarResult(result=f, analysis_case=analysis_case))
def plot_support(self, ax, small, get_support_angle, analysis_case, deformed, def_scale):
"""Plots a graphical representation of the nodal support.
Based on the type of support at the node, a graphical representation of the support type is
generated and plotted. Possible support types include rollers, hinges, rotation restraints,
fixed rollers and fully fixed supports. The angle of the connecting elements is considered
in order to produce the most visually appealing representation. The support location is
displaced if a deformed plot is desired. Note that some of the methods used to plot the
supports are taken from <NAME>'s code plotGeom.m.
:param ax: Axes object on which to plot
:type ax: :class:`matplotlib.axes.Axes`
:param float small: A dimension used to scale the support
:param get_support_angle: A function that returns the support angle and the number of
connected elements
:type get_support_angle: :func:`feastruct.post.post.PostProcessor.get_support_angle`
:param analysis_case: Analysis case
:type analysis_case: :class:`~feastruct.fea.cases.AnalysisCase`
:param bool deformed: Represents whether or not the node locations are deformed based on
the results of analysis_case
:param float def_scale: Value used to scale deformations
"""
fixity = analysis_case.freedom_case.get_nodal_fixities(node=self.node)
if fixity not in ([1, 1, 0], [0, 1, 0]):
(angle, num_el) = get_support_angle(self.node)
if fixity == [1, 0, 0]:
# plot a y-roller
angle = round(angle / 180) * 180
self.plot_xysupport(ax, angle, True, num_el == 1, small, analysis_case, deformed,
def_scale)
elif fixity == [0, 1, 0]:
(angle, num_el) = get_support_angle(self.node, 1)
# plot an x-roller
if np.mod(angle + 1, 180) < 2: # prefer support below
angle = 90
else:
angle = round((angle + 90) / 180) * 180 - 90
self.plot_xysupport(ax, angle, True, num_el == 1, small, analysis_case, deformed,
def_scale)
elif fixity == [1, 1, 0]:
# plot a hinge
(angle, num_el) = get_support_angle(self.node, 1)
self.plot_xysupport(ax, angle, False, num_el == 1, small, analysis_case, deformed,
def_scale)
elif fixity == [0, 0, 1]:
ax.plot(self.node.x, self.node.y, 'kx', markersize=8)
else:
# plot a support with moment fixity
if fixity == [1, 1, 1]:
# plot a fixed support
s = np.sin(angle * np.pi / 180)
c = np.cos(angle * np.pi / 180)
rot_mat = np.array([[c, -s], [s, c]])
line = np.array([[0, 0], [-1, 1]]) * small
rect = np.array([[-0.6, -0.6, 0, 0], [-1, 1, 1, -1]]) * small
ec = 'none'
elif fixity == [1, 0, 1]:
# plot y-roller block
angle = round(angle / 180) * 180
s = np.sin(angle * np.pi / 180)
c = np.cos(angle * np.pi / 180)
rot_mat = np.array([[c, -s], [s, c]])
line = np.array([[-0.85, -0.85], [-1, 1]]) * small
rect = np.array([[-0.6, -0.6, 0, 0], [-1, 1, 1, -1]]) * small
ec = 'k'
elif fixity == [0, 1, 1]:
# plot x-roller block
angle = round((angle + 90) / 180) * 180 - 90
s = np.sin(angle * np.pi / 180)
c = np.cos(angle * np.pi / 180)
rot_mat = np.array([[c, -s], [s, c]])
line = np.array([[-0.85, -0.85], [-1, 1]]) * small
rect = np.array([[-0.6, -0.6, 0, 0], [-1, 1, 1, -1]]) * small
ec = 'k'
rot_line = np.matmul(rot_mat, line)
rot_rect = np.matmul(rot_mat, rect)
# add coordinates of node
if deformed:
# get displacement of node for current analysis case
u = [0, 0]
u[0] = self.node.dofs[0].get_displacement(analysis_case)
u[1] = self.node.dofs[1].get_displacement(analysis_case)
rot_line[0, :] += self.node.x + u[0] * def_scale
rot_line[1, :] += self.node.y + u[1] * def_scale
rot_rect[0, :] += self.node.x + u[0] * def_scale
rot_rect[1, :] += self.node.y + u[1] * def_scale
else:
rot_line[0, :] += self.node.x
rot_line[1, :] += self.node.y
rot_rect[0, :] += self.node.x
rot_rect[1, :] += self.node.y
ax.plot(rot_line[0, :], rot_line[1, :], 'k-', linewidth=1)
ax.add_patch(Polygon(np.transpose(rot_rect), facecolor=(0.7, 0.7, 0.7), edgecolor=ec))
def plot_imposed_disp(self, ax, max_disp, small, get_support_angle, analysis_case, deformed,
def_scale):
"""Plots a graphical representation of an imposed translation.
:param ax: Axes object on which to plot
:type ax: :class:`matplotlib.axes.Axes`
:param float max_disp: Maximum imposed displacement in the analysis case
:param float small: A dimension used to scale the support
:param get_support_angle: A function that returns the support angle and the number of
connected elements
:type get_support_angle: :func:`feastruct.post.post.PostProcessor.get_support_angle`
:param analysis_case: Analysis case
:type analysis_case: :class:`~feastruct.fea.cases.AnalysisCase`
:param bool deformed: Represents whether or not the node locations are deformed based on
the results of case id
:param float def_scale: Value used to scale deformations
"""
val = self.val / max_disp
offset = 0.5 * small
lf = abs(val) * 1.5 * small # arrow length
lh = 0.6 * small # arrow head length
wh = 0.6 * small # arrow head width
sp = 0.15 * small # half spacing between double line
lf = max(lf, lh * 1.5)
(angle, num_el) = get_support_angle(self.node)
s = np.sin(angle * np.pi / 180)
c = np.cos(angle * np.pi / 180)
n = np.array([c, s])
inward = (n[self.dof] == 0 or np.sign(n[self.dof]) == np.sign(val))
to_rotate = (self.dof) * 90 + (n[self.dof] >= 0) * 180
sr = np.sin(to_rotate * np.pi / 180)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from print_helper import conductance_to_value_exp, build_string, build_string_gen
import seaborn as sns
from matplotlib import lines
import matplotlib.gridspec as gridspec
import prinzdb
from print_helper import get_summ_stat_name, get_summ_stat_name_text, get_synapse_name, get_summ_stat_name_asterisk, scale_to_experimental
import sys
sys.path.append("../visualization")
import viz
from copy import deepcopy
import matplotlib.ticker
import matplotlib.patheffects as pe
def vis_sample(m, s, sample, hyperparams, t_on=None, t_off=None, with_ss=True, with_params=True,
mem_dimensions=None,mode2=None,
voltage_trace=None, time_len=None, fontscale=1.0, linescale=1.0, offset=0.0,
test_idx=None, case=None, title=None, date_today=None, counter=0, offset_labels=0.0, legend=True,
multiplier_cond_shift = 0.0, vis_legend=True, scale_bar=True,
ss_names=True, param_names=True, save_fig=False):
"""
Function of Kaan, modified by Michael. Used for plotting fig 5b Prinz.
:param m: generator object, from m = netio.create_simulators(params)[0]
:param s: summstat object, from s = netio.create_summstats(params)
:param sample: membrane/synaptic conductances
:param t_on:
:param t_off:
:param with_ss: bool, True if bars for summary stats are wanted
:param with_params: bool, True if bars for parameters are wanted
:return: figure object
"""
font_size = 15.0
if voltage_trace is None:
data = m.gen_single(sample)
else:
data = voltage_trace
Vx = data['data']
params = data['params']
stats = s.calc([data])[0]
stats_nan = deepcopy(stats)
stats[np.isnan(stats)] = 0.0
if hyperparams.include_plateau:
stats = stats[:-4]
stats = scale_to_experimental(stats)
bar_scaling_factors = [1.0, 10, 100, 10, 100, 1, 10000, 10000]
bar_scaling_factors = np.reshape(np.tile(bar_scaling_factors, 3), (3, 8))
bar_vals = bar_scaling_factors[np.asarray(hyperparams.use_membrane)]
if mem_dimensions is not None:
params_trunc = params[mem_dimensions].tolist()
params_trunc += params[-7:].tolist()
bar_vals = bar_vals[mem_dimensions]
params = np.asarray(params_trunc)
if with_params and with_ss:
fig = plt.figure(figsize=(11.3, 6))
gs = gridspec.GridSpec(2, 3, width_ratios=[len(stats), len(params[:-7]), len(params[-7:])],
wspace=0.25, height_ratios=[0.7, 0.3])
axV = plt.subplot(gs[0, :])
axss = plt.subplot(gs[1, 0])
axmemparams = plt.subplot(gs[1, 1])
axsynparams = plt.subplot(gs[1, 2])
elif with_params:
fig = plt.figure(figsize=(6, 7.5))
gs = gridspec.GridSpec(2, 2, width_ratios=[len(params[:-7]), len(params[-7:])],
hspace=0.1, wspace=0.38, height_ratios=[0.65, 0.35])
axV = plt.subplot(gs[0, :])
axmemparams = plt.subplot(gs[1, 0])
axsynparams = plt.subplot(gs[1, 1])
elif with_ss:
fig, (axV, axss) = plt.subplots(2, figsize=(14, 6))
else:
fig, axV = plt.subplots(1, figsize=(14, 3))
cols = ['#034e7b', '#0570b0', '#3690c0']
#cols = ['k', 'k', 'k']
current_col = 0
scale_bar_breadth = 1000.0
scale_bar_voltage_breadth = 50.0
if time_len is not None:
m.t = m.t * len(m.t) / time_len
scale_bar_breadth = scale_bar_breadth * len(m.t) / time_len
for j in range(len(prinzdb.neutypes)):
if time_len is not None:
axV.plot(m.t[10000+offset:10000+offset+time_len], Vx[j, 10000+offset:10000+offset+time_len] + 120.0 * (2 - j),
label=prinzdb.neutypes[j], lw=0.75, c='k', rasterized=True)
else:
axV.plot(m.t, Vx[j] + 120.0 * (2 - j), label=prinzdb.neutypes[j], lw=0.75, c='k')
current_col += 1
if scale_bar:
if mode2 == 'small':
axV.plot(10860 + np.arange(scale_bar_breadth), 318 * np.ones_like(np.arange(scale_bar_breadth)), lw=1.0,
color='k', zorder=5, rasterized=True)
axV.text(10905, 324, '1 sec', fontsize=font_size)
import matplotlib.patches as patches
rect = patches.Rectangle((11890, 234), 2000, 100, linewidth=1, facecolor='w', zorder=3)
axV.add_patch(rect)
axV.plot(13490 * np.ones_like(np.arange(scale_bar_voltage_breadth)),
#!usr/bin/env python
import numpy as np
from sklearn.model_selection import LeaveOneGroupOut
class kfold_wrapper(object):
"""
K-Fold Wrapper
"""
def __init__(self, estimator, est_fun, n_folds=None, fold_method=None, val_size=0.2):
"""
Parameters
----------
:param estimator: class
Class of Estimator, must have Sklearn API
:param est_fun: str
Function of Estimator,
If 'class', the estimator is treated as a classifier.
If 'reg', the estimator is treated as a regressor.
:param n_folds: int (default=None)
Number of folds.
If n_folds=1 or None, means no K-Fold
:param fold_method: str (default=None)
Method to Fold.
If 'None', group cross_folds will be adopted.
If 'group', the same method as 'None'.
If 'sequence', serial n_folds will be adopted,
and param val_size will be useful.
:param val_size: float (default=0.2)
the ratio of valication in member of serial n_folds,
and the length of valicaiton is rounded off.
"""
setattr(self, 'estimator', estimator)
setattr(self, 'est_fun', est_fun)
if n_folds is None:
setattr(self, 'n_folds', 1)
else:
setattr(self, 'n_folds', int(n_folds))
if fold_method is None:
setattr(self, 'fold_method', 'group')
else:
setattr(self, 'fold_method', fold_method)
setattr(self, 'val_size', val_size)
setattr(self, 'cv_pred_prob', None)
def _data_group_foldindx(self, X, y):
assert 2 <= len(X.shape) <= 3, "X.shape should be n x k or n x n2 x k"
assert len(X.shape) == len(y.shape) + 1
n_stratify = X.shape[0]
fold_len = int(np.divide(n_stratify, self.n_folds))
""" Module stentPoints2d
Implements functions to detect points on a stent in 2D slices sampled
orthogonal to the stent's centerline.
"""
import os, sys, time
import numpy as np
import visvis as vv
from points import Point, Pointset, Aarray
#import subpixel deprecated module, use interp instead
from stentseg import gaussfun
## Point detection
# HU (if a pixel next to the candidate is this low, its probably a streak)
th_streHU = 0.0;
def detect_points(slice, th_gc=2000, th_minHU=300, sigma=1.6):
""" Detect points
Detects points on a stent in the given slice. Slice
should be a numpy array.
- The Gaussian Curvature should be above a threshold
(needle detection) (th_gc)
- An absolute (weak) threshold is used based on the
Houndsfield units (th_minHU)
- Streak artifacts are suppressed
- sigma is the used scale at which the GC is calculated.
"""
# Make sure that the slice is a float
if slice.dtype not in [np.float32, np.float64]:
slice = slice.astype(np.float32)
# Create slice to supress streak artifacts
# Where streak artifacts are present, the image is inverted, anywhere else
# its zero. By also calculating derivatives on this image and than adding
# it, the threshold will never be reached where the streak artifacts are.
sliceStreak = th_streHU - slice
sliceStreak[sliceStreak<0] = 0
# Create new point set to store points
pp = Pointset(2)
# Calculate Gaussian curvature
if True:
Lxx = gaussfun.gfilter(slice, sigma, [0,2])
Ltmp = gaussfun.gfilter(sliceStreak, sigma, [0,2])
Lxx = Lxx+2*Ltmp
Lxx[Lxx>0]=0;
Lyy = gaussfun.gfilter(slice, sigma, [2,0])
Ltmp = gaussfun.gfilter(sliceStreak, sigma, [2,0])
Lyy = Lyy+2*Ltmp
Lyy[Lyy>0]=0;
Lgc = Lxx * Lyy
# Make a smoothed version
slice_smoothed = gaussfun.gfilter(slice, 0.5, 0)
# Make a selection of candidate pixels
Iy,Ix = np.where( (slice > th_minHU) & (Lgc > th_gc) )
# Mask to detect clashes
clashMask = np.zeros(slice.shape, dtype=np.bool)
# Detect local maxima
for x,y in zip(Ix,Iy):
if x==0 or y==0 or x==slice.shape[1]-1 or y==slice.shape[0]-1:
continue
# Select patch
patch1 = slice[y-1:y+2,x-1:x+2]
patch2 = slice_smoothed[y-1:y+2,x-1:x+2]
if slice[y,x] == patch1.max():# and slice_smoothed[y,x] == patch2.max():
# Found local max (allowing shared max)
# Not if next to another found point
if clashMask[y,x]:
continue
# Not a streak artifact
if patch2.min() <= th_streHU:
continue
# Subpixel
#dx,dy = subpixel.fitLQ2_5(patch1)
# Store
pp.append( x, y )
clashMask[y-1:y+2,x-1:x+2] = 1
# Express points in world coordinates and return
if isinstance(slice, Aarray):
ori = [i for i in reversed(slice.origin)]
sam = [i for i in reversed(slice.sampling)]
pp *= Point(sam)
pp += Point(ori)
return pp
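# Usage sketch (illustrative; ct_slice is a hypothetical 2D numpy array of
# Hounsfield units, e.g. one slice resampled orthogonal to the centerline):
#   pp = detect_points(ct_slice, th_gc=2000, th_minHU=300, sigma=1.6)
#   # pp is a Pointset of (x, y) positions, expressed in world coordinates
#   # when the slice is an Aarray carrying origin/sampling information.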
def detect_points2(slice, y_x, spacing, width=10):
""" Alternative version of detect_points, which uses a reference point.
Used for measurements on our phantom.
"""
# create slice as double (makes a copy)
slice = slice.astype(np.float64)
# create new point set to store points
pp = Pointset(2)
th_minHU = 300
refy, refx = y_x
refy -= spacing # because we start with incrementing it
while 1:
# next
refy += spacing
if refy > slice.shape[0]:
break
# get patch
y1, y2 = refy - spacing//4, refy + spacing//4
x1, x2 = refx - width//2, refx + width//2
if y1<0: y1=0
patch = slice[y1:y2+1, x1:x2+1]
# detect
Iy, Ix = np.where( (patch == patch.max()) & (patch > th_minHU) )
try:
Ix = Ix[0]
Iy = Iy[0]
except IndexError:
continue # if no points found...
y, x = y1+Iy, x1+Ix
if y<=0 or y>=slice.shape[0]-1:
continue
# get subpixel and store
patch2 = slice[y-1:y+2,x-1:x+2]
dx,dy = subpixel.fitLQ2_5(patch2)
pp.append( x+dx, y+dy )
return pp
## Clustering
eps = 0.00000001
def chopstick_criteria(c, p1, p2, pp, param=0.25):
## Calculate intrinsic properties
# 1 means point one (if relative measure, relative to c). 2 idem
# plural means the point set, relative to c.
# plural and 1 means for all points, relative to point 1.
# Calculate vectors
vec1 = c - p1
vec2 = c - p2
vecs = c - pp
# Calculate angles
ang12 = abs( float(vec1.angle(vec2)) )
angs1 = np.abs( vec1.angle(vecs) )
angs2 = np.abs( vec2.angle(vecs) )
# Calculate distance
dist1 = float( vec1.norm() )
dist2 = float( vec2.norm() )
dists = vecs.norm()
dist12 = p1.distance(p2) + eps
#dists1 = p1.distance(pp)
#dists2 = p2.distance(pp)
# Get the point between two points
p12 = (p1+p2) * 0.5
## Criterion 1: focus
# Find subset of points that is on the proper side of the centre.
# For this we get the angle between the line p1-c, to p-c. We need to
# check the angle p1-p3, to see if the result should me smaller or larger
# than zero. The result is a kind of FAN from the centre, spanned by
# the two points.
M1 = (angs1 < ang12) * (angs2 < ang12)
## Criterion 2: ellipse
# There are two things to be determined: the point p3 (p4 in the paper)
# and the ellipse points on the lines p1-p3 and p2-p3.
# Note the change in behaviour when d<0:
# - the ellipse points are both in p3
# - the point p3 is moved differently.
# Get distance vector
d = p12.distance(c)
f = 1 - np.exp(-d/dist12)
if f < eps:
f = eps
f = float(f)
# Get normal to line p1-p2
n = (p1-p2).normal()
# Flip normal if its in the direction of the center
if (p12 + n*d).distance(c) < d:
n = -1 * n
# Go from p12, a bit in the direction of the normal
p3 = p12
ratio = param # 0.25
if d>0: d3 = ratio*d/f
else: d3 = ratio*dist12-d
p3 = p12 + n*d3
# Ellipse points
e1 = f*p1 + (1-f)*p3
e2 = f*p2 + (1-f)*p3
# Ellipse. Make sure the length of the string is a bit too short so
# that p1 and p2 themselves will not be included.
d1 = e1.distance(pp) + e2.distance(pp)
d = e1.distance(p1) + p1.distance(e2)
M2 = d1 < d*0.99
return M1, M2
def add_angles(ang1, ang2):
""" add_angles(ang1, ang2)
Add two angles, returning a result that's always between
-pi and pi. Each angle can be a scalar or a numpy array.
"""
# Get pi and subtract angles
pi = np.pi
dang = ang1 + ang2
if isinstance(dang, np.ndarray):
# Limit, wrap around
while True:
I, = np.where(dang < -pi)
if len(I):
dang[I] += 2*pi
else:
break
while True:
I, = np.where(dang > pi)
if len(I):
dang[I] -= 2*pi
else:
break
else:
while dang < -pi:
dang += 2*pi
while dang > pi:
dang -= 2*pi
# Done
return dang
def subtract_angles(ang1, ang2):
""" subtract_angles(ang1, ang2)
Subtract two angles, returning a result that's always between
-pi and pi. Each angle can be a scalar or a numpy array.
"""
# Get pi and subtract angles
pi = np.pi
dang = ang1 -ang2
if isinstance(dang, np.ndarray):
# Limit, wrap around
while True:
I, = np.where(dang < -pi)
if len(I):
dang[I] += 2*pi
else:
break
while True:
I, = np.where(dang > pi)
if len(I):
dang[I] -= 2*pi
else:
break
else:
while dang < -pi:
dang += 2*pi
while dang > pi:
dang -= 2*pi
# Done
return dang
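# Illustrative sketch (not part of the original module): both helpers keep the
# result wrapped between -pi and pi, for scalars as well as numpy arrays.
def _example_angle_wrapping():
    a = add_angles(np.deg2rad(170.), np.deg2rad(30.))        # wraps to -160 deg
    b = subtract_angles(np.deg2rad(-170.), np.deg2rad(30.))  # wraps to +160 deg
    arr = add_angles(np.array([3.0, -3.0]), np.array([0.5, -0.5]))
    return np.rad2deg(a), np.rad2deg(b), arr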
def fit_cirlce(pp, warnIfIllDefined=True):
""" fit_cirlce(pp, warnIfIllDefined=True)
Calculate the circle (x - c.x)**2 + (y - c.y)**2 = c.r**2
From the set of points pp. Returns a point instance with an added
attribute "r" specifying the radius.
In case the points all lie on a line, the algorithm will fail and
return 0 for x, y and r. This warning can be suppressed.
The solution is a Least Squares fit. The method described in [1] is
called Modified Least Squares (MLS) and gives a closed-form solution
which is very robust.
[1]
<NAME> and <NAME>
2000
A Few Methods for Fitting Circles to Data
IEEE Transactions on Instrumentation and Measurement
"""
# Init error point
ce = Point(0,0)
ce.r = 0.0
def cov(a, b):
n = len(a)
Ex = a.sum() / n
Ey = b.sum() / n
return ( (a-Ex)*(b-Ey) ).sum() / (n-1)
# Get x and y elements
X = pp[:,0]
Y = pp[:,1]
# In the paper there is a factor n*(n-1) in all equations below. However,
# this factor cancels by division, so it is left out here.
A = cov(X,X)
B = cov(X,Y)
C = cov(Y,Y)
D = 0.5 * ( cov(X,Y**2) + cov(X,X**2) )
E = 0.5 * ( cov(Y,X**2) + cov(Y,Y**2) )
# Calculate denominator
denum = A*C - B*B
if denum==0:
if warnIfIllDefined:
print("Warning: can not fit a circle to the given points.")
return ce
# Calculate point
c = Point( (D*C-B*E)/denum, (A*E-B*D)/denum )
# Calculate radius
c.r = c.distance(pp).sum() / len(pp)
# Done
return c
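# Minimal usage sketch (assumes the visvis-style Point/Pointset used above):
# fit the MLS circle to noisy samples on a known circle and inspect the result.
def _example_fit_circle():
    angles = np.linspace(0, 2 * np.pi, 60)
    pp = Pointset(2)
    for a in angles:
        pp.append(10.0 + 4.0 * np.cos(a),
                  -2.0 + 4.0 * np.sin(a) + 0.05 * np.random.randn())
    c = fit_cirlce(pp)
    print('fitted centre %s, radius %.3f (expected ~4.0)' % (c, c.r))
    return c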
def sample_circle(c, N=32):
""" sample_circle(c, N=32)
Sample a circle represented by point c (having attribute "r") using
N datapoints. Returns a pointset.
"""
# Get radius
r = 1.0
if hasattr(c, 'r'):
r = c.r
# Sample N points, but add one to close the loop
d = 2*np.pi / N
a = np.linspace(0,2*np.pi, N+1)
# Prepare array
pp = np.empty((len(a), 2), dtype=np.float32)
# Apply polar coordinates
pp[:,0] = np.cos(a)
#!/usr/bin/env python
import abc
import numpy as np
#third party imports
from .dataset import DataSet
from .geodict import GeoDict
class Grid(DataSet):
"""
An abstract class to represent lat/lon gridded datasets. Grids are
assumed to be pixel-registered - that is, grid coordinates
represent the value at the *center* of the cells.
"""
@abc.abstractmethod #should be a classmethod when instantiated
def getFileGeoDict(filename):
"""
Abstract method to return the bounding box, resolution, and shape of a file in whatever Grid format.
:param filename:
The path to the filename of whatever grid format this is being implemented in.
:returns:
A geodict specifying the bounding box, resolution, and shape of the data in a file.
"""
raise NotImplementedError
@abc.abstractmethod #should be a classmethod when instantiated
def getBoundsWithin(filename,geodict):
"""
Abstract method to return a geodict for this file that is guaranteed to lie inside the input geodict, without resampling.
:param filename:
The name of the file whose resolution/extent should be used.
:param geodict:
The geodict used as the basis for finding bounds for this file that are guaranteed to lie inside it.
:raises NotImplementedError:
Always in base class
"""
raise NotImplementedError
@classmethod
def _getPadding(cls,geodict,paddict,padvalue):
#get pad left columns - go outside specified bounds if not exact edge
pxmin,pxmax,pymin,pymax = (paddict.xmin,paddict.xmax,paddict.ymin,paddict.ymax)
gxmin,gxmax,gymin,gymax = (geodict.xmin,geodict.xmax,geodict.ymin,geodict.ymax)
dx,dy = (geodict.dx,geodict.dy)
ny,nx = (geodict.ny,geodict.nx)
padleftcols = int(np.ceil((gxmin - pxmin)/dx))
padrightcols = int(np.ceil((pxmax - gxmax)/dx))
padbottomrows = int(np.ceil((gymin - pymin)/dy))
padtoprows = int(np.ceil((pymax - gymax)/dy))
#if any of these are negative, set them to zero
if padleftcols < 0:
padleftcols = 0
if padrightcols < 0:
padrightcols = 0
if padbottomrows < 0:
padbottomrows = 0
if padtoprows < 0:
padtoprows = 0
leftpad = np.ones((ny,padleftcols))*padvalue
rightpad = np.ones((ny,padrightcols))*padvalue
nx += padrightcols + padleftcols
bottompad = np.ones((padbottomrows,nx))*padvalue
#!/usr/bin/env python
# stdlib modules
import sys
import os.path
import pickle
# third party modules
import numpy as np
# local imports
from shakelib.station import StationList
homedir = os.path.dirname(os.path.abspath(__file__)) # where is this script?
shakedir = os.path.abspath(os.path.join(homedir, '..', '..'))
sys.path.insert(0, shakedir)
#
# Set SAVE to True to write new versions of the output to disk,
# set it to False to actually run the tests.
#
SAVE = False
def test_station():
homedir = os.path.dirname(os.path.abspath(__file__))
#
# First test the Calexico data on its own
#
event = 'Calexico'
datadir = os.path.abspath(os.path.join(homedir, 'station_data'))
datadir = os.path.abspath(os.path.join(datadir, event, 'input'))
inputfile = os.path.join(datadir, 'stationlist_dat.xml')
dyfifile = os.path.join(datadir, 'ciim3_dat.xml')
xmlfiles = [inputfile, dyfifile]
stations = StationList.loadFromXML(xmlfiles, ":memory:")
df1 = stations.getStationDictionary(instrumented=True)
df2 = stations.getStationDictionary(instrumented=False)
ppath = os.path.abspath(os.path.join(datadir, '..', 'database',
'test1.pickle'))
if SAVE:
ldf = [df1, df2]
with open(ppath, 'wb') as f:
pickle.dump(ldf, f, pickle.HIGHEST_PROTOCOL)
else:
with open(ppath, 'rb') as f:
ldf = pickle.load(f)
saved_df1 = ldf[0]
saved_df2 = ldf[1]
compare_dataframes(saved_df1, df1)
compare_dataframes(saved_df2, df2)
#
# Should at least hit this code
#
imtlist = stations.getIMTtypes()
assert 'PGA' in imtlist
assert 'PGV' in imtlist
#
# Add the Northridge data to the Calexico data to test
# addData()
#
event = 'northridge'
datadir = os.path.abspath(os.path.join(homedir, 'station_data'))
datadir = os.path.abspath(os.path.join(datadir, event, 'input'))
inputfile = os.path.join(datadir, 'hist_dat.xml')
dyfifile = os.path.join(datadir, 'dyfi_dat.xml')
xmlfiles = [inputfile, dyfifile]
stations = stations.addData(xmlfiles)
df1 = stations.getStationDictionary(instrumented=True)
df2 = stations.getStationDictionary(instrumented=False)
ppath = os.path.abspath(os.path.join(datadir, '..', 'database',
'test2.pickle'))
if SAVE:
ldf = [df1, df2]
with open(ppath, 'wb') as f:
pickle.dump(ldf, f, pickle.HIGHEST_PROTOCOL)
else:
with open(ppath, 'rb') as f:
ldf = pickle.load(f)
saved_df1 = ldf[0]
saved_df2 = ldf[1]
compare_dataframes(saved_df1, df1)
compare_dataframes(saved_df2, df2)
def test_station2():
#
# Test the wenchuan data
#
homedir = os.path.dirname(os.path.abspath(__file__))
event = 'wenchuan'
datadir = os.path.abspath(os.path.join(homedir, 'station_data'))
datadir = os.path.abspath(os.path.join(datadir, event, 'input'))
inputfile = os.path.join(datadir, 'stationlist.xml')
xmlfiles = [inputfile]
stations = StationList.loadFromXML(xmlfiles, ":memory:")
df1 = stations.getStationDictionary(instrumented=True)
df2 = stations.getStationDictionary(instrumented=False)
ppath = os.path.abspath(os.path.join(datadir, '..', 'database',
'test3.pickle'))
if SAVE:
ldf = [df1, df2]
with open(ppath, 'wb') as f:
pickle.dump(ldf, f, pickle.HIGHEST_PROTOCOL)
else:
with open(ppath, 'rb') as f:
ldf = pickle.load(f)
saved_df1 = ldf[0]
saved_df2 = ldf[1]
compare_dataframes(saved_df1, df1)
compare_dataframes(saved_df2, df2)
#
# Dump the database to SQL and then restore it to a new
# StationList object. Compare dataframes.
#
sql = stations.dumpToSQL()
stations2 = StationList.loadFromSQL(sql)
df1 = stations2.getStationDictionary(instrumented=True)
df2 = stations2.getStationDictionary(instrumented=False)
compare_dataframes(saved_df1, df1)
compare_dataframes(saved_df2, df2)
def test_station3():
#
# Exercise the geojson code. Can't really compare it to anything
# because the ordering is scrambled by the hashes in the XML
# parsing stage. Once (if) we institute a loadFromJSON() method, we
# can do a comparison.
#
homedir = os.path.dirname(os.path.abspath(__file__))
event = 'wenchuan'
datadir = os.path.abspath(os.path.join(homedir, 'station_data'))
datadir = os.path.abspath(os.path.join(datadir, event, 'input'))
inputfile = os.path.join(datadir, 'stationlist.xml')
xmlfiles = [inputfile]
stations = StationList.loadFromXML(xmlfiles, ":memory:")
myjson = stations.getGeoJson() # noqa
def test_station4():
homedir = os.path.dirname(os.path.abspath(__file__))
event = 'northridge'
datadir = os.path.abspath(os.path.join(homedir, 'station_data'))
datadir = os.path.abspath(os.path.join(datadir, event, 'input'))
dyfifile = os.path.join(datadir, 'dyfi_dat.xml')
xmlfiles = [dyfifile]
stations = StationList.loadFromXML(xmlfiles, ":memory:")
df1 = stations.getStationDictionary(instrumented=True) # noqa
df2 = stations.getStationDictionary(instrumented=False) # noqa
assert df1 is None
def test_station5():
homedir = os.path.dirname(os.path.abspath(__file__))
event = 'Calexico'
datadir = os.path.abspath(os.path.join(homedir, 'station_data'))
datadir = os.path.abspath(os.path.join(datadir, event, 'input'))
inputfile = os.path.join(datadir, 'stationlist_dat.xml')
dyfifile = os.path.join(datadir, 'ciim3_dat.xml')
xmlfiles = [inputfile, dyfifile]
stations1 = StationList.loadFromXML(xmlfiles, ":memory:")
#
# Load the data more than once to exercise the code that handles
# repeated entries.
#
xmlfiles = [inputfile, inputfile, dyfifile, dyfifile]
stations2 = StationList.loadFromXML(xmlfiles, ":memory:")
df1 = stations1.getStationDictionary(instrumented=True)
df2 = stations2.getStationDictionary(instrumented=True)
compare_dataframes(df1, df2)
def compare_dataframes(df1, df2):
assert sorted(list(df1.keys())) == sorted(list(df2.keys()))
idx1 = np.argsort(df1['id'])
# -*- coding: UTF-8 -*-
"""
Created by louis at 2021/9/13
Description:
"""
import os
import gc
import glob
import torch
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import pandas as pd
import time
from itertools import islice
from torch.utils.data import Dataset, DataLoader
from multiprocessing import Pool
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split
from torch.utils.tensorboard import SummaryWriter
from tqdm.auto import tqdm
import logging
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (2048, rlimit[1]))
datefmt = '%Y-%m-%d %H:%M:%S'
logging.basicConfig(filename='pytorch-baseline.log', filemode='w', format='%(asctime)s - %(levelname)s - %(message)s',
datefmt=datefmt, level=logging.DEBUG)
# import tqdm
tqdm.pandas()
import warnings
from multiprocessing import cpu_count
def get_path_dict(f, v):
f_dict = {}
for i in tqdm(v):
fpath = f'{f}/stock_id={i}'
flist = glob.glob(os.path.join(fpath, '*.parquet'))
if len(flist) > 0:
f_dict[i] = flist[0]
return f_dict
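# Usage sketch (hypothetical paths): parquet files are expected one directory
# per stock, e.g. <root>/stock_id=0/part-0.parquet; stocks without files are
# simply omitted from the returned mapping.
def _example_get_path_dict():
    import tempfile
    root = tempfile.mkdtemp()
    for sid in (0, 1):
        d = os.path.join(root, 'stock_id=%d' % sid)
        os.makedirs(d)
        open(os.path.join(d, 'part-0.parquet'), 'w').close()
    return get_path_dict(root, [0, 1, 2])  # -> {0: '.../part-0.parquet', 1: ...}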
# train_idx, valid_idx = train_test_split(train_ds['row_id'], shuffle=True, test_size=0.1, random_state=SEED)
# ds: train.csv里面的数据 f_dict:是 book_train.parquet 里面的数据
def process_optiver_ds(ds, f_dict, skip_cols, t_dict):
x = []
y = []
full_seconds_in_bucket = {'seconds_in_bucket': np.arange(600)}
full_seconds_in_bucket = pd.DataFrame(full_seconds_in_bucket)
for stock_id, stock_fnmame in tqdm(f_dict.items()):
trade_train_ = t_dict.get(stock_id)
trade_train_ = pd.read_parquet(trade_train_)
optiver_ds = pd.read_parquet(stock_fnmame)
time_ids = optiver_ds['time_id'].unique()
for time_id in time_ids:
optiver_ds_ = optiver_ds[optiver_ds['time_id'] == time_id]
optiver_ds_ = pd.merge(full_seconds_in_bucket, optiver_ds_, how='left', on='seconds_in_bucket')
optiver_ds_ = pd.merge(optiver_ds_, trade_train_[trade_train_['time_id'] == time_id], how='left',
on='seconds_in_bucket')
# optiver_ds_.drop(skip_cols)
optiver_ds_.drop(['time_id_x', 'time_id_y'], axis=1)
optiver_ds_ = np.nan_to_num(optiver_ds_)
row_id = str(stock_id) + '-' + time_id.astype(str)
r = ds[ds['row_id'] == row_id]['target']
x.append(optiver_ds_)
y.append(r)
return x, y
def chunks(data, SIZE=10000):
it = iter(data)
for i in range(0, len(data), SIZE):
yield {k: data[k] for k in islice(it, SIZE)}
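# Illustrative sketch: chunks() splits a dict into smaller dicts of up to SIZE
# keys each, which is how the per-stock file maps are fanned out to the workers.
def _example_chunks():
    d = {i: 'stock_%d.parquet' % i for i in range(10)}
    parts = list(chunks(d, SIZE=4))
    return [len(p) for p in parts]  # [4, 4, 2]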
def process_book_train_chunk(chunk_ds):
return process_optiver_ds(train_ds, chunk_ds, book_skip_columns, trade_train_dict)
def process_book_test_chunk(chunk_ds):
return process_optiver_ds(test_ds, chunk_ds, book_skip_columns, trade_test_dict)
'''
# Split the samples into 4 chunks, each containing 28 entries
book_train_chunks = [i for i in chunks(book_train_dict, int(len(book_train_dict) / NTHREADS))]
# trade_train_chunks = [i for i in chunks(trade_train_dict, int(len(trade_train_dict) / NTHREADS))]
z = 1 if len(book_test_dict) < NTHREADS else NTHREADS
book_test_chunks = [i for i in chunks(book_test_dict, int(len(book_test_dict) / z))]
# trade_test_chunks = [i for i in chunks(trade_test_dict, int(len(trade_test_dict) / z))]
pool = Pool(NTHREADS)  # create a process pool with at most NTHREADS processes
r = pool.map(process_book_train_chunk, book_train_chunks)
pool.close()
a1, a2 = zip(*r)
pool = Pool(NTHREADS)  # create a process pool with at most NTHREADS processes
r = pool.map(process_book_test_chunk, book_test_chunks)
pool.close()
t_a1, t_a2 = zip(*r)
np_train = a1
np_target = a2'''
# Scaler
# transformers = []
# for i in tqdm(range(np_train.shape[1])):
# a = np.nan_to_num(np_train[train_idx])
# b = np.nan_to_num(np_train[valid_idx])
#
# transformer = StandardScaler() # StandardScaler is very useful!
# np_train[train_idx] = transformer.fit_transform(a)
# np_train[valid_idx] = transformer.transform(b)
# transformers.append(transformer) # Save Scalers for the inference stage
class LSTMModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, ntoken, ninp, nhid, nlayers, dropout=0.1):
super(LSTMModel, self).__init__()
# self.drop = nn.Dropout(dropout)
# self.encoder = nn.Embedding(ntoken, ninp)
self.rnn = nn.LSTM(ninp + input_features_num, nhid + input_features_num, nlayers, dropout=dropout,
batch_first=True, bidirectional=True)
self.regress_rnn = nn.Sequential(
nn.BatchNorm1d(2 * nhid + 2 * input_features_num),
nn.Linear(2 * nhid + 2 * input_features_num, 1),
nn.Sigmoid()
)
self.decoder = nn.Sequential(
nn.BatchNorm1d(3 * nhid + 2 * input_features_num),
nn.Linear(3 * nhid + 2 * input_features_num, nhid + input_features_num),
nn.ReLU(),
nn.Dropout(0.2),
nn.BatchNorm1d(nhid + input_features_num),
nn.Linear(nhid + input_features_num, ntoken),
nn.ReLU(),
nn.Dropout(0.1),
nn.BatchNorm1d(ntoken),
nn.Linear(ntoken, 1),
nn.Sigmoid()
)
self.self_attention = nn.Sequential(
nn.Linear(3 * nhid + 2 * input_features_num, 10 * (nhid + input_features_num)),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(10 * (nhid + input_features_num), 10 * (nhid + input_features_num)),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(10 * (nhid + input_features_num), 3 * nhid + 2 * input_features_num),
nn.Softmax(dim=1)
)
# self.decoder_1 = nn.Linear(nhid, ntoken)
# self.decoder_2 = nn.Linear(ntoken, 1)
self.conv1d_relu_stack = nn.Sequential(
nn.Conv1d(in_channels=600, out_channels=1200, kernel_size=3),
nn.Dropout(0.1),
nn.ReLU(), # 9
nn.Conv1d(in_channels=1200, out_channels=1200, kernel_size=3),
nn.Dropout(0.2),
nn.ReLU(), # 7
nn.Conv1d(in_channels=1200, out_channels=1200, kernel_size=3),
nn.Dropout(0.2),
nn.ReLU(), # 5
nn.Conv1d(in_channels=1200, out_channels=600, kernel_size=3),
nn.Dropout(0.1),
nn.ReLU(), # 3
nn.Conv1d(in_channels=600, out_channels=nhid, kernel_size=3),
nn.ReLU(), # 1
)
self.regress_conv = nn.Sequential(
nn.BatchNorm1d(nhid),
nn.Linear(nhid, 1),
nn.Sigmoid()
)
self.linear_relu_stack = nn.Sequential(
nn.Linear(input_features_num, ntoken),
nn.Dropout(0.1),
nn.ReLU(),
nn.Linear(ntoken, ninp),
nn.Dropout(0.2),
nn.ReLU(),
nn.Linear(ninp, ninp),
nn.Dropout(0.2),
nn.ReLU(),
)
self.ninp = ninp
self.nhid = nhid
self.nlayers = nlayers
def forward(self, input):
# emb = self.drop(self.encoder(input))
cov_logits = self.conv1d_relu_stack(input)
cov_logits = cov_logits.view(cov_logits.shape[0], cov_logits.shape[1])
regress_conv_out = self.regress_conv(cov_logits)
logits = self.linear_relu_stack(input)
logits = torch.cat((logits, input), 2)
# logits = logits.view(1, len(logits), -1)
output, hidden = self.rnn(logits)
output = output[:, -1, :]
regress_rnn_out = self.regress_rnn(output)
new_logits = torch.cat((cov_logits, output), 1)
# attention_output = self.self_attention(new_logits)
# output = self.drop(output)
new_logits = torch.mul(new_logits, self.self_attention(new_logits))
# decoded_out = self.decoder(new_logits)
decoded_out = self.decoder(new_logits)
# decoded_2 = self.decoder_2(decoded_1)
return regress_conv_out, regress_rnn_out, decoded_out
def init_hidden(self, bsz):
weight = next(self.parameters())
return (weight.new_zeros(self.nlayers, bsz, self.nhid),
weight.new_zeros(self.nlayers, bsz, self.nhid))
# dataloader = DataLoader(transformed_dataset, batch_size=4,
# shuffle=True, num_workers=0)
def rmspe(y_pred,y_true):
rms = np.sqrt(np.mean(np.square((y_true - y_pred) / y_true)))
return rms
def RMSPELoss(y_pred, y_true):
return torch.sqrt(torch.mean(((y_true - y_pred) / y_true) ** 2)).clone()
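# Consistency sketch (illustrative numbers): the numpy metric and the torch loss
# compute the same root mean squared percentage error.
def _example_rmspe():
    y_true = np.array([0.010, 0.020, 0.030], dtype=np.float32)
    y_pred = np.array([0.012, 0.018, 0.033], dtype=np.float32)
    np_val = rmspe(y_pred, y_true)
    torch_val = RMSPELoss(torch.from_numpy(y_pred), torch.from_numpy(y_true)).item()
    return np_val, torch_val  # equal up to float32 precision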
def do_process(optiver_ds, full_seconds_in_bucket, trade__, time_id):
optiver_ds_ = optiver_ds[optiver_ds['time_id'] == time_id]
if optiver_ds_.size == 0:
return None
optiver_ds_ = pd.merge(full_seconds_in_bucket, optiver_ds_, how='left', on='seconds_in_bucket')
optiver_ds_ = pd.merge(optiver_ds_, trade__[trade__['time_id'] == time_id], how='left',
on='seconds_in_bucket')
# optiver_ds_.drop(skip_cols)
optiver_ds_ = optiver_ds_.drop(['time_id_x', 'time_id_y', 'seconds_in_bucket'], axis=1)
optiver_ds_ = np.nan_to_num(optiver_ds_)
# TODO: standardize each column
for i in range(optiver_ds_.shape[1]):
if np.sum(optiver_ds_[:, i]) != 0 and np.std(optiver_ds_[:, i]) != 0:
optiver_ds_[:, i] = (optiver_ds_[:, i] - np.mean(optiver_ds_[:, i])) / np.std(optiver_ds_[:, i])
import os
import warnings
from collections import OrderedDict
import numpy as np
import astropy.units as u
from astropy.utils.exceptions import AstropyWarning, AstropyUserWarning
from . import utils
__all__ = ["TemplateError", "Template", "Redden", "ModifiedBlackBody",
"read_templates_file", "load_phoenix_stars",
"bspline_templates", "gaussian_templates"]
class TemplateError(object):
def __init__(self, file='templates/TEMPLATE_ERROR.eazy_v1.0', arrays=None, filter_wavelengths=[5500.], scale=1.):
"""
Template error function with spline interpolation at arbitrary redshift.
Parameters
----------
file : str
File containing the template error function definition
(columns of wavelength in Angstroms and the TEF).
arrays : optional, (wave, TEF)
Set from arrays rather than reading from ``file``.
filter_wavelengths : list
List of filter pivot wavelengths (observed-frame Angstroms).
scale : float
Scale factor multiplied to TEF array, e.g., the ``TEMP_ERR_A2``
parameter.
Attributes
----------
te_x, te_y : arrays
The input wavelength and TEF arrays.
min_wavelength, min_wavelength : float
Min/max of the wavelengths in ``te_x``.
clip_lo, clip_hi : float
Extrapolation limits to use if redshifted filters fall outside
defined ``te_x`` array
"""
self.file = file
if arrays is None:
self.te_x, self.te_y = np.loadtxt(file, unpack=True)
else:
self.te_x, self.te_y = arrays
self.scale = scale
self.filter_wavelengths = filter_wavelengths
self._set_limits()
self._init_spline()
def _set_limits(self):
"""
Limits to control extrapolation
"""
nonzero = self.te_y > 0
self.min_wavelength = self.te_x[nonzero].min()
self.max_wavelength = self.te_x[nonzero].max()
self.clip_lo = self.te_y[nonzero][0]
self.clip_hi = self.te_y[nonzero][-1]
def _init_spline(self):
"""
Initialize the CubicSpline interpolator
"""
from scipy import interpolate
self._spline = interpolate.CubicSpline(self.te_x, self.te_y)
def interpolate(self, filter_wavelength=5500., z=1.):
"""
``filter_wavelength`` is observed wavelength of photometric filters.
But these sample the *rest* wavelength of the template error function
at lam/(1+z)
"""
return self._spline(filter_wavelength/(1+z))*self.scale
def __call__(self, z, limits=None):
"""
Interpolate TEF arrays at a specific redshift
Parameters
----------
z : float
Redshift
limits : None, (float, float)
Extrapolation limits. If not specified, get from
``clip_lo`` and ``clip_hi`` attributes.
"""
lcz = np.atleast_1d(self.filter_wavelengths)/(1+z)
tef_z = self._spline(np.atleast_1d(self.filter_wavelengths)/(1+z))
if limits is None:
limits = [self.clip_lo, self.clip_hi]
clip_lo = (lcz < self.min_wavelength)
tef_z[clip_lo] = limits[0]
clip_hi = (lcz > self.max_wavelength)
tef_z[clip_hi] = limits[1]
return tef_z*self.scale
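# Usage sketch (illustrative; the wavelength grid and TEF values below are made
# up): build a template error function from arrays and evaluate it for three
# observed-frame filters at z=2. Filters falling outside the tabulated
# wavelength range are clipped to the edge values.
def _example_template_error():
    wave_grid = np.linspace(900., 9.e4, 500)            # rest-frame Angstroms
    tef_grid = 0.02 + 0.1 * np.exp(-wave_grid / 2.e4)   # arbitrary smooth TEF
    tef = TemplateError(arrays=(wave_grid, tef_grid),
                        filter_wavelengths=[3600., 4700., 6200.])
    return tef(2.0)  # one TEF value per filter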
class Redden(object):
def __init__(self, model=None, Av=0., **kwargs):
"""
Wrapper function for `dust_attenuation` and `dust_extinction`
reddening laws
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from eazy.templates import Redden
fig, ax = plt.subplots(1,1,figsize=(6,4))
wave = np.arange(1200, 2.e4)
for model in ['calzetti00', 'mw', 'smc', 'reddy15']:
redfunc = Redden(model=model, Av=1.0)
ax.plot(wave, redfunc(wave), label=model)
ax.plot(wave, wave*0+10**(-0.4), color='k',
label=r'$A_\lambda = 1$', linestyle=':')
ax.legend()
ax.loglog()
ax.set_xticks([2000, 5000, 1.e4])
ax.set_xticklabels([0.2, 0.5, 1.0])
ax.grid()
ax.set_xlabel('wavelength, microns')
ax.set_ylabel('Attenuation / extinction (Av=1 mag)')
fig.tight_layout(pad=0.5)
Parameters
----------
model : `extinction`/`attenuation` object or str
Allowable string arguments:
- 'smc': `dust_extinction.averages.G03_SMCBar`
- 'lmc': `dust_extinction.averages.G03_LMCAvg`
- 'mw','f99': `dust_extinction.parameter_averages.F99`
- 'calzetti00', 'c00': `dust_attenuation.averages.C00`
- 'wg00': `dust_attenuation.radiative_transfer.WG00`
- 'kc13': Calzetti with modified slope and dust bump from
Kriek & Conroy (2013)
- 'reddy15': Reddy et al. (2015)
Av : float
Selective extinction/attenuation (passed as `tau_V` for ``WG00``)
"""
allowed = ['smc', 'lmc', 'mw', 'f99', 'c00', 'calzetti00', 'wg00',
'kc13','reddy15','zafar15']
if isinstance(model, str):
self.model_name = model
if model in ['smc']:
from dust_extinction.averages import G03_SMCBar
self.model = G03_SMCBar()
elif model in ['lmc']:
from dust_extinction.averages import G03_LMCAvg
self.model = G03_LMCAvg()
elif model in ['mw','f99']:
from dust_extinction.parameter_averages import F99
self.model = F99()
elif model in ['calzetti00', 'c00']:
from dust_attenuation.averages import C00
self.model = C00(Av=Av)
elif model.lower() in ['kc13']:
from eazy.sps import KC13
self.model = KC13(Av=Av, **kwargs)
elif model.lower() in ['reddy15']:
from eazy.sps import Reddy15
self.model = Reddy15(Av=Av, **kwargs)
elif model.lower() in ['zafar15']:
from eazy.sps import Zafar15
self.model = Zafar15(Av=Av)
elif model in ['wg00']:
from dust_attenuation.radiative_transfer import WG00
if 'tau_V' in kwargs:
self.model = WG00(**kwargs)
else:
self.model = WG00(tau_V=Av, **kwargs)
else:
msg = "Requested model ('{model}') not in {allowed}."
raise IOError(msg.format(model=model, allowed=allowed))
else:
self.model = model
self.model_name = 'Unknown'
for k in ['Av', 'tau_V']:
if hasattr(model, k):
Av = getattr(model, k)
break
self.Av = Av
@property
def ebv(self):
"""
E(B-V) for models that have ``Rv``
"""
if hasattr(self.model, 'Rv'):
return self.Av/self.model.Rv
else:
print('Warning: Rv not defined for model: ' + self.__repr__())
return 0.
def __repr__(self):
msg = '<Redden {0}, Av/tau_V={1}>'
return msg.format(self.model.__repr__(), self.Av)
def __call__(self, wave, left=0, right=1., **kwargs):
"""
Return reddening factor.
Parameters
----------
wave : array (NW)
Wavelength array. If has no units, assume
`~astropy.units.Angstrom`.
left, right : float
Extrapolation at short/long wavelengths
Returns
-------
ext : array (NW)
Extinction / attenuation as a function of wavelength
"""
if not hasattr(wave, 'unit'):
xu = wave*u.Angstrom
else:
if wave.unit is None:
xu = np.asarray(wave) * u.Angstrom
else:
xu = wave
if 'Av' in kwargs:
self.Av = kwargs['Av']
if 'tau_V' in kwargs:
self.Av = kwargs['tau_V']
for k in kwargs:
if hasattr(self.model, k):
setattr(self.model, k, kwargs[k])
ext = np.atleast_1d(np.ones_like(xu.value))
if hasattr(self.model, 'x_range'):
if hasattr(self.model, 'extinguish'):
# dust_extinction has x_range in 1/micron
xblue = (1./xu.to(u.micron)).value > self.model.x_range[1]
xred = (1./xu.to(u.micron)).value < self.model.x_range[0]
else:
# dust_attenuation has x_range in micron
xblue = (xu.to(u.micron)).value < self.model.x_range[0]
xred = (xu.to(u.micron)).value > self.model.x_range[1]
ext[xblue] = left
ext[xred] = right
xr = (~xblue) & (~xred)
else:
xr = np.isfinite(wave)
if (self.model is None) | (self.Av <= 0):
# Don't do anything
pass
elif hasattr(self.model, 'extinguish'):
# extinction
ext[xr] = self.model.extinguish(xu[xr], Av=self.Av)
elif hasattr(self.model, 'attenuate'):
# attenuation
if hasattr(self.model, 'tau_V'):
# WG00
self.model.tau_V = self.Av
else:
self.model.Av = self.Av
ext[xr] = self.model.attenuate(xu[xr])
else:
msg = ('Dust model must have either `attenuate` or `extinguish`' +
' method.')
raise IOError(msg)
if hasattr(wave, '__len__'):
return ext
elif ext.size == 1:
return ext[0]
else:
return ext
def read_templates_file(templates_file=None, as_dict=False, **kwargs):
"""
Read templates listed in ``templates_file``.
Parameters
----------
templates_file : str
Filename of the ascii file containing the templates list. Has format
like
.. code::
1 templates/fsps_full/tweak_fsps_QSF_12_v3_001.dat 1.0
2 templates/fsps_full/tweak_fsps_QSF_12_v3_002.dat 1.0
...
N {path} {scale}
where ``scale`` is the factor needed to scale the template wavelength
array to units of Angstroms.
as_dict : bool
Return dictionary rather than a list (e.g., for `grizli`).
kwargs : dict
Extra keyword arguments are passed to `~eazy.templates.Template`
with ``file`` and ``to_angstrom`` keywords set automatically.
Returns
-------
templates : list
List of `eazy.templates.Template` objects (`dict` if ``as_dict``)
"""
lines = open(templates_file).readlines()
templates = []
for line in lines:
if line.strip().startswith('#'):
continue
lspl = line.split()
template_file = lspl[1]
if len(lspl) > 2:
to_angstrom = float(lspl[2])
else:
to_angstrom = 1.
templ = Template(file=template_file, to_angstrom=to_angstrom,
**kwargs)
templates.append(templ)
if as_dict:
tdict = OrderedDict()
for t in templates:
tdict[t.name] = t
return tdict
else:
return templates
class Template():
def __init__(self, file=None, name=None, arrays=None, sp=None, meta={}, to_angstrom=1., velocity_smooth=0, norm_filter=None, resample_wave=None, fits_column='flux', redfunc=Redden(), redshifts=[0], verbose=True, flux_unit=(u.L_sun/u.Angstrom), **kwargs):
"""
Template object.
Can optionally specify a 2D flux array with the first
dimension indicating the template for the nearest redshift in the
corresponding ``redshifts`` list. When integrating the
filter fluxes with ``integrate_filter``, the template index with the
redshift nearest to the specified redshift will be used.
Parameters
----------
file : str
Filename of ascii or FITS template
arrays : (array, array)
Tuple of ``wave``, ``flux`` arrays. Here ``flux`` assumed to
have units f-lambda.
sp : object
Object with ``wave``, ``flux`` attributes, e.g., from
``prospector``. Here ``flux`` is assumed to have units of f-nu.
to_angstrom : float
Scale factor such that ``wave * to_angstrom`` has units of
`astropy.units.Angstrom`
velocity_smooth : float
Velocity smoothing in km/s, applied if > 0
resample_wave : array
Grid to resample the template wavelengths read from the input
fits_column : str
Column name of the flux column if arrays read from a ``file``
redfunc : `eazy.templates.Redden`
Object to apply additional reddening.
redshifts : array-like
Redshift grid for redshift-dependent templates
flux_unit : `astropy.units.core.Unit`
Units of ``flux`` array.
Attributes
----------
wave : array
wavelength in `astropy.units.Angstrom`, dimensions ``[NWAVE]``.
flux : array
Flux density f-lambda, can have redshift dependence, dimensions
``[NZ, NWAVE]``.
name : str
Label name
meta : dict
Metadata
redfunc : `eazy.templates.Redden`, optional
Object for applying dust reddening.
"""
import copy
from astropy.table import Table
import astropy.units as u
self.wave = None
self.flux = None
self.flux_unit = flux_unit
self.name = 'None'
self.meta = copy.deepcopy(meta)
self.velocity_smooth = velocity_smooth
if name is None:
if file is not None:
self.name = os.path.basename(file)
else:
self.name = name
self.orig_table = None
if sp is not None:
# Prospector
self.wave = np.cast[float](sp.wave)
self.flux = np.cast[float](sp.flux)
# already fnu
self.flux *= utils.CLIGHT*1.e10 / self.wave**2
elif file is not None:
# Read from a file
if file.split('.')[-1] in ['fits','csv','ecsv']:
tab = Table.read(file)
self.wave = tab['wave'].data.astype(float)
if fits_column not in tab.colnames:
msg = (f"'{fits_column}' not in {file}; " +
f"available columns are {tab.colnames}.")
raise ValueError(msg)
self.flux = tab[fits_column].data.astype(float)
self.orig_table = tab
if hasattr(tab[fits_column], 'unit'):
if tab[fits_column].unit is not None:
self.flux_unit = tab[fits_column].unit
# Transpose because FITS tables stored like NWAVE, NZ
if self.flux.ndim == 2:
self.flux = self.flux.T
for k in tab.meta:
self.meta[k] = tab.meta[k]
else:
_arr = np.loadtxt(file, unpack=True)
self.wave, self.flux = _arr[0], _arr[1]
elif arrays is not None:
self.wave, self.flux = arrays[0]*1., arrays[1]*1.
if arrays[0].shape[0] != np.atleast_2d(arrays[1]).shape[1]:
raise ValueError("Array dimensions don't match: "+
f'arrays[0]: {arrays[0].shape}, '+
f'arrays[1]: {arrays[1].shape}, ')
if hasattr(self.flux, 'unit'):
self.flux_unit = self.flux.unit
if hasattr(self.flux, 'value'):
self.flux = self.flux.value
#self.set_fnu()
else:
raise TypeError('Must specify either `sp`, `file` or `arrays`')
if self.flux.ndim == 1:
# For redshift dependence
self.flux = np.atleast_2d(self.flux)
self.redshifts = np.zeros(1)
self.NZ, self.NWAVE = self.flux.shape
else:
self.NZ, self.NWAVE = self.flux.shape
if 'NZ' in self.meta:
redshifts = [self.meta[f'Z{j}']
for j in range(self.meta['NZ'])]
if len(redshifts) != self.NZ:
msg = (f'redshifts ({len(redshifts)})'
f' doesn\'t match flux dimension ({self.NZ})!')
raise ValueError(msg)
self.redshifts = np.array(redshifts)
# if verbose:
# print(f'Redshift dependent! (NZ={self.NZ})')
# Handle optional units
if hasattr(self.wave, 'unit'):
if self.wave.unit is not None:
self.wave = self.wave.to(u.Angstrom).value
else:
self.wave = self.wave.data
else:
self.wave *= to_angstrom
flam_unit = u.erg/u.second/u.cm**2/u.Angstrom
if hasattr(self.flux, 'unit'):
if self.flux.unit is not None:
equiv = u.equivalencies.spectral_density(self.wave*u.Angstrom)
flam = self.flux.to(flam_unit, equivalencies=equiv)
self.flux = flam.value
else:
self.flux = self.flux.data
# Smoothing
if velocity_smooth > 0:
self.smooth_velocity(velocity_smooth, in_place=True)
# Resampling
self.resample(resample_wave, in_place=True)
#self.set_fnu()
# Reddening function
self.redfunc = redfunc
_red = self.redden # test to break at init if fails
def __repr__(self):
if self.name is None:
return self.__class__
else:
return '{0}: {1}'.format(self.__class__, self.name)
def absorbed_energy(self, i=0):
diff = self.flux[i,:]*(1-self.redden)*(self.redden > 0)
absorbed = np.trapz(diff, self.wave)
return absorbed
# if self.NZ == 1:
# return absorbed[0]
# else:
# return absorbed
@property
def redden(self):
"""
Return multiplicative scaling from `self.redfunc`, which is expected
to return attenuation in magnitudes.
"""
if self.redfunc is not None:
red = self.redfunc(self.wave*u.Angstrom)
else:
red = 1.
return red
@property
def shape(self):
"""
Shape of flux attribute
"""
return self.flux.shape
def flux_flam(self, iz=0, z=None, redshift_type='nearest'):
"""
Get redshift-dependent template in units of f-lambda
Parameters
----------
iz : int
Index of template to retrieve
z : float, None
If specified, get the redshift index with
`~eazy.templates.Template.zindex`.
redshift_type : 'nearest', 'interp'
See `~eazy.templates.Template.zindex`.
Returns
-------
flam : array
Template flux density in units of f-lambda, including any
reddening specified in the ``redden`` attribute.
"""
if z is not None:
if redshift_type == 'interp':
iz, frac = self.zindex(z=z, redshift_type=redshift_type)
if frac == 1:
flam = self.flux[iz,:]
else:
flam = frac*self.flux[iz,:]
flam += (1-frac)*self.flux[iz+1,:]
else:
iz = self.zindex(z=z, redshift_type=redshift_type)
flam = self.flux[iz,:]
else:
flam = self.flux[iz,:]
return flam * self.redden
def flux_fnu(self, iz=0, z=None, redshift_type='nearest'):
"""
Get redshift-dependent template in units of f-nu
Parameters
----------
iz : int
Index of template to retrieve
z : float, None
If specified, get the redshift index with
`~eazy.templates.Template.zindex`.
redshift_type : str
See `~eazy.templates.Template.zindex`.
Returns
-------
fnu : array
Template flux density in units of f-nu, including any
reddening specified in the ``redden`` attribute.
"""
flam = self.flux_flam(iz=iz, z=z, redshift_type=redshift_type)
return (flam * self.wave**2 / (utils.CLIGHT*1.e10))
def set_fnu(self):
"""
Deprecated. ``flux_fnu`` is now a more complicated function.
"""
print('Deprecated. ``flux_fnu`` is now a function.')
pass
def smooth_velocity(self, velocity_smooth, in_place=True, raise_error=False):
"""
Smooth template in velocity using ``astro-prospector``
Parameters
----------
velocity_smooth: float
Velocity smoothing *sigma*, in km/s.
in_place : bool
Set internal ``flux`` array to the smoothed array. If False, then
return a new `~eazy.templates.Template` object.
raise_error : bool
If ``from prospect.utils.smoothing import smooth_vel`` fails,
raise an exception or die quietly.
"""
try:
from prospect.utils.smoothing import smooth_vel
except:
if raise_error:
raise ImportError("Couldn't import `prospect.utils.smoothing")
else:
return None
if velocity_smooth <= 0:
if in_place:
return True
else:
return self
sm_flux = np.array([smooth_vel(self.wave, self.flux[i,:], self.wave,
velocity_smooth)
for i in range(self.NZ)])
sm_flux[~np.isfinite(sm_flux)] = 0.
if in_place:
self.flux_orig = self.flux*1
self.velocity_smooth = velocity_smooth
self.flux = sm_flux
return True
else:
return Template(arrays=(self.wave, sm_flux),
name=self.name, meta=self.meta,
redshifts=self.redshifts)
def to_observed_frame(self, z=0, scalar=1., extra_sigma=0, lsf_func='Bacon', to_air=True, wavelengths=None, smoothspec_kwargs={'fftsmooth':False}, include_igm=True, clip_wavelengths=[4500,9400]):
"""
Smooth and resample to observed-frame wavelengths, including an
optional Line Spread Function (LSF)
Note that the smoothing is performed with
`prospect.utils.smoothing.smoothspec <https://prospect.readthedocs.io/en/latest/api/utils_api.html>`_,
which doesn't integrate precisely over "pixels" for spectral
resolutions that are similar to or less than the target smoothing
factor.
Parameters
----------
z : float
Target redshift. Note that only the wavelength array is shifted
by ``(1+z)``. The flux densities optionally include IGM
absorption (and dust from the ``redfunc`` attribute) but don't
include the ``fl_obs = fl_rest / (1+z)`` scaling.
scalar : float, array
Scalar value or array with same dimensions as ``wave`` and
``flux`` attributes
extra_sigma : float
Extra velocity dispersion (sigma, km/s) to add in quadrature with
the MUSE LSF
lsf_func : 'Bacon', function
Line Spread Function (LSF). If ``'Bacon'``, then use the "UDF-10"
MUSE LSF from `Bacon et al. 2017
<https://ui.adsabs.harvard.edu/abs/2017A%26A...608A...1B>`_ (Eq.
8). Can also be a ``function`` that takes an argument of
wavelength in Angstroms and returns the LSF sigma, in Angstroms.
If neither of these, then only `extra_sigma` will be applied.
to_air : bool
Apply vacuum-to-air conversion with `mpdaf.obj.vactoair <https://mpdaf.readthedocs.io/en/latest/api/mpdaf.obj.vactoair.html>`_
wavelengths : array, None
Optional wavelength grid (observed frame) of the target output
(e.g., MUSE) spectrum
smoothspec_kwargs : dict
Extra keyword arguments to pass to the Prospector smoothing
function `prospect.utils.smoothing.smoothspec <https://prospect.readthedocs.io/en/latest/api/utils_api.html>`_.
When testing with very high resolution templates around a specific
wavelength, ``smoothspec_kwargs = {'fftsmooth':True}`` did not
always work as expected, so be careful with this option (which is
much faster).
include_igm : bool
Include IGM absorption at indicated redshift
clip_wavelengths : [float, float]
Trim the full observed-frame wavelength array before convolving.
The defaults bracket the nominal MUSE range.
Returns
-------
tobs : `~eazy.template.Template`
Smoothed and resampled `~eazy.template.Template` object
"""
from astropy.stats import gaussian_sigma_to_fwhm
from prospect.utils.smoothing import smoothspec
wobs = self.wave*(1+z)
if include_igm:
igmz = self.igm_absorption(z, pow=include_igm)
else:
igmz = 1.
if to_air:
try:
from mpdaf.obj import vactoair
wobs = vactoair(wobs)
except ImportError:
msg = ("`to_air` requested but `from mpdaf.obj import " +
"vactoair` failed")
warnings.warn(msg, AstropyUserWarning)
if clip_wavelengths is not None:
clip = wobs >= clip_wavelengths[0]
clip &= wobs <= clip_wavelengths[1]
if clip.sum() == 0:
raise ValueError('No template wavelengths found in the '+
f'clipping range {clip_wavelengths} '+
'Angstroms')
else:
clip = wobs > 0
if lsf_func in ['Bacon']:
# UDF-10 LSF from Bacon et al. 2017
bacon_lsf_fwhm = lambda w: 5.866e-8 * w**2 - 9.187e-4*w + 6.04
sig_ang = bacon_lsf_fwhm(wobs[clip]) / gaussian_sigma_to_fwhm
lsf_sigma = sig_ang/wobs[clip]*3.e5
lsf_func_name = 'MUSE-LSF'
elif hasattr(lsf_func, '__call__'):
lsf_sigma = lsf_func(wobs[clip])/wobs[clip]*3.e5
lsf_func_name = 'user'
else:
lsf_sigma = 0.
lsf_func_name = None
# Quadrature sum of LSF and extra velocities
vel_sigma = np.sqrt(lsf_sigma**2 + extra_sigma**2)
# In Angstroms
smooth_lambda = vel_sigma / 3.e5 * wobs[clip]
# Do the smoothing
flux_smooth = smoothspec(wobs[clip],
(self.flux_flam(z=z)*igmz*scalar)[clip],
resolution=smooth_lambda,
smoothtype='lsf', **smoothspec_kwargs)
newname = self.name + f' z={z:.3f}'
if lsf_func_name is not None:
newname += ' + ' + lsf_func_name
if extra_sigma > 0:
newname += f' + {extra_sigma:.1f} km/s'
tobs = Template(arrays=(wobs[clip], flux_smooth),
name=newname, resample_wave=wavelengths,
redshifts=[z])
return tobs
def resample(self, new_wave, z=0, in_place=True, return_array=False, interp_func=None):
"""
Resample the template to a new wavelength grid
Parameters
----------
new_wave : array
New wavelength array, can have units.
z : float
Redshift internal wavelength before resampling.
(z=0 yields no shift).
in_place : bool
Set internal ``wave`` and ``flux`` arrays to the resampled
values
return_array : bool
Return the resampled ``flux`` array if true, else return a new
`~eazy.templates.Template` object.
interp_func : None
Interpolation function. If nothing specified, tries to use
`grizli.utils_c.interp.interp_conserve_c` and falls back to
`eazy.utils.interp_conserve`.
"""
import astropy.units as u
breakme = False
if isinstance(new_wave, str):
if new_wave == 'None':
breakme = True
elif not os.path.exists(new_wave):
msg = 'WARNING: new_wave={0} could not be found'
print(msg.format(new_wave))
breakme = True
else:
new_wave = np.loadtxt(new_wave)
elif new_wave is None:
breakme = True
if breakme:
if in_place:
return False
else:
return self
if hasattr(new_wave, 'unit'):
new_wave = new_wave.to(u.Angstrom).value
if interp_func is None:
try:
from grizli.utils_c import interp
interp_func = interp.interp_conserve_c
except:
interp_func = utils.interp_conserve
new_flux = [interp_func(new_wave, self.wave*(1+z), self.flux[i,:])
for i in range(self.NZ)]
new_flux = np.array(new_flux)
if in_place:
self.wave = new_wave*1
self.flux = new_flux
return True
else:
if return_array:
return new_flux
else:
return Template(arrays=(new_wave, new_flux),
name=self.name, meta=self.meta,
redshifts=self.redshifts)
def zindex(self, z=0., redshift_type='nearest'):
"""
Get the redshift index of a multi-dimensional template array
Parameters
----------
z : float
Redshift to retrieve
redshift_type : 'nearest', 'interp', 'floor'
Interpolation type:
- 'nearest': nearest step in the template redshift grid
- 'interp': Returns index below ``z`` and interpolation fraction
- 'floor': last index where redshift grid < ``z``.
Returns
-------
iz : int
Array index, i.e, ``self.flux[iz,:]``
frac : float, optional
Fraction for interpolation, if ``redshift_type == 'interp'``.
"""
zint = np.interp(z, self.redshifts, np.arange(self.NZ),
left=0, right=self.NZ-1)
if redshift_type == 'nearest':
iz = np.round(zint)
from itertools import product
import pytest
import numpy as np
from numpy.linalg import norm
from numpy.testing import (assert_allclose, assert_array_less, assert_equal)
from sklearn.linear_model import (
enet_path, ElasticNet as sk_ElasticNet, ElasticNetCV as sk_ElasticNetCV)
from celer import Lasso, ElasticNet, celer_path, ElasticNetCV
from celer.utils.testing import build_dataset
def test_raise_errors_l1_ratio():
with np.testing.assert_raises(ValueError):
ElasticNet(l1_ratio=5.)
with np.testing.assert_raises(NotImplementedError):
ElasticNet(l1_ratio=0.)
with np.testing.assert_raises(NotImplementedError):
X, y = build_dataset(n_samples=30, n_features=50)
y = np.sign(y)
celer_path(X, y, 'logreg', l1_ratio=0.5)
@pytest.mark.parametrize("sparse_X", (True, False))
def test_ElasticNet_Lasso_equivalence(sparse_X):
n_samples, n_features = 50, 100
X, y = build_dataset(n_samples, n_features, sparse_X=sparse_X)
alpha_max = norm(X.T@y, ord=np.inf) / n_samples
alpha = alpha_max / 100.
coef_lasso = Lasso(alpha=alpha).fit(X, y).coef_
coef_enet = ElasticNet(alpha=alpha, l1_ratio=1.0).fit(X, y).coef_
assert_allclose(coef_lasso, coef_enet)
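# Illustrative check (not part of the test suite): with fit_intercept=False,
# alpha_max = ||X^T y||_inf / n_samples is the smallest regularization at which
# the Lasso solution is identically zero.
def _example_alpha_max_gives_null_model():
    X, y = build_dataset(n_samples=40, n_features=80)
    alpha_max = norm(X.T @ y, ord=np.inf) / X.shape[0]
    coef = Lasso(alpha=alpha_max, fit_intercept=False).fit(X, y).coef_
    return np.count_nonzero(coef)  # expected: 0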
from __future__ import division, print_function
if __name__ == '__main__':
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import pdb
import argparse
import re
import datetime
import sys
import numpy as np
from scipy.stats import sigmaclip
from scipy.ndimage.filters import median_filter
import fitsio
from astropy.io import fits as fits_astropy
from astropy.table import Table, vstack
from astropy import units
from astropy.coordinates import SkyCoord
from photutils import (CircularAperture, CircularAnnulus,
aperture_photometry, DAOStarFinder)
# Sphinx build would crash
try:
from astrometry.util.file import trymakedirs
from astrometry.util.starutil_numpy import hmsstring2ra, dmsstring2dec
from astrometry.util.util import wcs_pv2sip_hdr
from astrometry.util.ttime import Time
from astrometry.util.fits import fits_table, merge_tables
from astrometry.libkd.spherematch import match_radec
from astrometry.libkd.spherematch import match_xy
from tractor.splinesky import SplineSky
import legacypipe
from legacypipe.ps1cat import ps1cat
from legacypipe.gaiacat import GaiaCatalog
from legacypipe.survey import radec_at_mjd, get_git_version
from legacypipe.image import validate_procdate_plver
except ImportError:
#pass
raise
CAMERAS=['decam','mosaic','90prime','megaprime']
def ptime(text,t0):
tnow=Time()
print('TIMING:%s ' % text,tnow-t0)
return tnow
def read_lines(fn):
fin=open(fn,'r')
lines=fin.readlines()
fin.close()
if len(lines) < 1: raise ValueError('lines not read properly from %s' % fn)
return np.array( list(np.char.strip(lines)) )
def dobash(cmd):
print('UNIX cmd: %s' % cmd)
if os.system(cmd): raise ValueError
def astropy_to_astrometry_table(t):
T = fits_table()
for c in t.colnames:
T.set(c, t[c])
return T
def _ccds_table(camera='decam'):
'''Initialize the CCDs table.
Description and Units at:
https://github.com/legacysurvey/legacyzpts/blob/master/DESCRIPTION_OF_OUTPUTS.md
'''
max_camera_length = max([len(c) for c in CAMERAS])
cols = [
('err_message', 'S30'),
('image_filename', 'S120'),
('image_hdu', '>i2'),
('camera', 'S%i' % max_camera_length),
('expnum', '>i8'),
('plver', 'S8'),
('procdate', 'S19'),
('plprocid', 'S7'),
('ccdname', 'S5'),
('ccdnum', '>i2'),
('expid', 'S17'),
('object', 'S35'),
('propid', 'S10'),
('filter', 'S1'),
('exptime', '>f4'),
('date_obs', 'S26'),
('mjd_obs', '>f8'),
('ut', 'S15'),
('ha', 'S13'),
('airmass', '>f4'),
('fwhm', '>f4'),
('fwhm_cp', '>f4'),
('gain', '>f4'),
('width', '>i2'),
('height', '>i2'),
('ra_bore', '>f8'),
('dec_bore', '>f8'),
('crpix1', '>f4'),
('crpix2', '>f4'),
('crval1', '>f8'),
('crval2', '>f8'),
('cd1_1', '>f4'),
('cd1_2', '>f4'),
('cd2_1', '>f4'),
('cd2_2', '>f4'),
('pixscale', 'f4'),
('zptavg', '>f4'),
('yshift', 'bool'),
# -- CCD-level quantities --
('ra', '>f8'),
('dec', '>f8'),
('skymag', '>f4'),
('skycounts', '>f4'),
('skyrms', '>f4'),
('sig1', '>f4'),
('nmatch_photom', '>i2'),
('nmatch_astrom', '>i2'),
('goodps1', '>i2'),
('goodps1_wbadpix5', '>i2'),
('phoff', '>f4'),
('phrms', '>f4'),
('zpt', '>f4'),
('zpt_wbadpix5', '>f4'),
('transp', '>f4'),
('raoff', '>f4'),
('decoff', '>f4'),
('rarms', '>f4'),
('decrms', '>f4'),
('rastddev', '>f4'),
('decstddev', '>f4')
]
ccds = Table(np.zeros(1, dtype=cols))
return ccds
def _stars_table(nstars=1):
'''Initialize the stars table.
Description and Units at:
https://github.com/legacysurvey/legacyzpts/blob/master/DESCRIPTION_OF_OUTPUTS.md
'''
cols = [('image_filename', 'S100'),('image_hdu', '>i2'),
('expid', 'S16'), ('filter', 'S1'),('nmatch', '>i2'),
('x', 'f4'), ('y', 'f4'), ('expnum', '>i8'),
('plver', 'S8'), ('procdate', 'S19'), ('plprocid', 'S7'),
('gain', 'f4'),
('ra', 'f8'), ('dec', 'f8'), ('apmag', 'f4'),('apflux', 'f4'),('apskyflux', 'f4'),('apskyflux_perpix', 'f4'),
('radiff', 'f8'), ('decdiff', 'f8'),
('ps1_mag', 'f4'),
('gaia_g','f8'),('ps1_g','f8'),('ps1_r','f8'),('ps1_i','f8'),('ps1_z','f8'),
('exptime', '>f4')]
stars = Table(np.zeros(nstars, dtype=cols))
return stars
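# Illustrative sketch: both constructors return zero-filled astropy Tables whose
# columns are then populated per CCD (one row) or per matched star (nstars rows).
def _example_zpt_tables():
    ccds = _ccds_table(camera='decam')
    ccds['camera'] = 'decam'
    ccds['expnum'] = 123456
    stars = _stars_table(nstars=3)
    stars['filter'] = 'r'
    stars['expnum'] = 123456
    return ccds, stars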
def get_pixscale(camera='decam'):
return {'decam':0.262,
'mosaic':0.262,
'90prime':0.470,
'megaprime':0.185}[camera]
def cols_for_survey_table(which='all'):
"""Return list of -survey.fits table colums
Args:
which: all, numeric,
nonzero_diff (numeric and expect non-zero diff with reference
when compute it)
"""
assert(which in ['all','numeric','nonzero_diff'])
martins_keys = ['airmass', 'ccdskymag']
gods_keys = ['plver', 'procdate', 'plprocid']
if which == 'all':
need_arjuns_keys= ['ra','dec','ra_bore','dec_bore',
'image_filename','image_hdu','expnum','ccdname','object',
'filter','exptime','camera','width','height','propid',
'mjd_obs','ccdnmatch',
'fwhm','zpt','ccdzpt','ccdraoff','ccddecoff',
'ccdrarms', 'ccddecrms', 'ccdskycounts',
'ccdphrms',
'cd1_1','cd2_2','cd1_2','cd2_1',
'crval1','crval2','crpix1','crpix2']
dustins_keys= ['skyrms', 'sig1', 'yshift']
elif which == 'numeric':
need_arjuns_keys= ['ra','dec','ra_bore','dec_bore',
'expnum',
'exptime','width','height',
'mjd_obs','ccdnmatch',
'fwhm','zpt','ccdzpt','ccdraoff','ccddecoff',
'cd1_1','cd2_2','cd1_2','cd2_1',
'crval1','crval2','crpix1','crpix2']
dustins_keys= ['skyrms']
elif which == 'nonzero_diff':
need_arjuns_keys= ['ra','dec','ccdnmatch',
'fwhm','zpt','ccdzpt','ccdraoff','ccddecoff']
dustins_keys= ['skyrms']
return need_arjuns_keys + dustins_keys + martins_keys + gods_keys
def create_survey_table(T, surveyfn, camera=None, psf=False, bad_expid=None):
"""input _ccds_table fn
output a table formatted for legacypipe/runbrick
"""
assert(camera in CAMERAS)
need_keys = cols_for_survey_table(which='all')
# Rename
rename_keys= [('zpt','ccdzpt'),
('zptavg','zpt'),
('raoff','ccdraoff'),
('decoff','ccddecoff'),
('skycounts', 'ccdskycounts'),
('skymag', 'ccdskymag'),
('rarms', 'ccdrarms'),
('decrms', 'ccddecrms'),
('phrms', 'ccdphrms'),
('nmatch_photom','ccdnmatch')]
for old,new in rename_keys:
T.rename(old,new)
# Delete
del_keys= list( set(T.get_columns()).difference(set(need_keys)) )
for key in del_keys:
T.delete_column(key)
# precision
T.width = T.width.astype(np.int16)
T.height = T.height.astype(np.int16)
T.cd1_1 = T.cd1_1.astype(np.float32)
T.cd1_2 = T.cd1_2.astype(np.float32)
T.cd2_1 = T.cd2_1.astype(np.float32)
T.cd2_2 = T.cd2_2.astype(np.float32)
if psf:
from legacyzpts.psfzpt_cuts import add_psfzpt_cuts
add_psfzpt_cuts(T, camera, bad_expid)
writeto_via_temp(surveyfn, T)
print('Wrote %s' % surveyfn)
def create_annotated_table(leg_fn, ann_fn, camera, survey, psf=False):
from legacypipe.annotate_ccds import annotate, init_annotations
T = fits_table(leg_fn)
T = survey.cleanup_ccds_table(T)
init_annotations(T)
annotate(T, survey, mzls=(camera == 'mosaic'), bass=(camera == '90prime'),
normalizePsf=psf, carryOn=True)
writeto_via_temp(ann_fn, T)
print('Wrote %s' % ann_fn)
def cols_for_converted_star_table(star_table=None,
which=None):
assert(star_table in ['photom','astrom'])
assert(which in ['all','numeric','nonzero_diff'])
# which
if which == 'all':
need_arjuns_keys= ['filename','expnum','extname',
'ccd_x','ccd_y','ccd_ra','ccd_dec',
'ccd_mag','ccd_sky',
'raoff','decoff',
'magoff',
'nmatch',
'gmag','ps1_g','ps1_r','ps1_i','ps1_z']
# If want it in star- table, add it here
extra_keys= ['image_hdu','filter','ccdname']
elif which == 'numeric':
need_arjuns_keys= ['expnum',
'ccd_x','ccd_y','ccd_ra','ccd_dec',
'ccd_mag','ccd_sky',
'raoff','decoff',
'magoff',
'nmatch',
'gmag','ps1_g','ps1_r','ps1_i','ps1_z']
extra_keys= []
elif which == 'nonzero_diff':
need_arjuns_keys= ['ccd_x','ccd_y','ccd_ra','ccd_dec',
'ccd_mag','ccd_sky',
'raoff','decoff',
'magoff',
'nmatch']
extra_keys= []
# star_table
if star_table == 'photom':
for key in ['raoff','decoff']:
need_arjuns_keys.remove(key)
elif star_table == 'astrom':
for key in ['magoff']:
need_arjuns_keys.remove(key)
# Done
return need_arjuns_keys + extra_keys
def getrms(x):
return np.sqrt( np.mean( np.power(x,2) ) )
def get_bitmask_fn(imgfn):
if 'ooi' in imgfn:
fn= imgfn.replace('ooi','ood')
elif 'oki' in imgfn:
fn= imgfn.replace('oki','ood')
else:
raise ValueError('bad imgfn? no ooi or oki: %s' % imgfn)
return fn
def get_weight_fn(imgfn):
if 'ooi' in imgfn:
fn= imgfn.replace('ooi','oow')
elif 'oki' in imgfn:
fn= imgfn.replace('oki','oow')
else:
raise ValueError('bad imgfn? no ooi or oki: %s' % imgfn)
return fn
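# Filename-convention sketch (hypothetical CP file name): the CP products share
# a basename, with 'ooi' (image) swapped for 'ood' (data-quality mask) or
# 'oow' (inverse-variance weight map).
def _example_cp_filenames():
    fn = 'c4d_170101_012345_ooi_r_v1.fits.fz'
    assert get_bitmask_fn(fn) == 'c4d_170101_012345_ood_r_v1.fits.fz'
    assert get_weight_fn(fn) == 'c4d_170101_012345_oow_r_v1.fits.fz'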
class Measurer(object):
"""Main image processing functions for all cameras.
Args:
aprad: Aperture photometry radius in arcsec
skyrad_inner,skyrad_outer: sky annulus in arcsec
det_thresh: minimum S/N for matched filter
match_radius: arcsec matching to gaia/ps1
sn_min,sn_max: if not None then the {min,max} S/N will be enforced from
aperture photometry, where S/N = apflux/sqrt(skyflux)
aper_sky_sub: do aperture sky subtraction instead of splinesky
"""
def __init__(self, fn, image_dir='images', aprad=3.5, skyrad_inner=7.0,
skyrad_outer=10.0, det_thresh=8., match_radius=3., sn_min=None,
sn_max=None, aper_sky_sub=False, calibrate=False, quiet=False,
**kwargs):
# Set extra kwargs
self.ps1_pattern= kwargs['ps1_pattern']
self.zptsfile= kwargs.get('zptsfile')
self.prefix= kwargs.get('prefix')
self.verboseplots= kwargs.get('verboseplots')
self.fn = os.path.join(image_dir, fn)
self.fn_base = fn
self.debug= kwargs.get('debug')
self.outdir= kwargs.get('outdir')
self.calibdir = kwargs.get('calibdir')
self.aper_sky_sub = aper_sky_sub
self.calibrate = calibrate
self.aprad = aprad
self.skyrad = (skyrad_inner, skyrad_outer)
self.det_thresh = det_thresh # [S/N]
self.match_radius = match_radius
self.sn_min = sn_min
self.sn_max = sn_max
# Tractor fitting of final star sample (when not doing --psf fitting)
self.stampradius= 4. # [arcsec] Should be a bit bigger than radius=3.5'' aperture
self.tractor_nstars= 30 # Tractorize at most this many stars, saves CPU time
# Set the nominal detection FWHM (in pixels) and detection threshold.
# Read the primary header and the header for this extension.
self.nominal_fwhm = 5.0 # [pixels]
try:
self.primhdr = read_primary_header(self.fn)
except ValueError:
# astropy can handle it
tmp= fits_astropy.open(self.fn)
self.primhdr= tmp[0].header
tmp.close()
del tmp
# CP WCS succeed?
self.goodWcs=True
if not ('WCSCAL' in self.primhdr.keys() and
'success' in self.primhdr['WCSCAL'].strip().lower()):
self.goodWcs=False
# Camera-agnostic primary header cards
try:
self.propid = self.primhdr['PROPID']
except KeyError:
self.propid = self.primhdr.get('DTPROPID')
self.exptime = self.primhdr['EXPTIME']
self.date_obs = self.primhdr['DATE-OBS']
self.mjd_obs = self.primhdr['MJD-OBS']
# Add more attributes.
for key, attrkey in zip(['AIRMASS','HA', 'DATE', 'PLVER', 'PLPROCID'],
['AIRMASS','HA', 'PROCDATE', 'PLVER', 'PLPROCID']):
val = self.primhdr[key]
if type(val) == str:
val = val.strip()
if len(val) == 0:
raise ValueError('Empty header card: %s' % key)
setattr(self, attrkey.lower(), val)
self.expnum = self.get_expnum(self.primhdr)
if not quiet:
print('CP Header: EXPNUM = ',self.expnum)
print('CP Header: PROCDATE = ',self.procdate)
print('CP Header: PLVER = ',self.plver)
print('CP Header: PLPROCID = ',self.plprocid)
self.obj = self.primhdr['OBJECT']
def get_good_image_subregion(self):
'''
Returns x0,x1,y0,y1 of the good region of this chip,
or None if no cut should be applied to that edge; returns
(None,None,None,None) if the whole chip is good.
This cut is applied in addition to any masking in the mask or
invvar map.
'''
return None,None,None,None
def get_expnum(self, primhdr):
return self.primhdr['EXPNUM']
def zeropoint(self, band):
return self.zp0[band]
def extinction(self, band):
return self.k_ext[band]
def set_hdu(self,ext):
self.ext = ext.strip()
self.ccdname= ext.strip()
self.expid = '{:08d}-{}'.format(self.expnum, self.ccdname)
hdulist= fitsio.FITS(self.fn)
self.image_hdu= hdulist[ext].get_extnum() #NOT ccdnum in header!
# use header
self.hdr = fitsio.read_header(self.fn, ext=ext)
# Sanity check
assert(self.ccdname.upper() == self.hdr['EXTNAME'].strip().upper())
self.ccdnum = np.int(self.hdr.get('CCDNUM', 0))
self.gain= self.get_gain(self.hdr)
# WCS
self.wcs = self.get_wcs()
# Pixscale is assumed CONSTANT! per camera
# From CP Header
hdrVal={}
# values we want
for ccd_col in ['width','height','fwhm_cp']:
# Possible keys in hdr for these values
for key in self.cp_header_keys[ccd_col]:
if key in self.hdr.keys():
hdrVal[ccd_col]= self.hdr[key]
break
for ccd_col in ['width','height','fwhm_cp']:
if ccd_col in hdrVal.keys():
#print('CP Header: %s = ' % ccd_col,hdrVal[ccd_col])
setattr(self, ccd_col, hdrVal[ccd_col])
else:
warning='Could not find %s, keys not in cp header: %s' % \
(ccd_col,self.cp_header_keys[ccd_col])
if ccd_col == 'fwhm_cp':
print('WARNING: %s' % warning)
self.fwhm_cp = np.nan
else:
raise KeyError(warning)
x0,x1,y0,y1 = self.get_good_image_subregion()
if x0 is None and x1 is None and y0 is None and y1 is None:
slc = None
else:
x0 = x0 or 0
x1 = x1 or self.width
y0 = y0 or 0
y1 = y1 or self.height
slc = slice(y0,y1),slice(x0,x1)
self.slc = slc
def read_bitmask(self):
dqfn= get_bitmask_fn(self.fn)
if self.slc is not None:
mask = fitsio.FITS(dqfn)[self.ext][self.slc]
else:
mask = fitsio.read(dqfn, ext=self.ext)
mask = self.remap_bitmask(mask)
return mask
def remap_bitmask(self, mask):
return mask
def read_weight(self, clip=True, clipThresh=0.1, scale=True, bitmask=None):
fn = get_weight_fn(self.fn)
if self.slc is not None:
wt = fitsio.FITS(fn)[self.ext][self.slc]
else:
wt = fitsio.read(fn, ext=self.ext)
if scale:
wt = self.scale_weight(wt)
if bitmask is not None:
# Set all masked pixels to have weight zero.
# bitmask value 1 = bad
wt[bitmask > 0] = 0.
if clip and np.sum(wt > 0) > 0:
# Additionally clamp near-zero (incl negative!) weight to zero,
# which arise due to fpack.
if clipThresh > 0.:
thresh = clipThresh * np.median(wt[wt > 0])
else:
thresh = 0.
wt[wt < thresh] = 0
assert(np.all(wt >= 0.))
assert(np.all(np.isfinite(wt)))
return wt
def read_image(self):
'''Read the image and header; scale the image.'''
f = fitsio.FITS(self.fn)[self.ext]
if self.slc is not None:
img = f[self.slc]
else:
img = f.read()
hdr = f.read_header()
img = self.scale_image(img)
return img, hdr
def scale_image(self, img):
return img
def scale_weight(self, img):
return img
def remap_invvar(self, invvar, primhdr, img, dq):
# By default, *do not* remap
return invvar
# A function that can be called by a subclasser's remap_invvar() method
def remap_invvar_shotnoise(self, invvar, primhdr, img, dq):
#
# All three cameras scale the image and weight to units of electrons.
# (actually, not DECam any more! But DECamMeasurer doesn't use this
# function.)
#
print('Remapping weight map for', self.fn)
const_sky = primhdr['SKYADU'] # e/s, Recommended sky level keyword from Frank
expt = primhdr['EXPTIME'] # s
with np.errstate(divide='ignore'):
var_SR = 1./invvar # e**2
print('median img:', np.median(img), 'vs sky estimate * exptime', const_sky*expt)
var_Astro = np.abs(img - const_sky * expt) # img in electrons; Poisson process so variance = mean
wt = 1./(var_SR + var_Astro) # 1/(e**2)
# Zero out NaNs and masked pixels
wt[np.isfinite(wt) == False] = 0.
wt[dq != 0] = 0.
return wt
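# [Editor's note] Hedged sketch of the shot-noise remapping above (illustrative only;
# the function and variable names below are hypothetical, not part of this module).
# Assuming img, invvar and a constant sky level are all in electrons, the per-pixel
# variance is the sky+read variance (1/invvar) plus the Poisson term |img - sky|:
#
#     import numpy as np
#
#     def remap_invvar_shotnoise_sketch(invvar, img, sky_electrons, dq):
#         with np.errstate(divide='ignore'):
#             var_sky_read = 1. / invvar              # e**2, from the CP weight map
#         var_poisson = np.abs(img - sky_electrons)   # e**2, Poisson: variance = mean
#         wt = 1. / (var_sky_read + var_poisson)      # 1/(e**2)
#         wt[~np.isfinite(wt)] = 0.                   # zero out NaN/Inf pixels
#         wt[dq != 0] = 0.                            # zero out masked pixels
#         return wt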
def create_zero_one_mask(self,bitmask,good=[]):
"""Return zero_one_mask array given a bad pixel map and good pix values
bitmask: ood image
good: list of values to treat as good in the bitmask
"""
# 0 == good, 1 == bad
zero_one_mask= bitmask.copy()
for val in good:
zero_one_mask[zero_one_mask == val]= 0
zero_one_mask[zero_one_mask > 0]= 1
return zero_one_mask
def get_zero_one_mask(self,bitmask,good=[]):
"""Convert bitmask into a zero and ones mask, 1 = bad, 0 = good
bitmask: ood image
good: (optional) list of values to treat as good in the bitmask
default is to use appropriate values for the camera
"""
# Defaults
if len(good) == 0:
if self.camera == 'decam':
# 7 = transient
good=[7]
elif self.camera == 'mosaic':
# 5 is truly a cosmic ray
good=[]
elif self.camera == '90prime':
# 5 can be really bad for a good image because these are subtracted
# and interpolated stats
good= []
return self.create_zero_one_mask(bitmask,good=good)
def sensible_sigmaclip(self, arr, nsigma = 4.0):
'''sigmaclip returns unclipped pixels, lo,hi, where lo,hi are the
mean(goodpix) +- nsigma * sigma
'''
goodpix, lo, hi = sigmaclip(arr, low=nsigma, high=nsigma)
meanval = np.mean(goodpix)
sigma = (meanval - lo) / nsigma
return meanval, sigma
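# [Editor's note] Hedged aside on the sigma estimate above (values below are made up,
# not from this pipeline): scipy's sigmaclip returns the clip bounds
# lo = mean - nsigma*sigma and hi = mean + nsigma*sigma of the surviving pixels, so
# sigma = (mean(goodpix) - lo) / nsigma recovers the clipped standard deviation.
#
#     from scipy.stats import sigmaclip
#     import numpy as np
#     arr = np.random.normal(100., 5., size=10000)
#     goodpix, lo, hi = sigmaclip(arr, low=4., high=4.)
#     sigma = (np.mean(goodpix) - lo) / 4.   # ~5, close to np.std(goodpix)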
def get_sky_and_sigma(self, img, nsigma=3):
'''returns (2d sky image, median sky level, sky rms)'''
splinesky= False
if splinesky:
skyobj = SplineSky.BlantonMethod(img, None, 256)
skyimg = np.zeros_like(img)
skyobj.addTo(skyimg)
mnsky, skystd = self.sensible_sigmaclip(img - skyimg,nsigma=nsigma)
skymed= np.median(skyimg)
else:
#sky, sig1 = self.sensible_sigmaclip(img[1500:2500, 500:1000])
if self.camera in ['decam', 'megaprime']:
slc=[slice(1500,2500),slice(500,1500)]
elif self.camera in ['mosaic','90prime']:
slc=[slice(500,1500),slice(500,1500)]
else:
raise RuntimeError('unknown camera %s' % self.camera)
clip_vals,_,_ = sigmaclip(img[tuple(slc)],low=nsigma,high=nsigma)
skymed= np.median(clip_vals)
skystd= np.std(clip_vals)
skyimg= np.zeros(img.shape) + skymed
# MAD gives 10% larger value
# sig1= 1.4826 * np.median(np.abs(clip_vals))
return skyimg, skymed, skystd
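# [Editor's note] Hedged note on the MAD comment above (illustrative only): for
# Gaussian noise, std ~= 1.4826 * MAD, where MAD is the median absolute deviation
# from the median. A quick standalone check (numbers are made up):
#
#     import numpy as np
#     x = np.random.normal(0., 3., size=100000)
#     mad = np.median(np.abs(x - np.median(x)))
#     print(1.4826 * mad, np.std(x))   # both come out near 3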
def remove_sky_gradients(self, img):
# Ugly removal of sky gradients by subtracting median in first x and then y
H,W = img.shape
meds = np.array([np.median(img[:,i]) for i in range(W)])
meds = median_filter(meds, size=5)
img -= meds[np.newaxis,:]
meds = np.array([np.median(img[i,:]) for i in range(H)])
meds = median_filter(meds, size=5)
img -= meds[:,np.newaxis]
def match_ps1_stars(self, px, py, fullx, fully, radius, stars):
#print('Matching', len(px), 'PS1 and', len(fullx), 'detected stars with radius', radius)
I,J,d = match_xy(px, py, fullx, fully, radius)
#print(len(I), 'matches')
dx = px[I] - fullx[J]
dy = py[I] - fully[J]
return I,J,dx,dy
def fitstars(self, img, ierr, xstar, ystar, fluxstar):
'''Fit each star using a Tractor model.'''
import tractor
H, W = img.shape
fwhms = []
radius_pix = self.stampradius / self.pixscale
for ii, (xi, yi, fluxi) in enumerate(zip(xstar, ystar, fluxstar)):
#print('Fitting source', i, 'of', len(Jf))
ix = int(np.round(xi))
iy = int(np.round(yi))
xlo = int( max(0, ix-radius_pix) )
xhi = int( min(W, ix+radius_pix+1) )
ylo = int( max(0, iy-radius_pix) )
yhi = int( min(H, iy+radius_pix+1) )
xx, yy = np.meshgrid(np.arange(xlo, xhi), np.arange(ylo, yhi))
r2 = (xx - xi)**2 + (yy - yi)**2
keep = (r2 < radius_pix**2)
pix = img[ylo:yhi, xlo:xhi].copy()
ie = ierr[ylo:yhi, xlo:xhi].copy()
#print('fitting source at', ix,iy)
#print('number of active pixels:', np.sum(ie > 0), 'shape', ie.shape)
psf = tractor.NCircularGaussianPSF([4.0], [1.0])
tim = tractor.Image(data=pix, inverr=ie, psf=psf)
src = tractor.PointSource(tractor.PixPos(xi-xlo, yi-ylo),
tractor.Flux(fluxi))
tr = tractor.Tractor([tim], [src])
#print('Posterior before prior:', tr.getLogProb())
src.pos.addGaussianPrior('x', 0.0, 1.0)
#print('Posterior after prior:', tr.getLogProb())
tim.freezeAllBut('psf')
psf.freezeAllBut('sigmas')
# print('Optimizing params:')
# tr.printThawedParams()
#print('Parameter step sizes:', tr.getStepSizes())
optargs = dict(priors=False, shared_params=False)
for step in range(50):
dlnp, x, alpha = tr.optimize(**optargs)
#print('dlnp', dlnp)
#print('src', src)
#print('psf', psf)
if dlnp == 0:
break
# Now fit only the PSF size
tr.freezeParam('catalog')
# print('Optimizing params:')
# tr.printThawedParams()
for step in range(50):
dlnp, x, alpha = tr.optimize(**optargs)
#print('dlnp', dlnp)
#print('src', src)
#print('psf', psf)
if dlnp == 0:
break
fwhms.append(2.35 * psf.sigmas[0]) # [pixels]
#model = tr.getModelImage(0)
return np.array(fwhms)
def isolated_radec(self,ra,dec,nn=2,minsep=1./3600):
'''return indices of ra,dec for which the ra,dec points are
AT LEAST a distance minsep away from their nearest neighbor point'''
cat1 = SkyCoord(ra=ra*units.degree, dec=dec*units.degree)
cat2 = SkyCoord(ra=ra*units.degree, dec=dec*units.degree)
idx, d2d, d3d = cat1.match_to_catalog_3d(cat2,nthneighbor=nn)
b= np.array(d2d) >= minsep
return b
def get_ps1_cuts(self,ps1):
"""Returns bool of PS1 sources to keep
ps1: catalogue with ps1 data
"""
gicolor= ps1.median[:,0] - ps1.median[:,2]
return ((ps1.nmag_ok[:, 0] > 0) &
(ps1.nmag_ok[:, 1] > 0) &
(ps1.nmag_ok[:, 2] > 0) &
(gicolor > 0.4) &
(gicolor < 2.7))
def return_on_error(self,err_message='',
ccds=None, stars_photom=None, stars_astrom=None):
"""Sets ccds table err message, zpt to nan, and returns appropriately for self.run()
Args:
err_message: length <= 30
ccds, stars_photom, stars_astrom: (optional) tables partially filled by run()
"""
assert(len(err_message) > 0 and len(err_message) <= 30)
if ccds is None:
ccds= _ccds_table(self.camera)
ccds['image_filename'] = self.fn_base
ccds['err_message']= err_message
ccds['zpt']= np.nan
return ccds, stars_photom, stars_astrom
def run(self, ext=None, save_xy=False, psfex=False, splinesky=False, survey=None):
"""Computes statistics for 1 CCD
Args:
ext: ccdname
save_xy: save daophot x,y and x,y after various cuts to dict and save
to json
Returns:
ccds, stars_photom, stars_astrom
"""
self.set_hdu(ext)
#
t0= Time()
t0= ptime('Measuring CCD=%s from image=%s' % (self.ccdname,self.fn),t0)
# Initialize
ccds = _ccds_table(self.camera)
# FIXME -- could clean up paths here??
ccds['image_filename'] = self.fn_base
ccds['image_hdu'] = self.image_hdu
ccds['ccdnum'] = self.ccdnum
ccds['camera'] = self.camera
ccds['expnum'] = self.expnum
ccds['plver'] = self.plver
ccds['procdate'] = self.procdate
ccds['plprocid'] = self.plprocid
ccds['ccdname'] = self.ccdname
ccds['expid'] = self.expid
ccds['object'] = self.obj
ccds['propid'] = self.propid
ccds['filter'] = self.band
ccds['exptime'] = self.exptime
ccds['date_obs'] = self.date_obs
ccds['mjd_obs'] = self.mjd_obs
ccds['ut'] = self.ut
ccds['ra_bore'] = self.ra_bore
ccds['dec_bore'] = self.dec_bore
ccds['ha'] = self.ha
ccds['airmass'] = self.airmass
ccds['gain'] = self.gain
ccds['pixscale'] = self.pixscale
ccds['yshift'] = 'YSHIFT' in self.primhdr
ccds['width'] = self.width
ccds['height'] = self.height
ccds['fwhm_cp'] = self.fwhm_cp
hdr_fwhm = self.fwhm_cp
notneeded_cols= ['avsky']
for ccd_col in ['avsky', 'crpix1', 'crpix2', 'crval1', 'crval2',
'cd1_1','cd1_2', 'cd2_1', 'cd2_2']:
if ccd_col.upper() in self.hdr.keys():
#print('CP Header: %s = ' % ccd_col,self.hdr[ccd_col])
ccds[ccd_col]= self.hdr[ccd_col]
else:
if ccd_col in notneeded_cols:
ccds[ccd_col]= np.nan
else:
raise KeyError('Could not find %s in cp header' % ccd_col)
exptime = ccds['exptime'].data[0]
airmass = ccds['airmass'].data[0]
print('Band {}, Exptime {}, Airmass {}'.format(self.band, exptime, airmass))
# WCS: 1-indexed so pixel pixelxy2radec(1,1) corresponds to img[0,0]
H = ccds['height'].data[0]
W = ccds['width'].data[0]
print('Image size:', W,H)
ccdra, ccddec = self.wcs.pixelxy2radec((W+1) / 2.0, (H+1) / 2.0)
ccds['ra'] = ccdra # [degree]
ccds['dec'] = ccddec # [degree]
t0= ptime('header-info',t0)
if not self.goodWcs:
print('WCS Failed on CCD {}'.format(self.ccdname))
return self.return_on_error(err_message='WCS Failed', ccds=ccds)
if self.exptime == 0:
print('Exptime = 0 on CCD {}'.format(self.ccdname))
return self.return_on_error(err_message='Exptime = 0', ccds=ccds)
self.bitmask = self.read_bitmask()
weight = self.read_weight(bitmask=self.bitmask, scale=False)
if np.all(weight == 0):
txt = 'All weight-map pixels are zero on CCD {}'.format(self.ccdname)
print(txt)
return self.return_on_error(txt,ccds=ccds)
# bizarro image CP20151119/k4m_151120_040715_oow_zd_v1.fits.fz
if np.all(np.logical_or(weight == 0, weight == 1)):
txt = 'All weight-map pixels are zero or one'
print(txt)
return self.return_on_error(txt,ccds=ccds)
weight = self.scale_weight(weight)
if psfex:
# Quick check for PsfEx file
psf = self.get_psfex_model()
if psf.psfex.sampling == 0.:
print('PsfEx model has SAMPLING=0')
nacc = psf.header.get('ACCEPTED')
print('PsfEx model number of stars accepted:', nacc)
return self.return_on_error(err_message='Bad PSF model', ccds=ccds)
self.img,hdr = self.read_image()
# Per-pixel error -- weight is 1/sig*2, scaled by scale_weight()
medweight = np.median(weight[(weight > 0) * (self.bitmask == 0)])
# Undo the weight scaling to get sig1 back into native image units
wscale = self.scale_weight(1.)
ccds['sig1'] = 1. / np.sqrt(medweight / wscale)
self.invvar = self.remap_invvar(weight, self.primhdr, self.img, self.bitmask)
t0= ptime('read image',t0)
# Measure the sky brightness and (sky) noise level.
zp0 = self.zeropoint(self.band)
#print('Computing the sky background.')
sky_img, skymed, skyrms = self.get_sky_and_sigma(self.img)
img_sub_sky= self.img - sky_img
# Bunch of sky estimates
# Median of absolute deviation (MAD), std dev = 1.4826 * MAD
print('sky from median of image= %.2f' % skymed)
skybr = zp0 - 2.5*np.log10(skymed / self.pixscale / self.pixscale / exptime)
print('Sky brightness: {:.3f} mag/arcsec^2 (assuming nominal zeropoint)'.format(skybr))
ccds['skyrms'] = skyrms / exptime # e/sec
ccds['skycounts'] = skymed / exptime # [electron/pix]
ccds['skymag'] = skybr # [mag/arcsec^2]
t0= ptime('measure-sky',t0)
# Load PS1 & Gaia catalogues
# We will only used detected sources that have PS1 or Gaia matches
# So cut to this super set immediately
ps1 = None
try:
ps1 = ps1cat(ccdwcs=self.wcs,
pattern= self.ps1_pattern).get_stars(magrange=None)
except OSError:
print('No PS1 stars found for this image -- outside the PS1 footprint, or in the Galactic plane?')
if ps1 is not None and len(ps1) == 0:
ps1 = None
# PS1 cuts
if ps1 is not None and len(ps1):
ps1.cut( self.get_ps1_cuts(ps1) )
if len(ps1) == 0:
ps1 = None
else:
# Convert to Legacy Survey mags
ps1.legacy_survey_mag = self.ps1_to_observed(ps1)
print(len(ps1), 'PS1 stars')
gaia = GaiaCatalog().get_catalog_in_wcs(self.wcs)
assert(gaia is not None)
assert(len(gaia) > 0)
gaia = GaiaCatalog.catalog_nantozero(gaia)
assert(gaia is not None)
print(len(gaia), 'Gaia stars')
# Move Gaia stars to the epoch of this image.
gaia.ra_orig = gaia.ra.copy()
gaia.dec_orig = gaia.dec.copy()
ra,dec = radec_at_mjd(gaia.ra, gaia.dec, gaia.ref_epoch.astype(float),
gaia.pmra, gaia.pmdec, gaia.parallax, self.mjd_obs)
gaia.ra = ra
gaia.dec = dec
if not psfex:
ccds,photom,astrom = self.run_apphot(ccds, ps1, gaia, skyrms, hdr_fwhm,
sky_img, ext=ext, save_xy=save_xy)
# yuck!
photom = astropy_to_astrometry_table(photom)
astrom = astropy_to_astrometry_table(astrom)
return ccds,photom,astrom
return self.run_psfphot(ccds, ps1, gaia, zp0, exptime, airmass, sky_img,
splinesky, survey)
def run_apphot(self, ccds, ps1, gaia, skyrms, hdr_fwhm, sky_img,
ext=None, save_xy=False):
t0= Time()
img_sub_sky = self.img - sky_img
# badpix5 test, all good PS1
if self.camera in ['90prime','mosaic']:
_, ps1_x, ps1_y = self.wcs.radec2pixelxy(ps1.ra_ok,ps1.dec_ok)
ps1_x-= 1.
ps1_y-= 1.
ap_for_ps1 = CircularAperture((ps1_x, ps1_y), 5.)
# special mask, only gt 0 where badpix eq 5
img_mask_5= np.zeros(self.bitmask.shape, dtype=self.bitmask.dtype)
img_mask_5[self.bitmask == 5]= 1
phot_for_mask_5 = aperture_photometry(img_mask_5, ap_for_ps1)
flux_for_mask_5 = phot_for_mask_5['aperture_sum']
ccds['goodps1']= len(ps1)
ccds['goodps1_wbadpix5']= len(ps1[flux_for_mask_5.data > 0])
# Detect stars on the image.
# 10 sigma, sharpness, roundness all same as IDL zeropoints (also the defaults)
# Exclude_border=True removes the stars with centroid on or out of ccd edge
# Good, but we want to remove with aperture touching ccd edge too
print('det_thresh = %d' % self.det_thresh)
#threshold=self.det_thresh * stddev_mad,
dao = DAOStarFinder(fwhm= hdr_fwhm,
threshold=self.det_thresh * skyrms,
sharplo=0.2, sharphi=1.0, roundlo=-1.0, roundhi=1.0,
exclude_border=False)
obj= dao(self.img)
if len(obj) < self.minstar:
dao.threshold /= 2.
obj= dao(self.img)
if len(obj) < self.minstar:
return self.return_on_error('dao found < %d sources' % self.minstar,ccds=ccds)
t0= ptime('detect-stars',t0)
# We for sure know that sources near edge could be bad
edge_sep = 1. + self.skyrad[1]
edge_sep_px = edge_sep/self.pixscale
ht,wid = self.img.shape
away_from_edge= (
(obj['xcentroid'] > edge_sep_px) &
(obj['xcentroid'] < wid - edge_sep_px) &
(obj['ycentroid'] > edge_sep_px) &
(obj['ycentroid'] < ht - edge_sep_px))
obj= obj[away_from_edge]
objra, objdec = self.wcs.pixelxy2radec(obj['xcentroid']+1, obj['ycentroid']+1)
nobj = len(obj)
print('{} sources detected with detection threshold {}-sigma minus edge sources'.format(nobj, self.det_thresh))
ccds['nstarfind']= nobj
if nobj < self.minstar:
return self.return_on_error('after edge cuts < %d sources' % self.minstar,ccds=ccds)
if save_xy:
# Arrays of length number of all daophot found sources
all_xy= fits_table()
all_xy.set('x', obj['xcentroid'].data)
all_xy.set('y', obj['ycentroid'].data)
all_xy.set('ra', objra)
all_xy.set('dec', objdec)
all_xy.writeto('%s_%s_all_xy.fits' %
(os.path.basename(self.fn).replace('.fits','').replace('.fz',''),
ext))
# Matching
matched= {}
# Photometry
matched['photom_obj'], matched['photom_ref'], _ = \
match_radec(objra, objdec, ps1.ra_ok, ps1.dec_ok,
self.match_radius/3600.0,
nearest=True)
t0= ptime('matching-for-photometer',t0)
if len(matched['photom_obj']) < self.minstar:
return self.return_on_error('photom matched < %d sources' % self.minstar,ccds=ccds)
stars_photom,err= self.do_Photometry(obj[matched['photom_obj']],
ps1[matched['photom_ref']],
ccds=ccds, save_xy=save_xy)
if len(err) > 0:
return self.return_on_error(err,ccds=ccds,
stars_photom=stars_photom)
t0= ptime('photutils-photometry',t0)
# Astrometry
matched['astrom_obj'], matched['astrom_ref'], _ = \
match_radec(objra, objdec, gaia.ra, gaia.dec,
self.match_radius/3600.0,
nearest=True)
t0= ptime('matching-for-astrometry',t0)
# Use gaia
if len(matched['astrom_obj']) < self.minstar:
return self.return_on_error('astrom gaia matched < %d sources' % self.minstar,ccds=ccds,stars_photom=stars_photom)
stars_astrom,err= self.do_Astrometry(
obj[matched['astrom_obj']],
ref_ra= gaia.ra[matched['astrom_ref']],
ref_dec= gaia.dec[matched['astrom_ref']],
ccds=ccds)
if len(err) > 0:
return self.return_on_error(err,ccds=ccds,
stars_photom=stars_photom,
stars_astrom=stars_astrom)
t0= ptime('did-astrometry',t0)
# FWHM
# Tractor on specific SN sources
ap = CircularAperture((stars_photom['x'], stars_photom['y']),
self.aprad / self.pixscale)
skyphot = aperture_photometry(sky_img, ap)
skyflux = skyphot['aperture_sum'].data
star_SN= stars_photom['apflux'].data / np.sqrt(stars_photom['apflux'].data + skyflux)
t0= ptime('photutils-photometry-SN',t0)
# Brightest N stars
sn_cut= ((star_SN >= 10.) &
(star_SN <= 100.))
if len(star_SN[sn_cut]) < 10.:
sn_cut= star_SN >= 10.
if len(star_SN[sn_cut]) < 10.:
sn_cut= np.ones(len(star_SN),bool)
i_low_hi= np.argsort(star_SN)[sn_cut]
# brightest stars in sample, at most self.tractor_nstars
sample=dict(x= stars_photom['x'][i_low_hi][-self.tractor_nstars:],
y= stars_photom['y'][i_low_hi][-self.tractor_nstars:],
apflux= stars_photom['apflux'][i_low_hi][-self.tractor_nstars:],
sn= star_SN[i_low_hi][-self.tractor_nstars:])
#ivar = np.zeros_like(img) + 1.0/sig1**2
# Hack! To avoid 1/0 and sqrt(<0) just considering Poisson Stats due to sky
ierr = 1.0/np.sqrt(sky_img)
fwhms = self.fitstars(img_sub_sky, ierr, sample['x'], sample['y'], sample['apflux'])
ccds['fwhm'] = np.median(fwhms) # fwhms= 2.35 * psf.sigmas
print('FWHM med=%f, std=%f, std_med=%f' % (np.median(fwhms),np.std(fwhms),np.std(fwhms)/len(sample['x'])))
#ccds['seeing'] = self.pixscale * np.median(fwhms)
t0= ptime('Tractor fit FWHM to %d/%d stars' % (len(sample['x']),len(stars_photom)), t0)
# RESULTS
print("RESULTS %s" % ext)
print('Photometry: %d stars' % ccds['nmatch_photom'])
print('Offset (mag) =%.4f, rms=%.4f' % (ccds['phoff'],ccds['phrms']))
print('Zeropoint %.4f' % (ccds['zpt'],))
print('Transparency %.4f' % (ccds['transp'],))
print('Astrometry: %d stars' % ccds['nmatch_astrom'])
print('Offsets (arcsec) RA=%.6f, Dec=%.6f' % (ccds['raoff'], ccds['decoff']))
t0= ptime('all-computations-for-this-ccd',t0)
# Plots for comparing to Arjuns zeropoints*.ps
if self.verboseplots:
self.make_plots(stars,dmag,ccds['zpt'],ccds['transp'])
t0= ptime('made-plots',t0)
return ccds, stars_photom, stars_astrom
def run_psfphot(self, ccds, ps1, gaia, zp0, exptime, airmass, sky_img,
splinesky, survey):
t0= Time()
# Now put Gaia stars into the image and re-fit their centroids
# and fluxes using the tractor with the PsfEx PSF model.
# assume that the CP WCS has gotten us to within a few pixels
# of the right answer. Find Gaia stars, initialize Tractor
# sources there, optimize them and see how much they want to
# move.
psf = self.get_psfex_model()
# Just keep the CP FWHM measurement!!
ccds['fwhm'] = ccds['fwhm_cp']
#ccds['fwhm'] = psf.fwhm
if splinesky:
sky = self.get_splinesky()
print('Instantiating and subtracting sky model')
skymod = np.zeros_like(self.img)
sky.addTo(skymod)
# Apply the same transformation that was applied to the image...
skymod = self.scale_image(skymod)
#print('Old sky_img: avg', np.mean(sky_img), 'min/max', np.min(sky_img), np.max(sky_img))
#print('Skymod: avg', np.mean(skymod), 'min/max', skymod.min(), skymod.max())
fit_img = self.img - skymod
else:
fit_img = self.img - sky_img
with np.errstate(invalid='ignore'):
# sqrt(0.) can trigger complaints; https://github.com/numpy/numpy/issues/11448
ierr = np.sqrt(self.invvar)
# Gaia
ra,dec = radec_at_mjd(gaia.ra, gaia.dec, gaia.ref_epoch.astype(float),
gaia.pmra, gaia.pmdec, gaia.parallax, self.mjd_obs)
gaia.rename('source_id', 'gaia_sourceid')
gaia.ra_now = ra
gaia.dec_now = dec
gaia.rename('ra', 'ra_gaia')
gaia.rename('dec', 'dec_gaia')
for b in ['g', 'bp', 'rp']:
mag = gaia.get('phot_%s_mean_mag' % b)
sn = gaia.get('phot_%s_mean_flux_over_error' % b)
magerr = np.abs(2.5/np.log(10.) * 1./np.maximum(1., sn))
gaia.set('phot_%s_mean_mag_error' % b, magerr)
# FIXME -- NaNs?
gaia.flux0 = np.ones(len(gaia), np.float32)
# we set 'astrom' and omit 'photom'; it will get filled in with zeros.
gaia.astrom = np.ones(len(gaia), bool)
refs = [gaia]
if ps1 is not None:
# PS1 for photometry
# Initial flux estimate, from nominal zeropoint
ps1.flux0 = (10.**((zp0 - ps1.legacy_survey_mag) / 2.5) * exptime).astype(np.float32)
# we don't have/use proper motions for PS1 stars
ps1.rename('ra_ok', 'ra_now')
ps1.rename('dec_ok', 'dec_now')
ps1.ra_ps1 = ps1.ra_now.copy()
ps1.dec_ps1 = ps1.dec_now.copy()
ps1.ps1_objid = ps1.obj_id
for band in 'grizY':
i = ps1cat.ps1band.get(band, None)
if i is None:
print('No band', band, 'in PS1 catalog')
continue
ps1.set('ps1_'+band.lower(), ps1.median[:,i].astype(np.float32))
# we set 'photom' and omit 'astrom'; it will get filled in with zeros.
ps1.photom = np.ones (len(ps1), bool)
# Match PS1 to Gaia stars within 1".
I,J,d = match_radec(gaia.ra_gaia, gaia.dec_gaia,
ps1.ra_ps1, ps1.dec_ps1, 1./3600.,
nearest=True)
print(len(I), 'of', len(gaia), 'Gaia and', len(ps1), 'PS1 stars matched')
# Merged = PS1 + unmatched Gaia
if len(I):
# Merge columns for the matched stars
for c in gaia.get_columns():
G = gaia.get(c)
# If column exists in both (eg, ra_now, dec_now), override
# the PS1 value with the Gaia value; except for "photom".
if c in ps1.get_columns():
X = ps1.get(c)
else:
X = np.zeros(len(ps1), G.dtype)
X[J] = G[I]
ps1.set(c, X)
# unmatched Gaia stars
unmatched = np.ones(len(gaia), bool)
unmatched[I] = False
gaia.cut(unmatched)
del unmatched
refs.append(ps1)
if len(refs) == 1:
refs = refs[0]
else:
refs = merge_tables(refs, columns='fillzero')
cols = [('ra_gaia', np.double),
('dec_gaia', np.double),
('gaia_sourceid', np.int64),
('phot_g_mean_mag', np.float32),
('phot_g_mean_mag_error', np.float32),
('phot_bp_mean_mag', np.float32),
('phot_bp_mean_mag_error', np.float32),
('phot_rp_mean_mag', np.float32),
('phot_rp_mean_mag_error', np.float32),
('ra_ps1', np.double),
('dec_ps1', np.double),
('ps1_objid', np.int64),
('ps1_g', np.float32),
('ps1_r', np.float32),
('ps1_i', np.float32),
('ps1_z', np.float32),
('ps1_y', np.float32),
('ra_now', np.double),
('dec_now', np.double),
('flux0', np.float32),
('legacy_survey_mag', np.float32),
('astrom', bool),
('photom', bool),
]
refcols = refs.get_columns()
for c,dt in cols:
if not c in refcols:
refs.set(c, np.zeros(len(refs), dt))
refcols = refs.get_columns()
wantcols = dict(cols)
for c in refcols:
if not c in wantcols:
refs.delete_column(c)
continue
# dt = wantcols[c]
# rdt = refs.get(c).dtype
# if rdt != dt:
# print('Warning: column', c, 'has type', rdt, 'not', dt)
# print('(Cleaned) reference stars:')
# refs.about()
if False:
from astrometry.util.plotutils import PlotSequence
ps = PlotSequence('astromfit')
plt.clf()
plt.hist((fit_img * ierr).ravel(), range=(-5,5), bins=100)
plt.xlabel('Image pixel S/N')
ps.savefig()
# Run tractor fitting on the ref stars, using the PsfEx model.
phot = self.tractor_fit_sources(refs.ra_now, refs.dec_now, refs.flux0,
fit_img, ierr, psf)
print('Got photometry results for', len(phot), 'reference stars')
if len(phot) == 0:
return self.return_on_error('No photometry available',ccds=ccds)
# Cut to ref stars that were photometered
refs.cut(phot.iref)
phot.delete_column('iref')
refs.delete_column('flux0')
phot.raoff = (refs.ra_now - phot.ra_fit ) * 3600. * np.cos(np.deg2rad(refs.dec_now))
phot.decoff = (refs.dec_now - phot.dec_fit) * 3600.
dra = phot.raoff [refs.astrom]
ddec = phot.decoff[refs.astrom]
nastrom = len(dra)
raoff = np.median(dra)
decoff = np.median(ddec)
rastd = np.std(dra)
decstd = np.std(ddec)
ra_clip, _, _ = sigmaclip(dra, low=3., high=3.)
rarms = getrms(ra_clip)
dec_clip, _, _ = sigmaclip(ddec, low=3., high=3.)
decrms = getrms(dec_clip)
print('RA, Dec offsets (arcsec): %.4f, %.4f' % (raoff, decoff))
print('RA, Dec stddev (arcsec): %.4f, %.4f' % (rastd, decstd))
print('RA, Dec RMS (arcsec): %.4f, %.4f' % (rarms, decrms))
ok, = np.nonzero(phot.flux > 0)
phot.instpsfmag = np.zeros(len(phot), np.float32)
phot.instpsfmag[ok] = -2.5*np.log10(phot.flux[ok] / exptime)
# Uncertainty on psfmag
phot.dpsfmag = np.zeros(len(phot), np.float32)
phot.dpsfmag[ok] = np.abs((-2.5 / np.log(10.)) * phot.dflux[ok] / phot.flux[ok])
H,W = self.bitmask.shape
phot.bitmask = self.bitmask[np.clip(phot.y1, 0, H-1).astype(int),
np.clip(phot.x1, 0, W-1).astype(int)]
phot.psfmag = np.zeros(len(phot), np.float32)
dmag = (refs.legacy_survey_mag - phot.instpsfmag)[refs.photom]
if len(dmag):
dmag = dmag[np.isfinite(dmag)]
print('Zeropoint: using', len(dmag), 'good stars')
dmag, _, _ = sigmaclip(dmag, low=2.5, high=2.5)
nphotom = len(dmag)
print('Zeropoint: using', nphotom, 'stars after sigma-clipping')
zptstd = np.std(dmag)
zptmed = np.median(dmag)
dzpt = zptmed - zp0
kext = self.extinction(self.band)
transp = 10.**(-0.4 * (-dzpt - kext * (airmass - 1.0)))
print('Number of stars used for zeropoint median %d' % nphotom)
print('Zeropoint %.4f' % zptmed)
print('Offset from nominal: %.4f' % dzpt)
print('Scatter: %.4f' % zptstd)
print('Transparency %.4f' % transp)
ok = (phot.instpsfmag != 0)
phot.psfmag[ok] = phot.instpsfmag[ok] + zptmed
else:
nphotom = 0
dzpt = 0.
zptmed = 0.
zptstd = 0.
transp = 0.
for c in ['x0','y0','x1','y1','flux','raoff','decoff', 'psfmag',
'dflux','dx','dy']:
phot.set(c, phot.get(c).astype(np.float32))
phot.rename('x0', 'x_ref')
phot.rename('y0', 'y_ref')
phot.rename('x1', 'x_fit')
phot.rename('y1', 'y_fit')
phot.add_columns_from(refs)
# Save CCD-level information in the per-star table.
phot.ccd_raoff = np.zeros(len(phot), np.float32) + raoff
phot.ccd_decoff = np.zeros(len(phot), np.float32) + decoff
phot.ccd_phoff = np.zeros(len(phot), np.float32) + dzpt
phot.ccd_zpt = np.zeros(len(phot), np.float32) + zptmed
phot.expnum = np.zeros(len(phot), np.int64) + self.expnum
phot.ccdname = np.array([self.ccdname] * len(phot))
phot.filter = np.array([self.band] * len(phot))
# ugh, pad ccdname to 3 characters for DECam
if self.camera == 'decam' and len(self.ccdname) < 3:
phot.ccdname = phot.ccdname.astype('S3')
phot.exptime = np.zeros(len(phot), np.float32) + self.exptime
phot.gain = np.zeros(len(phot), np.float32) + self.gain
phot.airmass = np.zeros(len(phot), np.float32) + airmass
import photutils
apertures_arcsec_diam = [6, 7, 8]
for arcsec_diam in apertures_arcsec_diam:
ap = photutils.CircularAperture(np.vstack((phot.x_fit, phot.y_fit)).T,
arcsec_diam / 2. / self.pixscale)
with np.errstate(divide='ignore'):
err = 1./ierr
apphot = photutils.aperture_photometry(fit_img, ap, error=err, mask=(ierr==0))
phot.set('apflux_%i' % arcsec_diam, apphot.field('aperture_sum').data.astype(np.float32))
phot.set('apflux_%i_err' % arcsec_diam, apphot.field('aperture_sum_err').data.astype(np.float32))
# Add to the zeropoints table
ccds['raoff'] = raoff
ccds['decoff'] = decoff
ccds['rastddev'] = rastd
ccds['decstddev'] = decstd
ccds['rarms'] = rarms
ccds['decrms'] = decrms
ccds['phoff'] = dzpt
ccds['phrms'] = zptstd
ccds['zpt'] = zptmed
ccds['transp'] = transp
ccds['nmatch_photom'] = nphotom
ccds['nmatch_astrom'] = nastrom
# .ra,.dec = Gaia else PS1
phot.ra = phot.ra_gaia
phot.dec = phot.dec_gaia
I, = np.nonzero(phot.ra == 0)
phot.ra [I] = phot.ra_ps1 [I]
phot.dec[I] = phot.dec_ps1[I]
stars_astrom = phot
# Create subset table for Eddie's ubercal
stars_photom = phot.copy()
cols = ['ra', 'dec', 'flux', 'dflux', 'chi2', 'fracmasked', 'instpsfmag',
'dpsfmag',
'bitmask', 'x_fit', 'y_fit', 'gaia_sourceid', 'ra_gaia', 'dec_gaia',
'phot_g_mean_mag', 'phot_bp_mean_mag', 'phot_rp_mean_mag',
'phot_g_mean_mag_error', 'phot_bp_mean_mag_error',
'phot_rp_mean_mag_error',
'ps1_objid', 'ra_ps1', 'dec_ps1',
'ps1_g', 'ps1_r', 'ps1_i', 'ps1_z', 'ps1_y', 'legacy_survey_mag',
'expnum', 'ccdname', 'exptime', 'gain', 'airmass', 'filter',
'apflux_6', 'apflux_7', 'apflux_8',
'apflux_6_err', 'apflux_7_err', 'apflux_8_err',
'ra_now', 'dec_now', 'ra_fit', 'dec_fit', 'x_ref', 'y_ref'
]
for c in stars_photom.get_columns():
if not c in cols:
stars_photom.delete_column(c)
t0= ptime('all-computations-for-this-ccd',t0)
# Plots for comparing to Arjuns zeropoints*.ps
if self.verboseplots:
self.make_plots(stars,dmag,ccds['zpt'],ccds['transp'])
t0= ptime('made-plots',t0)
return ccds, stars_photom, stars_astrom
def ps1_to_observed(self, ps1):
colorterm = self.colorterm_ps1_to_observed(ps1.median, self.band)
ps1band = ps1cat.ps1band[self.band]
return ps1.median[:, ps1band] + np.clip(colorterm, -1., +1.)
def get_splinesky_merged_filename(self):
expstr = '%08i' % self.expnum
fn = os.path.join(self.calibdir, self.camera, 'splinesky-merged', expstr[:5],
'%s-%s.fits' % (self.camera, expstr))
return fn
def get_splinesky_unmerged_filename(self):
expstr = '%08i' % self.expnum
return os.path.join(self.calibdir, self.camera, 'splinesky', expstr[:5], expstr,
'%s-%s-%s.fits' % (self.camera, expstr, self.ext))
def get_splinesky(self):
# Find splinesky model file and read it
import tractor
from tractor.utils import get_class_from_name
# Look for merged file
fn = self.get_splinesky_merged_filename()
#print('Looking for file', fn)
if os.path.exists(fn):
print('Reading splinesky-merged {}'.format(fn))
T = fits_table(fn)
if validate_procdate_plver(fn, 'table', self.expnum, self.plver,
self.procdate, self.plprocid, data=T):
I, = np.nonzero((T.expnum == self.expnum) *
np.array([c.strip() == self.ext for c in T.ccdname]))
if len(I) == 1:
Ti = T[I[0]]
# Remove any padding
h,w = Ti.gridh, Ti.gridw
Ti.gridvals = Ti.gridvals[:h, :w]
Ti.xgrid = Ti.xgrid[:w]
Ti.ygrid = Ti.ygrid[:h]
skyclass = Ti.skyclass.strip()
clazz = get_class_from_name(skyclass)
fromfits = getattr(clazz, 'from_fits_row')
sky = fromfits(Ti)
return sky
# Look for single-CCD file
fn = self.get_splinesky_unmerged_filename()
#print('Reading file', fn)
if not os.path.exists(fn):
return None
print('Reading splinesky {}'.format(fn))
hdr = read_primary_header(fn)
if not validate_procdate_plver(fn, 'primaryheader', self.expnum, self.plver,
self.procdate, self.plprocid, data=hdr):
return None
try:
skyclass = hdr['SKY']
except NameError:
raise NameError('SKY not in header: skyfn={}'.format(fn))
clazz = get_class_from_name(skyclass)
if getattr(clazz, 'from_fits', None) is not None:
fromfits = getattr(clazz, 'from_fits')
sky = fromfits(fn, hdr)
else:
fromfits = getattr(clazz, 'fromFitsHeader')
sky = fromfits(hdr, prefix='SKY_')
return sky
def tractor_fit_sources(self, ref_ra, ref_dec, ref_flux, img, ierr,
psf, normalize_psf=True):
import tractor
plots = False
#plot_this = np.hypot(x - 118, y - 1276) < 5
plot_this = False
if plots:
from astrometry.util.plotutils import PlotSequence
ps = PlotSequence('astromfit')
print('Fitting positions & fluxes of %i stars' % len(ref_ra))
cal = fits_table()
# These x0,y0,x1,y1 are zero-indexed coords.
cal.x0 = []
cal.y0 = []
cal.x1 = []
cal.y1 = []
cal.flux = []
cal.dx = []
cal.dy = []
cal.dflux = []
cal.psfsum = []
cal.iref = []
cal.chi2 = []
cal.fracmasked = []
for istar in range(len(ref_ra)):
ok,x,y = self.wcs.radec2pixelxy(ref_ra[istar], ref_dec[istar])
x -= 1
y -= 1
# Fitting radius
R = 10
H,W = img.shape
xlo = int(x - R)
ylo = int(y - R)
if xlo < 0 or ylo < 0:
continue
xhi = xlo + R*2
yhi = ylo + R*2
if xhi >= W or yhi >= H:
continue
subimg = img[ylo:yhi+1, xlo:xhi+1]
# FIXME -- check that ierr is correct
subie = ierr[ylo:yhi+1, xlo:xhi+1]
subpsf = psf.constantPsfAt(x, y)
psfsum = np.sum(subpsf.img)
if normalize_psf:
# print('Normalizing PsfEx model with sum:', s)
subpsf.img /= psfsum
if np.all(subie == 0):
#print('Inverse-variance map is all zero')
continue
#print('PSF model:', subpsf)
#print('PSF image sum:', subpsf.img.sum())
tim = tractor.Image(data=subimg, inverr=subie, psf=subpsf)
flux0 = ref_flux[istar]
#print('Zp0', zp0, 'mag', ref.mag[istar], 'flux', flux0)
x0 = x - xlo
y0 = y - ylo
src = tractor.PointSource(tractor.PixPos(x0, y0),
tractor.Flux(flux0))
tr = tractor.Tractor([tim], [src])
tr.freezeParam('images')
optargs = dict(priors=False, shared_params=False)
# The initial flux estimate doesn't seem to work too well,
# so just for plotting's sake, fit flux first
src.freezeParam('pos')
tr.optimize(**optargs)
src.thawParam('pos')
#print('Optimizing position of Gaia star', istar)
if plots and plot_this:
plt.clf()
plt.subplot(2,2,1)
plt.imshow(subimg, interpolation='nearest', origin='lower')
plt.colorbar()
plt.subplot(2,2,2)
mod = tr.getModelImage(0)
plt.imshow(mod, interpolation='nearest', origin='lower')
plt.colorbar()
plt.subplot(2,2,3)
plt.imshow((subimg - mod) * subie, interpolation='nearest', origin='lower')
plt.colorbar()
plt.suptitle('Before')
ps.savefig()
#print('Initial flux', flux0)
for step in range(50):
dlnp, x, alpha = tr.optimize(**optargs)
#print('delta position', src.pos.x - x0, src.pos.y - y0,
# 'flux', src.brightness, 'dlnp', dlnp)
if dlnp == 0:
break
#print('Getting variance estimate: thawed params:')
#tr.printThawedParams()
variance = tr.optimize(variance=True, just_variance=True, **optargs)
# Yuck -- if inverse-variance is all zero, weird-shaped result...
if len(variance) == 4 and variance[3] is None:
print('No variance estimate available')
continue
mod = tr.getModelImage(0)
chi = (subimg - mod) * subie
psfimg = mod / mod.sum()
# profile-weighted chi-squared
cal.chi2.append(np.sum(chi**2 * psfimg))
# profile-weighted fraction of masked pixels
#cal.fracmasked.append(np.sum(psfimg * (ierr == 0)))
cal.fracmasked.append(np.sum(psfimg * (subie == 0)))
cal.psfsum.append(psfsum)
cal.x0.append(x0 + xlo)
cal.y0.append(y0 + ylo)
cal.x1.append(src.pos.x + xlo)
cal.y1.append(src.pos.y + ylo)
cal.flux.append(src.brightness.getValue())
cal.iref.append(istar)
std = np.sqrt(variance)
cal.dx.append(std[0])
cal.dy.append(std[1])
cal.dflux.append(std[2])
if plots and plot_this:
plt.clf()
plt.subplot(2,2,1)
plt.imshow(subimg, interpolation='nearest', origin='lower')
plt.colorbar()
plt.subplot(2,2,2)
mod = tr.getModelImage(0)
plt.imshow(mod, interpolation='nearest', origin='lower')
plt.colorbar()
plt.subplot(2,2,3)
plt.imshow((subimg - mod) * subie, interpolation='nearest', origin='lower')
plt.colorbar()
plt.suptitle('After')
ps.savefig()
cal.to_np_arrays()
cal.ra_fit,cal.dec_fit = self.wcs.pixelxy2radec(cal.x1 + 1, cal.y1 + 1)
return cal
def get_psfex_merged_filename(self):
expstr = '%08i' % self.expnum
fn = os.path.join(self.calibdir, self.camera, 'psfex-merged', expstr[:5],
'%s-%s.fits' % (self.camera, expstr))
return fn
def get_psfex_model(self):
import tractor
# Look for merged PsfEx file
fn = self.get_psfex_merged_filename()
expstr = '%08i' % self.expnum
#print('Looking for PsfEx file', fn)
if os.path.exists(fn):
print('Reading psfex-merged {}'.format(fn))
T = fits_table(fn)
if validate_procdate_plver(fn, 'table', self.expnum, self.plver,
self.procdate, self.plprocid, data=T):
I, = np.nonzero((T.expnum == self.expnum) *
np.array([c.strip() == self.ext for c in T.ccdname]))
if len(I) == 1:
Ti = T[I[0]]
# Remove any padding
degree = Ti.poldeg1
# number of terms in polynomial
ne = (degree + 1) * (degree + 2) // 2
Ti.psf_mask = Ti.psf_mask[:ne, :Ti.psfaxis1, :Ti.psfaxis2]
psfex = tractor.PsfExModel(Ti=Ti)
psf = tractor.PixelizedPsfEx(None, psfex=psfex)
psf.fwhm = Ti.psf_fwhm
psf.header = {}
return psf
# Look for single-CCD PsfEx file
fn = os.path.join(self.calibdir, self.camera, 'psfex', expstr[:5], expstr,
'%s-%s-%s.fits' % (self.camera, expstr, self.ext))
#print('Reading PsfEx file', fn)
if not os.path.exists(fn):
return None
print('Reading psfex {}'.format(fn))
hdr = read_primary_header(fn)
if not validate_procdate_plver(fn, 'primaryheader', self.expnum, self.plver,
self.procdate, self.plprocid, data=hdr):
return None
hdr = fitsio.read_header(fn, ext=1)
psf = tractor.PixelizedPsfEx(fn)
psf.header = hdr
psf.fwhm = hdr['PSF_FWHM']
return psf
def do_Photometry(self, obj,ps1, ccds,
save_xy=False):
"""Measure zeropoint relative to PS1
Args:
obj: ps1-matched sources detected with dao phot
ps1: ps1 source matched to obj
ccds: partially filled _ccds_table
save_xy: if True save a fits table containing
ps1_mag and apmag for matched sources and associated
photometric cuts
Returns:
stars_photom: fits table for stars
err_message: '' if okay, 'some error text' otherwise, this will end up being
stored in ccds['err_message']
"""
print('Photometry on %s stars' % len(ps1))
objra, objdec = self.wcs.pixelxy2radec(obj['xcentroid']+1, obj['ycentroid']+1)
cuts,phot= self.get_photometric_cuts(obj,cuts_only=False)
assert(len(phot['apflux']) == len(obj))
final_cut= ((cuts['good_flux_and_mag']) &
(cuts['no_badpix_in_ap_0']) &
(cuts['is_iso']))
if len(obj[final_cut]) == 0:
return _stars_table(),'photometry failed, no stars after cuts'
# Stars table
ccds['nmatch_photom'] = len(obj[final_cut])
print('Photometry %s stars after obj cuts' % ccds['nmatch_photom'])
stars_photom = _stars_table(nstars=ccds['nmatch_photom'])
stars_photom['apmag'] = phot['apmags'][final_cut]
stars_photom['ps1_mag'] = ps1.legacy_survey_mag[final_cut]
if save_xy:
# Save ps1_mag and apmag for every matched source
all_stars=fits_table()
all_stars.set('apmag', phot['apmags'].data)
all_stars.set('ps1_mag', ps1.legacy_survey_mag)
all_stars.set('match_x', obj['xcentroid'].data)
all_stars.set('match_y', obj['ycentroid'].data)
all_stars.set('match_ra', objra)
all_stars.set('match_dec', objdec)
# Then bool cuts for the above arrays
for key in cuts.keys():
all_stars.set(key, cuts[key])
# Avoid memoryview write error
for col in all_stars.get_columns():
all_stars.set(col,np.array(all_stars.get(col)))
all_stars.writeto('%s_%s_all_stars.fits' %
(os.path.basename(self.fn).replace('.fits','').replace('.fz',''),
self.ccdname))
# Add additional info
stars_photom['nmatch']= ccds['nmatch_photom']
self.add_ccd_info_to_stars_table(stars_photom, ccds)
star_kwargs= {"keep": final_cut,
"obj":obj,
"objra":objra,
"objdec":objdec,
"apflux":phot['apflux'],
"apskyflux":phot['apskyflux'],
"apskyflux_perpix":phot['apskyflux_perpix']}
self.add_obj_info_to_stars_table(stars_photom,**star_kwargs)
for ps1_band,ps1_iband in zip(['g','r','i','z'],[0,1,2,3]):
stars_photom['ps1_%s' % ps1_band]= ps1.median[final_cut, ps1_iband]
# Zeropoint
stars_photom['dmagall'] = stars_photom['ps1_mag'] - stars_photom['apmag']
dmag, _, _ = sigmaclip(stars_photom['dmagall'], low=2.5, high=2.5)
dmagmed = | np.median(dmag) | numpy.median |
import tensorflow as tf
import numpy as np
import os, datetime, itertools, shutil, gym, sys
from tf_rl.common.visualise import plot_Q_values
from tf_rl.common.wrappers import MyWrapper, CartPole_Pixel, wrap_deepmind, make_atari
"""
TF basic Utility functions
"""
def eager_setup():
"""
Enable eager execution in TensorFlow with a config that lets us flexibly share a GPU
across multiple Python scripts.
:return:
"""
config = tf.compat.v1.ConfigProto(allow_soft_placement=True,
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
config.gpu_options.allow_growth = True
tf.compat.v1.enable_eager_execution(config=config)
tf.compat.v1.enable_resource_variables()
"""
Common Utility functions
"""
def get_alg_name():
"""Returns the name of the algorithm.
We assume that the directory architecture for that algo looks like below
- Atari: `examples/algo_name/algo_name_eager.py`
- Cartpole: `examples/algo_name/algo_name_eager_cartpole.py`
* where algo_name must be uppercase/capital letters!!
"""
alg_name = sys.argv[0].rsplit("/")[-1].rsplit(".")[0].replace("_eager", "")
return alg_name
def invoke_agent_env(params, alg):
"""Returns the wrapped env and string name of agent, then Use `eval(agent)` to activate it from main script
"""
if params.mode == "Atari":
# pass skip_frame_k to make_atari itself (previously it was swallowed by str.format, which ignores unused kwargs)
env = wrap_deepmind(make_atari("{}NoFrameskip-v4".format(params.env_name), skip_frame_k=params.skip_frame_k),
                    skip_frame_k=params.skip_frame_k)
if params.debug_flg:
agent = "{}_debug".format(alg)
else:
agent = "{}".format(alg)
else:
agent = "{}".format(alg)
if params.mode == "CartPole":
env = MyWrapper(gym.make("CartPole-v0"))
elif params.mode == "CartPole-p":
env = CartPole_Pixel(gym.make("CartPole-v0"))
return agent, env
def create_log_model_directory(params, alg):
"""
Create a directory for log/model
this is compatible with Google colab and can connect to MyDrive through the authorisation step
:param params:
:return:
"""
if params.mode in ["Atari", "atari", "MuJoCo", "mujoco"]:
second_name = params.env_name
else:
second_name = params.mode
now = datetime.datetime.now()
if params.google_colab:
# mount the MyDrive on google drive and create the log directory for saving model and logging using tensorboard
params.log_dir, params.model_dir, params.log_dir_colab, params.model_dir_colab = _setup_on_colab(alg,
params.mode)
else:
if params.debug_flg:
params.log_dir = "../../logs/logs/" + now.strftime("%Y%m%d-%H%M%S") + "-{}_{}_debug/".format(alg,
second_name)
params.model_dir = "../../logs/models/" + now.strftime("%Y%m%d-%H%M%S") + "-{}_{}_debug/".format(alg,
second_name)
else:
params.log_dir = "../../logs/logs/" + now.strftime("%Y%m%d-%H%M%S") + "-{}_{}/".format(alg, second_name)
params.model_dir = "../../logs/models/" + now.strftime("%Y%m%d-%H%M%S") + "-{}_{}/".format(alg, second_name)
return params
def create_loss_func(loss_name="mse"):
if loss_name == "huber":
loss_fn = tf.compat.v1.losses.huber_loss
elif loss_name == "mse":
loss_fn = tf.compat.v1.losses.mean_squared_error
else:
assert False, "Choose the loss_fn from either huber or mse"
return loss_fn
def get_ready(params):
"""
Print out the content of params
:param params:
:return:
"""
for key, item in vars(params).items():
print(key, " : ", item)
def create_checkpoint(model, optimizer, model_dir):
"""
Create a checkpoint for managing a model
:param model:
:param optimizer:
:param model_dir:
:return:
"""
checkpoint_dir = model_dir
check_point = tf.train.Checkpoint(optimizer=optimizer,
model=model,
optimizer_step=tf.compat.v1.train.get_or_create_global_step())
manager = tf.train.CheckpointManager(check_point, checkpoint_dir, max_to_keep=3)
# try re-loading the previous training progress!
try:
print("Try loading the previous training progress")
check_point.restore(manager.latest_checkpoint)
assert tf.compat.v1.train.get_global_step().numpy() != 0
print("===================================================\n")
print("Restored the model from {}".format(checkpoint_dir))
print("Currently we are on time-step: {}".format(tf.compat.v1.train.get_global_step().numpy()))
print("\n===================================================")
except:
print("===================================================\n")
print("Previous Training files are not found in Directory: {}".format(checkpoint_dir))
print("\n===================================================")
return manager
def _setup_on_colab(alg_name, env_name):
"""
Mount MyDrive to current instance through authentication of Google account
Then use it as a backup of training related files
:param env_name:
:return:
"""
# mount your drive on google colab
from google.colab import drive
drive.mount("/content/gdrive")
log_dir = "/content/TF_RL/logs/logs/{}/{}".format(alg_name, env_name)
model_dir = "/content/TF_RL/logs/models/{}/{}".format(alg_name, env_name)
log_dir_colab = "/content/gdrive/My Drive/logs/logs/{}/{}".format(alg_name, env_name)
model_dir_colab = "/content/gdrive/My Drive/logs/models/{}/{}".format(alg_name, env_name)
# create the logs directory under the root dir
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
if not os.path.isdir(model_dir):
os.makedirs(model_dir)
# if the previous directory existed in My Drive, then we would continue training on top of the previous training
if os.path.isdir(log_dir_colab):
print("=== {} IS FOUND ===".format(log_dir_colab))
copy_dir(log_dir_colab, log_dir, verbose=True)
else:
print("=== {} IS NOT FOUND ===".format(log_dir_colab))
os.makedirs(log_dir_colab)
print("=== FINISHED CREATING THE DIRECTORY ===")
if os.path.isdir(model_dir_colab):
print("=== {} IS FOUND ===".format(model_dir_colab))
copy_dir(model_dir_colab, model_dir, verbose=True)
else:
print("=== {} IS NOT FOUND ===".format(model_dir_colab))
os.makedirs(model_dir_colab)
print("=== FINISHED CREATING THE DIRECTORY ===")
return log_dir, model_dir, log_dir_colab, model_dir_colab
class AnnealingSchedule:
"""
Scheduling the gradually decreasing value, e.g., epsilon or beta params
"""
def __init__(self, start=1.0, end=0.1, decay_steps=500, decay_type="linear"):
self.start = start
self.end = end
self.decay_steps = decay_steps
self.annealed_value = np.linspace(start, end, decay_steps)
self.decay_type = decay_type
def old_get_value(self, timestep):
"""
Deprecated
:param timestep:
:return:
"""
if self.decay_type == "linear":
return self.annealed_value[min(timestep, self.decay_steps) - 1]
# don't use this!!
elif self.decay_type == "curved":
if timestep < self.decay_steps:
return self.start * 0.9 ** (timestep / self.decay_steps)
else:
return self.end
def get_value(self):
timestep = tf.train.get_or_create_global_step() # we are maintaining the global-step in train.py so it is accessible
if self.decay_type == "linear":
return self.annealed_value[min(timestep.numpy(), self.decay_steps) - 1]
# don't use this!!
elif self.decay_type == "curved":
if timestep.numpy() < self.decay_steps:
return self.start * 0.9 ** (timestep.numpy() / self.decay_steps)
else:
return self.end
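# [Editor's note] Minimal usage sketch for AnnealingSchedule (illustrative only; this
# helper is not part of the original module and is never called): a linear schedule
# from 1.0 to 0.1 over 5 steps simply indexes into np.linspace(1.0, 0.1, 5) with the
# clamped timestep.
def _example_annealing_schedule():
    schedule = AnnealingSchedule(start=1.0, end=0.1, decay_steps=5, decay_type="linear")
    # old_get_value takes an explicit timestep, so it is the easiest to demo
    return [schedule.old_get_value(t) for t in range(1, 8)]
    # -> [1.0, 0.775, 0.55, 0.325, 0.1, 0.1, 0.1]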
def copy_dir(src, dst, symlinks=False, ignore=None, verbose=False):
"""
copy all contents of the `src` directory into the `dst` directory
Usage:
```python
delete_files("./bb/")
```
"""
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if verbose:
print("From:{}, To: {}".format(s, d))
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def delete_files(folder, verbose=False):
"""
delete all contents of the `folder` directory
Usage:
```python
copy_dir("./aa/", "./bb/")
```
"""
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
if verbose:
print("{} has been deleted".format(file_path))
except Exception as e:
print(e)
class RunningMeanStd:
"""
Running Mean and Standard Deviation for normalising the observation!
This is mainly used in MuJoCo experiments, e.g. DDPG!
Formula:
- Normalisation: y = (x-mean)/std
"""
def __init__(self, shape, clip_range=5, epsilon=1e-2):
self.size = shape
self.epsilon = epsilon
self.clip_range = clip_range
self._sum = 0.0
self._sumsq = np.ones(self.size, np.float32) * epsilon
self._count = np.ones(self.size, np.float32) * epsilon
self.mean = self._sum / self._count
self.std = np.sqrt(np.maximum(self._sumsq / self._count - np.square(self.mean), np.square(self.epsilon)))
def update(self, x):
"""
update the mean and std by given input
:param x: can be observation, reward, or action!!
:return:
"""
x = x.reshape(-1, self.size)
# accumulate the running sums so the statistics reflect all data seen so far
self._sum += x.sum(axis=0)
self._sumsq += np.square(x).sum(axis=0)
self._count += np.array([len(x)], dtype='float64')
self.mean = self._sum / self._count
self.std = np.sqrt(np.maximum(self._sumsq / self._count - np.square(self.mean), np.square(self.epsilon)))
def normalise(self, x):
"""
Using well-maintained mean and std, we normalise the input followed by update them.
:param x:
:return:
"""
result = np.clip((x - self.mean) / self.std, -self.clip_range, self.clip_range)
return result
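# [Editor's note] Minimal usage sketch for RunningMeanStd (illustrative only; this
# helper is not part of the original module and is never called): feed batches of
# observations through update(), then normalise new observations with the maintained
# mean/std, clipped to +/- clip_range.
def _example_running_mean_std():
    rms = RunningMeanStd(shape=3, clip_range=5)
    batch = np.random.randn(32, 3) * 2.0 + 10.0   # fake observations, mean ~10, std ~2
    rms.update(batch)
    return rms.normalise(np.array([10.0, 10.0, 10.0]))  # roughly zero after normalisation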
def test(sess, agent, env, params):
xmax = agent.num_action
ymax = 3
print("\n ===== TEST STARTS: {0} Episodes ===== \n".format(params.test_episodes))
for i in range(params.test_episodes):
state = env.reset()
for t in itertools.count():
env.render()
q_values = sess.run(agent.pred, feed_dict={agent.state: state.reshape(params.state_reshape)})[0]
action = np.argmax(q_values)
plot_Q_values(q_values, xmax=xmax, ymax=ymax)
obs, reward, done, _ = env.step(action)
state = obs
if done:
print("Episode finished after {} timesteps".format(t + 1))
break
return
class logger:
def __init__(self, params):
self.params = params
self.prev_update_step = 0
def logging(self, time_step, current_episode, exec_time, reward_buffer, loss, epsilon, cnt_action):
"""
Logging function
:param time_step:
:param max_steps:
:param current_episode:
:param exec_time:
:param reward:
:param loss:
:param cnt_action:
:return:
"""
cnt_actions = dict((x, cnt_action.count(x)) for x in set(cnt_action))
episode_steps = time_step - self.prev_update_step
# remaing_time_step/exec_time_for_one_step
remaining_time = str(datetime.timedelta(
seconds=(self.params.num_frames - time_step) * exec_time / (episode_steps)))
print(
"{0}/{1}: Ep: {2}({3:.1f} fps), Remaining: {4}, (R) {5} Ep => [MEAN: {6:.3f}, MAX: {7:.3f}], (last ep) Loss: {8:.3f}, Eps: {9:.3f}, Act: {10}".format(
time_step, self.params.num_frames, current_episode, episode_steps / exec_time, remaining_time,
self.params.reward_buffer_ep, np.mean(reward_buffer), np.max(reward_buffer), loss,
epsilon, cnt_actions
))
self.prev_update_step = time_step
"""
Algorithm Specific Utility functions
"""
class her_sampler:
# borrow from: https://github.com/TianhongDai/hindsight-experience-replay/blob/master/her.py
def __init__(self, replay_strategy, replay_k, reward_func=None):
self.replay_strategy = replay_strategy
self.replay_k = replay_k
if self.replay_strategy == 'future':
self.future_p = 1 - (1. / (1 + replay_k))
else:
self.future_p = 0
self.reward_func = reward_func
def sample_her_transitions(self, episode_batch, batch_size_in_transitions):
T = episode_batch['actions'].shape[1]
rollout_batch_size = episode_batch['actions'].shape[0]
batch_size = batch_size_in_transitions
# select which rollouts and which timesteps to be used
episode_idxs = np.random.randint(0, rollout_batch_size, batch_size)
t_samples = np.random.randint(T, size=batch_size)
transitions = {key: episode_batch[key][episode_idxs, t_samples].copy() for key in episode_batch.keys()}
# her idx
her_indexes = np.where(np.random.uniform(size=batch_size) < self.future_p)
future_offset = np.random.uniform(size=batch_size) * (T - t_samples)
future_offset = future_offset.astype(int)
future_t = (t_samples + 1 + future_offset)[her_indexes]
# replace go with achieved goal
future_ag = episode_batch['ag'][episode_idxs[her_indexes], future_t]
transitions['g'][her_indexes] = future_ag
# to get the params to re-compute reward
transitions['r'] = np.expand_dims(self.reward_func(transitions['ag_next'], transitions['g'], None), 1)
transitions = {k: transitions[k].reshape(batch_size, *transitions[k].shape[1:]) for k in transitions.keys()}
return transitions
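# [Editor's note] Hedged note on the 'future' strategy above (illustrative only; the
# reward_func below is a stand-in, not part of this module): with replay_k relabelled
# goals per real goal, the chance of swapping a transition's goal for a future achieved
# goal is future_p = 1 - 1/(1 + replay_k), e.g. replay_k=4 gives 0.8.
def _example_her_future_p():
    sampler = her_sampler(replay_strategy='future', replay_k=4,
                          reward_func=lambda ag, g, info: -(np.linalg.norm(ag - g, axis=-1) > 0.05).astype(np.float32))
    return sampler.future_p  # 0.8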
def action_postprocessing(action, params):
action += params.noise_eps * params.max_action * np.random.randn(*action.shape)
action = np.clip(action, -params.max_action, params.max_action)
# random actions...
random_actions = np.random.uniform(low=-params.max_action,
high=params.max_action,
size=params.num_action)
# choose if use the random actions
action += np.random.binomial(1, params.random_eps, 1)[0] * (random_actions - action)
return action
def state_unpacker(state):
"""
Given the dictionary of state, it unpacks and returns processed items as numpy.ndarray
Sample input:
{'observation': array([ 1.34193265e+00, 7.49100375e-01, 5.34722720e-01, 1.30179339e+00, 8.86399624e-01,
4.24702091e-01, -4.01392554e-02, 1.37299250e-01, -1.10020629e-01, 2.91834773e-06,
-4.72661656e-08, -3.85214084e-07, 5.92637053e-07, 1.12208536e-13, -7.74656889e-06,
-7.65027248e-08, 4.92570535e-05, 1.88857148e-07, -2.90549459e-07, -1.18156686e-18,
7.73934983e-06, 7.18103404e-08, -2.42928780e-06, 4.93607091e-07, 1.70999820e-07]),
'achieved_goal': array([1.30179339, 0.88639962, 0.42470209]),
'desired_goal': array([1.4018907 , 0.62021174, 0.4429846 ])}
:param state:
:return:
"""
obs = np.array(state["observation"])
achieved_goal = np.array(state["achieved_goal"])
desired_goal = np.array(state["desired_goal"])
remaining_goal = simple_goal_subtract(desired_goal, achieved_goal)
return obs, achieved_goal, desired_goal, remaining_goal
def simple_goal_subtract(goal, achieved_goal):
"""
We subtract the achieved goal from the desired one to see how much we are still far from the desired position
"""
assert goal.shape == achieved_goal.shape
return goal - achieved_goal
ALIVE_BONUS = 1.0
def get_distance(env_name):
"""
Returns a function that extracts the per-step forward distance from the given env.
For instance, HalfCheetah and Humanoid report the distance in different ways,
so we need to handle each environment accordingly.
:return: func to calculate the distance(float)
"""
obj_name = env_name.split("-")[0]
if not obj_name.find("Ant") == -1:
def func(action, reward, info):
# https://github.com/openai/gym/blob/master/gym/envs/mujoco/ant.py#L14
distance = info["reward_forward"]
return distance
elif not obj_name.find("HalfCheetah") == -1:
def func(action, reward, info):
# https://github.com/openai/gym/blob/master/gym/envs/mujoco/half_cheetah.py
distance = info["reward_run"]
return distance
elif not obj_name.find("Hopper") == -1:
def func(action, reward, info):
# https://github.com/openai/gym/blob/master/gym/envs/mujoco/hopper.py#L15
distance = (reward - ALIVE_BONUS) + 1e-3 * np.square(action).sum()
return distance
elif not obj_name.find("Humanoid") == -1:
def func(action, reward, info):
# https://github.com/openai/gym/blob/master/gym/envs/mujoco/humanoid.py#L30
distance = info["reward_linvel"] / 1.25
return distance
elif not obj_name.find("Swimmer") == -1:
def func(action, reward, info):
# https://github.com/openai/gym/blob/master/gym/envs/mujoco/swimmer.py#L15
distance = info["reward_fwd"]
return distance
elif not obj_name.find("Walker2d") == -1:
def func(action, reward, info):
# https://github.com/openai/gym/blob/master/gym/envs/mujoco/walker2d.py#L16 -> original version
distance = (reward - ALIVE_BONUS) + 1e-3 * np.square(action).sum()
# https://github.com/openai/gym/blob/master/gym/envs/mujoco/walker2d_v3.py#L90 -> version 3.0
# distance = info["x_velocity"]
return distance
elif not obj_name.find("Centipede") == -1:
def func(action, reward, info):
distance = info["reward_forward"]
return distance
else:
assert False, "This env: {} is not supported yet.".format(env_name)
return func
"""
TODO: I think I will remove this.
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
===== Tracker: a class for storing iteration-specific metrics =====
"""
class Tracker(object):
"""A class for storing iteration-specific metrics.
The internal format is as follows: we maintain a mapping from keys to lists.
Each list contains all the values corresponding to the given key.
For example, self.data_lists['train_episode_returns'] might contain the
per-episode returns achieved during this iteration.
Attributes:
data_lists: dict mapping each metric_name (str) to a list of said metric
across episodes.
"""
def __init__(self):
self.data_lists = {}
def append(self, data_pairs):
"""Add the given values to their corresponding key-indexed lists.
Args:
data_pairs: A dictionary of key-value pairs to be recorded.
"""
for key, value in data_pairs.items():
if key not in self.data_lists:
self.data_lists[key] = []
self.data_lists[key].append(value)
"""
Update methods
"""
def sync_main_target(sess, target, source):
"""
Synchronise the models
from <NAME>'s excellent RL repo
https://github.com/dennybritz/reinforcement-learning/blob/master/DQN/Double%20DQN%20Solution.ipynb
:param target: target model whose weights are overwritten
:param source: source model whose weights are copied
:return:
"""
source_params = [t for t in tf.trainable_variables() if t.name.startswith(source.scope)]
source_params = sorted(source_params, key=lambda v: v.name)
target_params = [t for t in tf.trainable_variables() if t.name.startswith(target.scope)]
target_params = sorted(target_params, key=lambda v: v.name)
update_ops = []
for target_w, source_w in zip(target_params, source_params):
op = target_w.assign(source_w)
update_ops.append(op)
sess.run(update_ops)
def soft_target_model_update(sess, target, source, tau=1e-2):
"""
Soft update model parameters.
target = tau * source + (1 - tau) * target
:param target: target network (updated in place)
:param source: source/main network
:param tau:
:return:
"""
source_params = [t for t in tf.trainable_variables() if t.name.startswith(source.scope)]
source_params = sorted(source_params, key=lambda v: v.name)
target_params = [t for t in tf.trainable_variables() if t.name.startswith(target.scope)]
target_params = sorted(target_params, key=lambda v: v.name)
update_ops = []
for target_w, source_w in zip(target_params, source_params):
# target = tau * source + (1 - tau) * target
op = target_w.assign(tau * source_w + (1 - tau) * target_w)
update_ops.append(op)
sess.run(update_ops)
@tf.contrib.eager.defun(autograph=False)
def soft_target_model_update_eager(target, source, tau=1e-2):
"""
Soft update model parameters.
target = tau * source + (1 - tau) * target
:param target: target Keras model whose weights are softly updated
:param source: source Keras model providing the new weights
:param tau: interpolation coefficient (e.g. 1e-2)
:return:
"""
for param, target_param in zip(source.weights, target.weights):
target_param.assign(tau * param + (1 - tau) * target_param)
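# Added usage sketch (assumptions: `main_model` and `target_model` are graph-mode
# networks exposing a `scope` attribute, as sync_main_target/soft_target_model_update
# expect; `sess` is an active tf.Session; names are illustrative only).
def _target_update_example(sess, main_model, target_model, global_step):
    if global_step % 1000 == 0:
        # periodic hard synchronisation of the target network
        sync_main_target(sess, target=target_model, source=main_model)
    else:
        # Polyak averaging towards the online network on every other step
        soft_target_model_update(sess, target=target_model, source=main_model, tau=1e-2)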
"""
Gradient Clipping
"""
def gradient_clip_fn(flag=None):
"""
given a flag, create the clipping function and returns it as a function
currently it supports:
- by_value
- norm
- None
:param flag:
:return:
"""
if flag == "":
def _func(grads):
return grads
elif flag == "by_value":
def _func(grads):
grads = [ClipIfNotNone(grad, -1., 1.) for grad in grads]
return grads
elif flag == "norm":
def _func(grads):
grads, _ = tf.clip_by_global_norm(grads, 10.0)
return grads
else:
assert False, "Choose the gradient clipping function from by_value, norm, or nothing!"
return _func
def ClipIfNotNone(grad, _min, _max):
"""
Reference: https://stackoverflow.com/a/39295309
:param grad:
:return:
"""
if grad is None:
return grad
return tf.clip_by_value(grad, _min, _max)
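# Added usage sketch: how the function returned by gradient_clip_fn sits between
# gradient computation and the optimizer update (TF1-style graph code; `loss`,
# `params` and `optimizer` are placeholders for the caller's own objects).
def _gradient_clip_example(loss, params, optimizer):
    clip_fn = gradient_clip_fn(flag="by_value")
    grads = tf.gradients(loss, params)
    grads = clip_fn(grads)
    return optimizer.apply_gradients(zip(grads, params))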
"""
Test Methods
"""
def eval_Agent(agent, env, n_trial=1):
"""
Evaluate the trained agent!
:return:
"""
all_rewards = list()
print("=== Evaluation Mode ===")
for ep in range(n_trial):
state = env.reset()
done = False
episode_reward = 0
while not done:
# epsilon-greedy for evaluation using a fixed epsilon of 0.05 (as in the Nature DQN paper)
if np.random.uniform() < 0.05:
action = np.random.randint(agent.num_action)
else:
action = np.argmax(agent.predict(state))
next_state, reward, done, _ = env.step(action)
state = next_state
episode_reward += reward
all_rewards.append(episode_reward)
tf.contrib.summary.scalar("Evaluation Score", episode_reward, step=agent.index_timestep)
print("| Ep: {}/{} | Score: {} |".format(ep + 1, n_trial, episode_reward))
# if this is running on Google Colab, we would store the log/models to mounted MyDrive
if agent.params.google_colab:
delete_files(agent.params.model_dir_colab)
delete_files(agent.params.log_dir_colab)
copy_dir(agent.params.log_dir, agent.params.log_dir_colab)
copy_dir(agent.params.model_dir, agent.params.model_dir_colab)
if n_trial > 2:
print("=== Evaluation Result ===")
all_rewards = np.array(all_rewards)
print("| Max: {} | Min: {} | STD: {} | MEAN: {} |".format(
np.max(all_rewards), np.min(all_rewards), np.std(all_rewards), np.mean(all_rewards)))
#!/usr/bin/env python
u"""
fit.py
Written by <NAME> (05/2021)
Utilities for calculating average fits from ATL03 Geolocated Photon Data
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
scipy: Scientific Tools for Python
https://docs.scipy.org/doc/
scikit-learn: Machine Learning in Python
http://scikit-learn.org/stable/index.html
https://github.com/scikit-learn/scikit-learn
UPDATE HISTORY:
Written 05/2021
"""
import operator
import itertools
import numpy as np
import scipy.stats
import scipy.signal
import scipy.optimize
import sklearn.neighbors
# PURPOSE: compress complete list of valid indices into a set of ranges
def compress_list(i,n):
"""
Compress complete list of valid indices into a set of ranges
Arguments
---------
i: indices to compress
n: largest gap between indices to accept for range
"""
for a,b in itertools.groupby(enumerate(i), lambda v: ((v[1]-v[0])//n)*n):
group = list(map(operator.itemgetter(1),b))
yield (group[0], group[-1])
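# Added usage sketch: compress_list is a generator, so wrap it in list().
# Indices [1, 2, 3, 10, 11, 12, 30] with n=2 collapse into the ranges
# [(1, 3), (10, 12), (30, 30)].
def _compress_list_example():
    valid = [1, 2, 3, 10, 11, 12, 30]
    return list(compress_list(valid, 2))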
# PURPOSE: centers the transmit-echo-path histogram reported by ATL03
# using an iterative edit to distinguish between signal and noise
def extract_tep_histogram(tep_hist_time,tep_hist,tep_range_prim):
"""
Centers the transmit-echo-path histogram reported by ATL03
using an iterative edit to distinguish between signal and noise
"""
# ATL03 recommends subset between 15-30 ns to avoid secondary
# using primary histogram range values from ATL03 tep attributes
i, = np.nonzero((tep_hist_time >= tep_range_prim[0]) &
(tep_hist_time < tep_range_prim[1]))
t_tx = np.copy(tep_hist_time[i])
n_tx = len(t_tx)
# noise samples of tep_hist (first 5ns and last 10 ns)
ns,ne = (tep_range_prim[0]+5e-9,tep_range_prim[1]-10e-9)
noise, = np.nonzero((t_tx <= ns) | (t_tx >= ne))
noise_p1 = []
# signal samples of tep_hist
signal = sorted(set(np.arange(n_tx)) - set(noise))
# number of iterations
n_iter = 0
while (set(noise) != set(noise_p1)) & (n_iter < 10):
# value of noise in tep histogram
tep_noise_value = np.sqrt(np.sum(tep_hist[i][noise]**2)/n_tx)
p_tx = np.abs(np.copy(tep_hist[i]) - tep_noise_value)
# calculate centroid of tep_hist
t0_tx = np.sum(t_tx[signal]*p_tx[signal])/np.sum(p_tx[signal])
# calculate cumulative distribution function
TX_cpdf = np.cumsum(p_tx[signal]/np.sum(p_tx[signal]))
# linearly interpolate to 16th and 84th percentile for RDE
TX16,TX84 = np.interp([0.16,0.84],TX_cpdf,t_tx[signal]-t0_tx)
# calculate width of transmitted pulse (RDE)
W_TX = 0.5*(TX84 - TX16)
# recalculate noise
noise_p1 = np.copy(noise)
ns,ne = (t0_tx-6.0*W_TX,t0_tx+6.0*W_TX)
noise, = np.nonzero((t_tx <= ns) | (t_tx >= ne))
signal = sorted(set(np.arange(n_tx)) - set(noise))
# add 1 to counter
n_iter += 1
# valid primary TEP return has full-width at half max < 3 ns
mx = np.argmax(p_tx[signal])
halfmax = np.max(p_tx[signal])/2.0
H1 = np.interp(halfmax,p_tx[signal][:mx],t_tx[signal][:mx])
H2 = np.interp(halfmax,p_tx[signal][:mx:-1],t_tx[signal][:mx:-1])
FWHM = H2 - H1
# return values
return (t_tx[signal]-t0_tx,p_tx[signal],W_TX,FWHM,ns,ne)
# PURPOSE: calculate the interquartile range (Pritchard et al, 2009) and
# robust dispersion estimator (Smith et al, 2017) of the model residuals
def filter_elevation(r0):
"""
Calculates the interquartile range (Pritchard et al, 2009) and
robust dispersion estimator (Smith et al, 2017) of the model residuals
Arguments
---------
r0: height residuals
"""
# calculate percentiles for IQR and RDE
# IQR: first and third quartiles (25th and 75th percentiles)
# RDE: 16th and 84th percentiles
# median: 50th percentile
Q1,Q3,P16,P84,MEDIAN = np.percentile(r0,[25,75,16,84,50])
# calculate interquartile range
IQR = Q3 - Q1
# calculate robust dispersion estimator (RDE)
RDE = P84 - P16
# IQR pass: residual-(median value) is within 75% of IQR
# RDE pass: residual-(median value) is within 50% of P84-P16
return (0.75*IQR,0.5*RDE,MEDIAN)
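# Added usage sketch: the returned statistics are typically used as editing
# thresholds on the residuals, e.g. keeping photons that fall within the
# robust dispersion estimate of the median.
def _filter_elevation_example(residuals):
    IQR, RDE, MEDIAN = filter_elevation(residuals)
    keep, = np.nonzero(np.abs(residuals - MEDIAN) <= RDE)
    return keep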
# PURPOSE: try fitting a surface to the signal photons with progressively
# less confidence if no valid surface is found
def try_surface_fit(x, y, z, confidence_mask, dist_along, SURF_TYPE='linear',
ITERATE=25, CONFIDENCE=[4,3,2,1,0]):
"""
Try fitting a surface to the signal photons with progressively
less confidence if no valid surface is found
"""
# try with progressively less confidence
for i,conf in enumerate(CONFIDENCE):
ind, = np.nonzero(confidence_mask >= conf)
centroid = dict(x=dist_along, y=np.mean(y[ind]))
try:
surf = reduce_surface_fit(x[ind], y[ind], z[ind], centroid, ind,
SURF_TYPE=SURF_TYPE, ITERATE=ITERATE)
except (ValueError, np.linalg.LinAlgError):
pass
else:
return (i+1,surf,centroid)
# if still no values found: return infinite values
# will need to attempt a backup algorithm
surf = dict(error=np.full(1,np.inf))
centroid = None
return (None,surf,centroid)
# PURPOSE: iteratively fit a polynomial surface to the elevation data to
# reduce to within a valid window
def reduce_surface_fit(x, y, z, centroid, ind, SURF_TYPE='linear', ITERATE=25):
"""
Iteratively fit a polynomial surface to the elevation data to reduce to
within a valid surface window
"""
# calculate x and y relative to centroid point
rel_x = x - centroid['x']
# Constant Term
Z0 = np.ones_like((z))
if (SURF_TYPE == 'linear'):# linear fit
SURFMAT = np.transpose([Z0,rel_x])
elif (SURF_TYPE == 'quadratic'):# quadratic fit
SURFMAT = np.transpose([Z0,rel_x,rel_x**2])
# number of points for fit and number of terms in fit
n_max,n_terms = np.shape(SURFMAT)
# run only if number of points is above number of terms
FLAG1 = ((n_max - n_terms) > 10)
# maximum allowable window size
H_win_max = 20.0
# minimum allowable window size
H_win_min = 3.0
# set initial window to the full z range
window = z.max() - z.min()
window_p1 = np.copy(window)
# initial indices for reducing to window
filt = np.arange(n_max)
filt_p1 = np.copy(filt)
filt_p2 = np.copy(filt_p1)
if FLAG1:
# save initial indices for fitting all photons for confidence level
indices = ind.copy()
# run fit program for polynomial type
fit = fit_surface(x, y, z, centroid, SURF_TYPE=SURF_TYPE)
# number of iterations performed
n_iter = 1
# save beta coefficients
beta_mat = np.copy(fit['beta'])
error_mat = np.copy(fit['error'])
# residuals of model fit
resid = z - np.dot(SURFMAT,beta_mat)
# standard deviation of the residuals
resid_std = np.std(resid)
# save MSE and DOF for error analysis
MSE = np.copy(fit['MSE'])
DOF = np.copy(fit['DOF'])
# Root mean square error
RMSE = np.sqrt(fit['MSE'])
# Normalized root mean square error
NRMSE = RMSE/(np.max(z)-np.min(z))
# IQR pass: residual-(median value) is within 75% of IQR
# RDE pass: residual-(median value) is within 50% of P84-P16
IQR,RDE,MEDIAN = filter_elevation(resid)
# checking if any residuals are outside of the window
window = np.max([H_win_min,6.0*RDE,0.5*window_p1])
filt, = np.nonzero(np.abs(resid-MEDIAN) <= (window/2.0))
# save iteration of window
window_p1 = np.copy(window)
# run only if number of points is above number of terms
n_rem = np.count_nonzero(np.abs(resid-MEDIAN) <= (window/2.0))
FLAG1 = ((n_rem - n_terms) > 10)
# maximum number of iterations to prevent infinite loops
FLAG2 = (n_iter <= ITERATE)
# compare indices over two iterations to prevent false stoppages
FLAG3 = (set(filt) != set(filt_p1)) | (set(filt_p1) != set(filt_p2))
# iterate until there are no additional removed photons
while FLAG1 & FLAG2 & FLAG3:
# fit selected photons for window
x_filt,y_filt,z_filt,indices = (x[filt],y[filt],z[filt],ind[filt])
# run fit program for polynomial type
fit = fit_surface(x_filt,y_filt,z_filt,centroid,SURF_TYPE=SURF_TYPE)
# add to number of iterations performed
n_iter += 1
# save model coefficients
beta_mat = np.copy(fit['beta'])
error_mat = np.copy(fit['error'])
# save MSE and DOF for error analysis
MSE = np.copy(fit['MSE'])
DOF = np.copy(fit['DOF'])
# Root mean square error
RMSE = np.sqrt(fit['MSE'])
# Normalized root mean square error
NRMSE = RMSE/(np.max(z_filt)-np.min(z_filt))
# save number of points
n_max = len(z_filt)
# residuals of model fit
resid = z - np.dot(SURFMAT,beta_mat)
# standard deviation of the residuals
resid_std = np.std(resid)
# IQR pass: residual-(median value) is within 75% of IQR
# RDE pass: residual-(median value) is within 50% of P84-P16
IQR,RDE,MEDIAN = filter_elevation(resid)
# checking if any residuals are outside of the window
window = np.max([H_win_min,6.0*RDE,0.5*window_p1])
# filter out using median statistics and refit
filt_p2 = np.copy(filt_p1)
filt_p1 = np.copy(filt)
filt, = np.nonzero(np.abs(resid-MEDIAN) <= (window/2.0))
# save iteration of window
window_p1 = np.copy(window)
# run only if number of points is above number of terms
n_rem = np.count_nonzero(np.abs(resid-MEDIAN) <= (window/2.0))
FLAG1 = ((n_rem - n_terms) > 10)
# maximum number of iterations to prevent infinite loops
FLAG2 = (n_iter <= ITERATE)
# compare indices over two iterations to prevent false stoppages
FLAG3 = (set(filt) != set(filt_p1)) | (set(filt_p1) != set(filt_p2))
# return reduced model fit
FLAG3 = (set(filt) == set(filt_p1))
if FLAG1 & FLAG3 & (window <= H_win_max):
return {'beta':beta_mat, 'error':error_mat, 'MSE':MSE, 'NRMSE':NRMSE,
'DOF':DOF, 'count':n_max, 'indices':indices, 'iterations':n_iter,
'window':window, 'RDE':RDE}
else:
raise ValueError('No valid data points found')
# PURPOSE: fit a polynomial surface to the elevation data
def fit_surface(x, y, z, centroid, SURF_TYPE='linear'):
"""
Fit a polynomial surface to the elevation data
"""
# calculate x and y relative to centroid point
rel_x = x - centroid['x']
# Constant Term
Z0 = np.ones_like((z))
# Surface design matrix
if (SURF_TYPE == 'linear'):# linear fit
SURFMAT = np.transpose([Z0,rel_x])
elif (SURF_TYPE == 'quadratic'):# quadratic fit
SURFMAT = np.transpose([Z0,rel_x,rel_x**2])
# number of points for fit and number of terms in fit
n_max,n_terms = np.shape(SURFMAT)
# Standard Least-Squares fitting (the [0] denotes coefficients output)
beta_mat = np.linalg.lstsq(SURFMAT,z,rcond=-1)[0]
# modelled surface elevation
model = np.dot(SURFMAT,beta_mat)
# residual of fit
res = z - model
# nu = Degrees of Freedom = number of measurements-number of parameters
nu = n_max - n_terms
# Mean square error
# MSE = (1/nu)*sum((Y-X*B)**2)
MSE = np.dot(np.transpose(z - model),(z - model))/nu
# elevation surface error analysis
Hinv = np.linalg.inv(np.dot(np.transpose(SURFMAT),SURFMAT))
# Taking the diagonal components of the cov matrix
hdiag = np.diag(Hinv)
# Default is 95% confidence interval
alpha = 1.0 - (0.95)
# Student T-Distribution with D.O.F. nu
# t.ppf parallels tinv in matlab
tstar = scipy.stats.t.ppf(1.0-(alpha/2.0),nu)
# beta_err = t(nu,1-alpha/2)*standard error
std_error = np.sqrt(MSE*hdiag)
model_error = np.dot(SURFMAT,tstar*std_error)
return {'beta':beta_mat, 'error':tstar*std_error, 'model':model,
'model_error': model_error, 'residuals':res, 'MSE':MSE, 'DOF':nu}
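# Added usage sketch with synthetic data: fit a linear surface to noisy heights
# along-track; only the 'x' key of `centroid` is used by these surface fits.
def _fit_surface_example():
    x = np.linspace(0.0, 40.0, 50)
    y = np.zeros_like(x)
    z = 100.0 + 0.05*(x - 20.0) + 0.01*np.random.randn(50)
    centroid = dict(x=20.0, y=0.0)
    fit = fit_surface(x, y, z, centroid, SURF_TYPE='linear')
    return fit['beta']  # approximately [100.0, 0.05]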
# PURPOSE: try fitting a function to the signal photon histograms
# with progressively less confidence if no valid fit is found
def try_histogram_fit(x, y, z, confidence_mask, dist_along, dt,
FIT_TYPE='gaussian', ITERATE=25, BACKGROUND=0, CONFIDENCE=[2,1,0]):
"""
Try fitting a function to the signal photon histograms with
progressively less confidence if no valid fit is found
"""
# try with progressively less confidence
for i,conf in enumerate(CONFIDENCE):
ind, = np.nonzero(confidence_mask >= conf)
centroid = dict(x=dist_along, y=np.mean(y[ind]))
try:
surf = reduce_histogram_fit(x[ind], y[ind], z[ind], ind,
dt, FIT_TYPE=FIT_TYPE, ITERATE=ITERATE, PEAKS=2,
BACKGROUND=BACKGROUND)
except (ValueError, RuntimeError, SyntaxError):
pass
else:
return (i+1,surf,centroid)
# if still no values found: return infinite values
# will need to attempt a backup algorithm
surf = dict(error=np.full(1,np.inf))
centroid = None
return (None,surf,centroid)
# PURPOSE: iteratively use decomposition fitting to the elevation data to
# reduce to within a valid window
def reduce_histogram_fit(x, y, z, ind, dt, FIT_TYPE='gaussian',
ITERATE=25, PEAKS=2, BACKGROUND=0):
"""
Iteratively use decomposition fitting to the elevation data to reduce
to within a valid surface window
"""
# speed of light
c = 299792458.0
# use same delta time as calculating first photon bias
# so that the residuals will be the same
dz = dt*c
# number of background photons in each bin
N_BG = dz*BACKGROUND
# create a histogram of the heights
zmin,zmax = (z.min(),z.max())
z_full = np.arange(zmin,zmax+dz,dz)
nz = len(z_full)
# maximum allowable window size
H_win_max = 20.0
# minimum allowable window size
H_win_min = 3.0
# set initial window to the full z range
window = zmax - zmin
window_p1 = np.copy(window)
# number of data points
n_max = len(z)
# number of terms in fit
if (FIT_TYPE == 'gaussian'):# gaussian fit
n_terms = 3
elif (FIT_TYPE == 'general'):# generalized gaussian fit
n_terms = 4
# run only if number of histogram points is above number of terms
FLAG1 = ((nz - n_terms) > 10)
# using kernel density functions from scikit-learn neighbors
# gaussian kernels will reflect more accurate distributions of the data
# with less sensitivity to sampling width than histograms (tophat kernels)
kde = sklearn.neighbors.KernelDensity(bandwidth=dz,kernel='gaussian')
kde.fit(z[:,None])
# kde score_samples outputs are normalized log density functions
hist = np.exp(kde.score_samples(z_full[:,None]) + np.log(n_max*dz))
# smooth histogram before determining differentials
gw = scipy.signal.gaussian(nz,4)
hist_smooth = scipy.signal.convolve(hist, gw/gw.sum(), mode='same')
# First differentials to find zero crossings
# histogram 1st differential
dhist = np.zeros((nz))
# forward differentiation for starting point
dhist[0] = hist_smooth[1] - hist_smooth[0]
# backward differentiation for end point
dhist[-1] = hist_smooth[-1] - hist_smooth[-2]
# centered differentiation for all others
dhist[1:-1] = (hist_smooth[2:] - hist_smooth[0:-2])/2.0
# find positive peaks above amplitude threshold (percent of max)
# by calculating the histogram differentials
# signal amplitude threshold greater than 10% of max or 5.5xbackground rate
AmpThreshold = 0.10
HistThreshold = np.max([5.5*N_BG, AmpThreshold*np.max(hist_smooth)])
n_peaks = np.count_nonzero((np.sign(dhist[0:-1]) >= 0) & (np.sign(dhist[1:]) < 0) &
((hist_smooth[0:-1] > HistThreshold) | (hist_smooth[1:] > HistThreshold)))
n_peaks = np.min([n_peaks,PEAKS])
peak_index, = np.nonzero((np.sign(dhist[0:-1]) >= 0) & (np.sign(dhist[1:]) < 0) &
((hist_smooth[0:-1] > HistThreshold) | (hist_smooth[1:] > HistThreshold)))
# initial indices for reducing to window
filt = np.arange(n_max)
filt_p1 = np.copy(filt)
filt_p2 = np.copy(filt_p1)
if FLAG1 and (n_peaks > 0):
# save initial indices for fitting all photons for confidence level
indices = ind.copy()
# sort peak index by amplitude of peaks (descending from max to min)
# and truncate to a finite number of peaks
sorted_peaks = np.argsort(hist[peak_index])[::-1]
peak_index = peak_index[sorted_peaks][:n_peaks]
# amplitude of the maximum peak
max_amp = hist[peak_index][0]
# cumulative probability distribution function of initial histogram
hist_cpdf = np.cumsum(hist/np.sum(hist))
# IQR: first and third quartiles (25th and 75th percentiles)
# RDE: 16th and 84th percentiles
Q1,Q3,P16,P84 = np.interp([0.25,0.75,0.16,0.84],hist_cpdf,z_full)
# create priors list
priors = []
lower_bound = []
upper_bound = []
for i,p in enumerate(peak_index):
if (FIT_TYPE == 'gaussian'):
# Fit Gaussian functions to photon event histogram
# a*: amplitude of waveform
# r*: range from differential index
# w*: width as 0.75*IQR
priors.append([hist[p],z_full[p],0.75*(Q3-Q1)])
# bounds of each parameter
# amplitude: 0 to histogram max+5.5xbackground rate
# range: zmin to zmax
# width: sz to half width of z
lower_bound.extend([0,zmin,dz])
upper_bound.extend([max_amp+5.5*N_BG,zmax,(zmax-zmin)/2.0])
elif (FIT_TYPE == 'general'):
# Fit Generalized Gaussian functions to photon event histogram
# a*: amplitude of waveform
# r*: range from differential index
# w*: width as 0.75*IQR
# p*: shape parameter = gaussian sqrt(2)
priors.append([hist[p],z_full[p],0.75*(Q3-Q1),np.sqrt(2)])
# bounds of each parameter
# amplitude: 0 to histogram max+5.5xbackground rate
# range: zmin to zmax
# width: sz to half width of z
# shape: positive
lower_bound.extend([0,zmin,dz,0])
upper_bound.extend([max_amp+5.5*N_BG,zmax,(zmax-zmin)/2.0,np.inf])
# run optimized curve fit with Levenberg-Marquardt algorithm
fit = fit_histogram(z_full,hist,priors,lower_bound,upper_bound,
FIT_TYPE=FIT_TYPE)
# number of iterations performed
n_iter = 1
# height fits and height fit errors
height = fit['height'].copy()
amplitude = fit['amplitude'].copy()
height_errors = fit['error'].copy()
# minimum and maximum heights
min_peak = np.min(fit['height'])
max_peak = np.max(fit['height'])
# save MSE and DOF for error analysis
MSE = np.copy(fit['MSE'])
DOF = np.copy(fit['DOF'])
# Root mean square error
RMSE = np.sqrt(fit['MSE'])
# Normalized root mean square error
NRMSE = RMSE/(zmax-zmin)
# histogram fit
model = np.copy(fit['model'])
# histogram fit residuals
resid = np.copy(fit['residuals'])
# cumulative probability distribution function of initial histogram
cpdf = np.cumsum(fit['residuals']/np.sum(fit['residuals']))
# interpolate residuals to percentiles of interest for statistics
Q1,Q3,MEDIAN,P16,P84 = np.interp([0.25,0.75,0.5,0.16,0.84],cpdf,z_full)
# IQR: first and third quartiles (25th and 75th percentiles)
# RDE: 16th and 84th percentiles
IQR = 0.75*(Q3-Q1)
RDE = 0.50*(P84-P16)
# checking if any residuals are outside of the window
window = np.max([H_win_min,6.0*RDE,0.5*window_p1])
filt, = np.nonzero((z > (min_peak-window/2.0)) & (z < (max_peak+window/2.0)))
# run only if number of points is above number of terms
n_rem = np.count_nonzero((z > (min_peak-window/2.0)) & (z < (max_peak+window/2.0)))
nz = (np.max(z[filt])-np.min(z[filt]))//dz + 1
FLAG1 = ((nz - n_terms) > 10) & ((n_rem - n_terms) > 10)
# maximum number of iterations to prevent infinite loops
FLAG2 = (n_iter <= ITERATE)
# compare indices over two iterations to prevent false stoppages
FLAG3 = (set(filt) != set(filt_p1)) | (set(filt_p1) != set(filt_p2))
# iterate until there are no additional removed photons
while FLAG1 & FLAG2 & FLAG3:
# fit selected photons for window
x_filt,y_filt,z_filt,indices = (x[filt],y[filt],z[filt],ind[filt])
zmin,zmax = (z_filt.min(),z_filt.max())
z_full = np.arange(zmin,zmax+dz,dz)
nz = len(z_full)
# using kernel density functions from scikit-learn neighbors
# gaussian kernels will reflect more accurate distributions of the data
# with less sensitivity to sampling width than histograms (tophat kernels)
kde = sklearn.neighbors.KernelDensity(bandwidth=dz,kernel='gaussian')
kde.fit(z_filt[:,None])
# kde score_samples outputs are normalized log density functions
hist = np.exp(kde.score_samples(z_full[:,None]) + np.log(nz*dz))
# smooth histogram before determining differentials
gw = scipy.signal.gaussian(nz,4)
hist_smooth = scipy.signal.convolve(hist, gw/gw.sum(), mode='same')
# First differentials to find zero crossings
# histogram 1st differential
dhist = np.zeros((nz))
# forward differentiation for starting point
dhist[0] = hist_smooth[1] - hist_smooth[0]
# backward differentiation for end point
dhist[-1] = hist_smooth[-1] - hist_smooth[-2]
# centered differentiation for all others
dhist[1:-1] = (hist_smooth[2:] - hist_smooth[0:-2])/2.0
# find positive peaks above amplitude threshold (percent of max)
# by calculating the histogram differentials
# signal amplitude threshold greater than 10% of max or 5.5xbackground rate
HistThreshold = np.max([5.5*N_BG, AmpThreshold*np.max(hist_smooth)])
n_peaks = np.count_nonzero((np.sign(dhist[0:-1]) >= 0) & (np.sign(dhist[1:]) < 0) &
((hist_smooth[0:-1] > HistThreshold) | (hist_smooth[1:] > HistThreshold)))
n_peaks = np.min([n_peaks,PEAKS])
peak_index, = np.nonzero((np.sign(dhist[0:-1]) >= 0) & (np.sign(dhist[1:]) < 0) &
((hist_smooth[0:-1] > HistThreshold) | (hist_smooth[1:] > HistThreshold)))
# sort peak index by amplitude of peaks (descending from max to min)
# and truncate to a finite number of peaks
sorted_peaks = np.argsort(hist[peak_index])[::-1]
peak_index = peak_index[sorted_peaks][:n_peaks]
# amplitude of the maximum peak
max_amp = hist[peak_index][0]
# cumulative probability distribution function of initial histogram
hist_cpdf = np.cumsum(hist/np.sum(hist))
# IQR: first and third quartiles (25th and 75th percentiles)
# RDE: 16th and 84th percentiles
Q1,Q3,P16,P84 = np.interp([0.25,0.75,0.16,0.84],hist_cpdf,z_full)
# create priors list
priors = []
lower_bound = []
upper_bound = []
for i,p in enumerate(peak_index):
if (FIT_TYPE == 'gaussian'):
# Fit Gaussian functions to photon event histogram
# a*: amplitude of waveform
# r*: range from differential index
# w*: width as 0.75*IQR
priors.append([hist[p],z_full[p],0.75*(Q3-Q1)])
# bounds of each parameter
# amplitude: 0 to histogram max+5.5xbackground rate
# range: zmin to zmax
# width: sz to half width of z
lower_bound.extend([0,zmin,dz])
upper_bound.extend([max_amp+5.5*N_BG,zmax,(zmax-zmin)/2.0])
elif (FIT_TYPE == 'general'):
# Fit Generalized Gaussian functions to photon event histogram
# a*: amplitude of waveform
# r*: range from differential index
# w*: width as 0.75*IQR
# p*: shape parameter = gaussian sqrt(2)
priors.append([hist[p],z_full[p],0.75*(Q3-Q1),np.sqrt(2)])
# bounds of each parameter
# amplitude: 0 to histogram max+5.5xbackground rate
# range: zmin to zmax
# width: sz to half width of z
# shape: positive
lower_bound.extend([0,zmin,dz,0])
upper_bound.extend([max_amp+5.5*N_BG,zmax,(zmax-zmin)/2.0,np.inf])
# run optimized curve fit with Levenberg-Marquardt algorithm
fit = fit_histogram(z_full,hist,priors,lower_bound,upper_bound,
FIT_TYPE=FIT_TYPE)
# add to number of iterations performed
n_iter += 1
# height fits and height fit errors
height = fit['height'].copy()
amplitude = fit['amplitude'].copy()
height_errors = fit['error'].copy()
# minimum and maximum heights
min_peak = np.min(fit['height'])
max_peak = np.max(fit['height'])
# save MSE and DOF for error analysis
MSE = np.copy(fit['MSE'])
DOF = np.copy(fit['DOF'])
# Root mean square error
RMSE = np.sqrt(fit['MSE'])
# Normalized root mean square error
NRMSE = RMSE/(zmax-zmin)
# histogram fit
model = np.copy(fit['model'])
# histogram fit residuals
resid = np.copy(fit['residuals'])
# cumulative probability distribution function of initial histogram
cpdf = np.cumsum(resid/np.sum(resid))
# interpolate residuals to percentiles of interest for statistics
Q1,Q3,MEDIAN,P16,P84 = np.interp([0.25,0.75,0.5,0.16,0.84],cpdf,z_full)
# IQR: first and third quartiles (25th and 75th percentiles)
# RDE: 16th and 84th percentiles
IQR = 0.75*(Q3-Q1)
RDE = 0.50*(P84-P16)
# checking if any residuals are outside of the window
window = np.max([H_win_min,6.0*RDE,0.5*window_p1])
# filter out using median statistics and refit
filt_p2 = np.copy(filt_p1)
filt_p1 = np.copy(filt)
filt, = np.nonzero((z > (min_peak-window/2.0)) & (z < (max_peak+window/2.0)))
# save iteration of window
window_p1 = np.copy(window)
# run only if number of points is above number of terms
n_rem = np.count_nonzero((z > (min_peak-window/2.0)) & (z < (max_peak+window/2.0)))
nz = (np.max(z[filt])-np.min(z[filt]))//dz + 1
FLAG1 = ((nz - n_terms) > 10) & ((n_rem - n_terms) > 10)
# maximum number of iterations to prevent infinite loops
FLAG2 = (n_iter <= ITERATE)
# compare indices over two iterations to prevent false stoppages
FLAG3 = (set(filt) != set(filt_p1)) | (set(filt_p1) != set(filt_p2))
# return reduced model fit
FLAG3 = (set(filt) == set(filt_p1))
if FLAG1 & FLAG3 & (window <= H_win_max) & (n_peaks > 0):
# calculate time with respect to mean of fit heights
t_full = -2*(z_full-np.mean(height))/c
# return values
return {'height':height, 'error':height_errors, 'amplitude':amplitude,
'MSE':MSE, 'NRMSE':NRMSE, 'residuals':resid, 'time': t_full,
'model':model, 'DOF':DOF, 'count':n_max, 'indices':indices,
'iterations':n_iter, 'window':window, 'RDE':RDE, 'peaks':n_peaks}
else:
raise ValueError('No valid fit found')
# PURPOSE: optimally fit a function to the photon event histogram
# with the Levenberg-Marquardt algorithm
def fit_histogram(z, hist, priors, lower_bound, upper_bound, FIT_TYPE=None):
"""
Optimally fit a function to the photon event histogram with the
Levenberg-Marquardt algorithm
"""
# create lists for the initial parameters
# parameters, and functions for each maximum
plist = []
flist = []
n_peaks = len(priors)
# function formatting string and parameter list for each fit type
if (FIT_TYPE == 'gaussian'):
# summation of gaussian functions with:
# peak amplitudes a*
# peak ranges r* (mean)
# peak widths w* (standard deviation)
# Gaussian function formatting string and parameters
function = 'a{0:d}*np.exp(-(x-r{0:d})**2.0/(2*w{0:d}**2))'
parameters = 'a{0:d}, r{0:d}, w{0:d}'
elif (FIT_TYPE == 'general'):
# summation of generalized gaussian functions with:
# peak amplitudes a*
# peak ranges r* (mean)
# peak widths w* (standard deviation)
# shape parameter p* (gaussian=sqrt(2))
# Generalized Gaussian function formatting string and parameters
function = 'a{0:d}*np.exp(-np.abs(x-r{0:d})**(p{0:d}**2.0)/(2*w{0:d}**2))'
parameters = 'a{0:d}, r{0:d}, w{0:d}, p{0:d}'
# fit decomposition functions to photon event histograms
for n,p in enumerate(priors):
# parameter list for peak n
plist.append(parameters.format(n))
# function definition list for peak n
flist.append(function.format(n))
# initial parameters for iteration n
p0 = np.concatenate((priors),axis=0)
# variables for iteration n
lambda_parameters = ', '.join([p for p in plist])
# full function for iteration n
lambda_function = ' + '.join([f for f in flist])
# tuple for parameter bounds (lower and upper)
bounds = (lower_bound, upper_bound)
# create lambda function for iteration n
# lambda functions are inline definitions
# with the parameters, variables and function definition
fsum = eval('lambda x, {0}: {1}'.format(lambda_parameters, lambda_function))
# optimized curve fit with Levenberg-Marquardt algorithm
# with the initial guess parameters p0 and parameter bounds
popt, pcov = scipy.optimize.curve_fit(fsum,z,hist,p0=p0,bounds=bounds)
# modelled histogram fit
model = fsum(z, *popt)
# 1 standard deviation errors in parameters
perr = np.sqrt(np.diag(pcov))
# number of points for fit and number of terms in fit
n_max = len(hist)
n_terms = len(p0)
# extract function outputs
if (FIT_TYPE == 'gaussian'):
# Gaussian function outputs
n = np.arange(n_peaks)*3
peak_amplitude = popt[n]
peak_height = popt[n+1]
peak_height_error = perr[n+1]
peak_stdev = popt[n+2]
elif (FIT_TYPE == 'general'):
# Generalized Gaussian function outputs
n = np.arange(n_peaks)*4
peak_amplitude = popt[n]
peak_height = popt[n+1]
peak_height_error = perr[n+1]
peak_stdev = popt[n+2]
# residual of fit
res = hist - model
# nu = Degrees of Freedom = number of measurements-number of parameters
nu = n_max - n_terms
# Mean square error
# MSE = (1/nu)*sum((Y-X*B)**2)
MSE = np.dot(np.transpose(hist - model),(hist - model))/nu
# Default is 95% confidence interval
alpha = 1.0 - (0.95)
# Student T-Distribution with D.O.F. nu
# t.ppf parallels tinv in matlab
tstar = scipy.stats.t.ppf(1.0-(alpha/2.0),nu)
return {'height':peak_height, 'amplitude':peak_amplitude,
'error':tstar*peak_height_error, 'stdev': peak_stdev,
'model':model, 'residuals':np.abs(res), 'MSE':MSE, 'DOF':nu}
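# Added usage sketch: fit a single gaussian peak to a synthetic histogram.
# Priors and bounds are ordered [amplitude, range, width] per peak, matching
# the bound construction used in reduce_histogram_fit above.
def _fit_histogram_example():
    z = np.arange(-5.0, 5.0, 0.1)
    hist = 10.0*np.exp(-z**2/(2.0*0.8**2))
    priors = [[8.0, 0.5, 1.0]]
    lower_bound = [0.0, -5.0, 0.1]
    upper_bound = [20.0, 5.0, 5.0]
    fit = fit_histogram(z, hist, priors, lower_bound, upper_bound,
        FIT_TYPE='gaussian')
    return fit['height']  # approximately [0.0]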
# PURPOSE: calculate delta_time, latitude and longitude of the segment center
def fit_geolocation(var, distance_along_X, X_atc):
"""
Calculate the average of photon event variables by fitting with respect
to the center of the along-track coordinates
"""
# calculate x relative to centroid point
rel_x = distance_along_X - X_atc
# design matrix
XMAT = np.transpose([np.ones_like((distance_along_X)),rel_x])
# Standard Least-Squares fitting (the [0] denotes coefficients output)
beta_mat = np.linalg.lstsq(XMAT,var,rcond=-1)[0]
# return the fitted geolocation
return beta_mat[0]
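# Added usage sketch: interpolate a photon-event variable (here delta_time)
# to the segment center at x = 10 m along-track.
def _fit_geolocation_example():
    x_atc = np.linspace(0.0, 20.0, 21)
    delta_time = 100.0 + 0.001*x_atc
    return fit_geolocation(delta_time, x_atc, 10.0)  # ~100.01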
# PURPOSE: calculate the average value from two segments
def segment_mean(var, **kwargs):
"""
Calculate the average value from two segments with possible invalid values
"""
# verify that data is masked array
if not isinstance(var, np.ma.MaskedArray):
var = np.ma.array(var)
# set default keyword arguments
kwargs.setdefault('fill_value',var.fill_value)
# verify mask is set for fill values or nan points
var.mask = ((var.data == var.fill_value) | np.isnan(var.data))
# update and replace fill values
var.data[var.mask] = var.fill_value
# calculate segment means
ave = np.ma.mean([var[0:-1],var[1:]],axis=0)
# update and replace fill values
ave.fill_value = kwargs['fill_value']
ave.data[ave.mask] = ave.fill_value
return ave
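# Added usage sketch: pairwise averages of consecutive segment values,
# carrying the fill value through to the output masked array.
def _segment_mean_example():
    var = np.ma.array([1.0, 2.0, 3.0, 4.0], fill_value=-9999.0)
    return segment_mean(var)  # masked array [1.5, 2.5, 3.5]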
# PURPOSE: estimate mean and median first photon bias corrections
def calc_first_photon_bias(temporal_residuals,n_pulses,n_pixels,dead_time,dt,
METHOD='direct',ITERATE=20):
"""
Estimate mean and median first photon bias corrections
"""
# create a histogram of the temporal residuals
t_full = np.arange(temporal_residuals.min(),temporal_residuals.max()+dt,dt)
nt = len(t_full)
# number of input photon events
cnt = len(temporal_residuals)
# using kernel density functions from scikit-learn neighbors
# gaussian kernels will reflect more accurate distributions of the data
# with less sensitivity to sampling width than histograms (tophat kernels)
kde = sklearn.neighbors.KernelDensity(bandwidth=dt,kernel='gaussian')
kde.fit(temporal_residuals[:,None])
# kde score_samples outputs are normalized log density functions
hist = np.exp(kde.score_samples(t_full[:,None]) + np.log(cnt*dt))
N0_full = hist/(n_pulses*n_pixels)
# centroid of initial histogram
hist_centroid = np.sum(t_full*hist)/np.sum(hist)
# cumulative probability distribution function of initial histogram
hist_cpdf = np.cumsum(hist/np.sum(hist))
from collections import defaultdict
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import torch
from sklearn.metrics import f1_score
from torch.optim import AdamW
from torch.optim.lr_scheduler import StepLR
from tqdm import tqdm
from transformers import SqueezeBertForTokenClassification
from scripts.split_sentences import split_sentence
from utils import f1_semeval
class LitModule(pl.LightningModule):
def __init__(self, model, tokenizer, freeze, lr, *args, **kwargs):
super().__init__(*args, **kwargs)
self.model = model
self.tokenizer = tokenizer
self.learning_rate = lr
if freeze > 0:
for name, param in self.model.base_model.embeddings.named_parameters():
if 'classifier' not in name:
param.requires_grad = False
encoder = self.model.base_model.encoder
encoder_layers = encoder.layers \
if isinstance(self.model, SqueezeBertForTokenClassification) \
else encoder.layer
layers_size = len(encoder_layers)
freeze_layers = int(layers_size * freeze)
print(f'Freeze {freeze_layers}/{layers_size}')
for name, param in encoder_layers[:freeze_layers].named_parameters():
if 'classifier' not in name:
param.requires_grad = False
train_params = sum([np.prod(p.size()) for p in filter(lambda p: p.requires_grad, self.model.parameters())])
all_params = sum([np.prod(p.size()) for p in self.model.parameters()])
print(f'Train {train_params / all_params:.4%} params')
def forward(self, *args, **kwargs):
pred = self.model(*args, **kwargs)
return pred
def training_step(self, batch, batch_nb):
outputs = self(
batch['input_ids'],
token_type_ids=None,
attention_mask=batch['attention_mask'],
labels=batch['labels']
)
loss = outputs.loss
self.log('train_loss', loss.item(), logger=True, on_step=False, on_epoch=True)
return {'loss': loss}
def validation_step(self, batch, batch_nb):
outputs = self(
batch['input_ids'],
token_type_ids=None,
attention_mask=batch['attention_mask'],
labels=batch['labels']
)
loss = outputs.loss
self.log('val_loss', loss.item(), logger=True, on_step=False, on_epoch=True)
logits = outputs.logits.detach().cpu().numpy()
y_pred = np.argmax(logits, axis=-1).astype(int)
y_true = batch['labels'].to('cpu').numpy().astype(int)
no_pad_id = batch['attention_mask'].to('cpu').numpy().astype('bool')
f1_avg = list()
for i in range(len(y_true)):
y_pred_no_pad = y_pred[i][no_pad_id[i]]
y_true_no_pad = y_true[i][no_pad_id[i]]
f1 = f1_score(y_true_no_pad, y_pred_no_pad)
f1_avg.append(f1)
self.log('f1', np.mean(np.array(f1_avg)), prog_bar=True, logger=True, on_step=False, on_epoch=True)
pad_span = batch['pad_span'].to('cpu').numpy().astype(int)
offset_mapping = batch['offset_mapping'].to('cpu').numpy().astype(int)
sentence_id = batch['sentence_id'].to('cpu').numpy().astype(int)
sentence_offset = batch['offset'].to('cpu').numpy().astype(int)
f1_semeval_avg = list()
result_spans = defaultdict(lambda: defaultdict(list))
for i in range(len(y_true)):
true_spans = list(set(pad_span[i]) - {-1}) # remove padding
predicted_offsets = offset_mapping[i][y_pred[i].astype(bool)]
predicted_spans = [i for offset in predicted_offsets for i in range(offset[0], offset[1])]
f1 = f1_semeval(predicted_spans, true_spans)
f1_semeval_avg.append(f1)
result_spans[sentence_id[i]]['true'].extend(list(np.array(true_spans) + sentence_offset[i]))
result_spans[sentence_id[i]]['pred'].extend(list(np.array(predicted_spans) + sentence_offset[i]))
self.log('f1_spans', np.mean(np.array(f1_semeval_avg)), prog_bar=True, logger=True, on_step=False,
on_epoch=True)
return result_spans
def validation_epoch_end(self, outs):
"""
:param outs: list of result_spans dicts,
where result_spans[sentence_id] = {
'true': [character offsets], # e.g. [1, 2, 3, 4]
'pred': [character offsets]
}
"""
result_spans = defaultdict(lambda: defaultdict(list))
for out in outs:
for sentence_id in out:
result_spans[sentence_id]['true'].extend(out[sentence_id]['true'])
result_spans[sentence_id]['pred'].extend(out[sentence_id]['pred'])
f1_semeval_avg = np.array([f1_semeval(result_spans[sentence_id]['true'], result_spans[sentence_id]['pred'])
for sentence_id in result_spans])
self.log('f1_spans_sentence', np.mean(f1_semeval_avg), prog_bar=True, logger=True)
def predict_dataframe(self, df, sentence_length):
self.model.eval()
self.model.cuda()
result = pd.DataFrame({'spans': pd.Series(np.zeros(len(df))).values})
for i, row in tqdm(df.iterrows(), total=len(df)):
texts, offsets, _ = split_sentence(self.tokenizer, row['text'], max_sentence_length=sentence_length)
predicted_spans = list()
for text, offset in zip(texts, offsets):
encoded = self.tokenizer(text, add_special_tokens=True, padding='max_length', truncation=True,
return_offsets_mapping=True, max_length=sentence_length)
item = {k: torch.tensor(v).unsqueeze(0).long().cuda() for k, v in encoded.items()}
output = self(item['input_ids'], token_type_ids=None, attention_mask=item['attention_mask'])
logits = output.logits.detach().cpu().numpy()
y_pred = np.argmax(logits, axis=-1).squeeze().astype(int)
predicted_offsets = np.array(encoded['offset_mapping'])[y_pred.astype(bool)]
spans = [i for offset in predicted_offsets for i in range(offset[0], offset[1])]
predicted_spans.extend(list(np.array(spans) + offset))