prompt (stringlengths 15–655k) | completion (stringlengths 3–32.4k) | api (stringlengths 8–52) |
---|---|---|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 26 08:27:50 2021
@author: utric
"""
import numpy as np
from numpy import pi as π
import matplotlib.pyplot as plt
from scipy.constants import mu_0 as μ0
import pycoilib
plt.rc('xtick',labelsize=8)
plt.rc('ytick',labelsize=8)
plt.rc('lines', linewidth=2)
plt.rc('font', size=9)
if True:
print("---------------------------------------------------------------------")
print("----1\t- CODE VALIDATION : 2 lines")
print("----1.1\t- A wire divided in two")# 1.1
p0 =
|
np.array([0. , 0., 0.])
|
numpy.array
|
#!/usr/bin/python
# coding: UTF-8
#
# Author: <NAME>
# Contact: <EMAIL>
#
# Edited: 11/05/2017
#
# Feel free to contact for any information.
from __future__ import division, print_function
import logging
import numpy as np
import time
from scipy.interpolate import interp1d
from PyEMD.PyEMD.splines import *
class EMD:
"""
Empirical Mode Decomposition
*Note:*
Default and recommended package for EMD is EMD.py.
This is meant to provide the same results as the MATLAB version of EMD,
which is not necessarily the most efficient or numerically accurate.
Method of decomposing signal into Intrinsic Mode Functions (IMFs)
based on algorithm presented in Huang et al. [1].
Algorithm was validated with Rilling et al. [2] Matlab's version from 3.2007.
[1] <NAME> et al., "The empirical mode decomposition and the
Hilbert spectrum for non-linear and non stationary time series
analysis", Proc. Royal Soc. London A, Vol. 454, pp. 903-995, 1998
[2] <NAME>, <NAME> and <NAME>, "On Empirical Mode
Decomposition and its algorithms", IEEE-EURASIP Workshop on
Nonlinear Signal and Image Processing NSIP-03, Grado (I), June 2003
"""
logger = logging.getLogger(__name__)
def __init__(self):
self.splineKind = 'cubic'
self.nbsym = 2
self.reduceScale = 1.
self.maxIteration = 500
self.scaleFactor = 100
self.FIXE = 0
self.FIXE_H = 0
self.stop1 = 0.05
self.stop2 = 0.5
self.stop3 = 0.05
self.DTYPE = np.float64
self.MAX_ITERATION = 1000
self.TIME = False
def extractMaxMinSpline(self, T, S):
"""
Input:
-----------------
S - Input signal array. Should be 1D.
T - Time array. If none passed numpy arange is created.
Output:
-----------------
maxSpline - Upper envelope of signal S.
minSpline - Bottom envelope of signal S.
maxExtrema - Position (1st row) and values (2nd row) of maxima.
minExtrema - Position (1st row) and values (2nd row) of minima.
"""
# Get indexes of extrema
maxPos, maxVal, minPos, minVal, _ = self.findExtrema(T, S)
if len(maxPos) + len(minPos) < 3: return [-1]*4
# Extrapolation of signal (over boundaries)
maxExtrema, minExtrema = self.preparePoints(S, T, maxPos, maxVal, minPos, minVal)
_, maxSpline = self.splinePoints(T, maxExtrema, self.splineKind)
_, minSpline = self.splinePoints(T, minExtrema, self.splineKind)
return maxSpline, minSpline, maxExtrema, minExtrema
def preparePoints(self, S, T, maxPos, maxVal, minPos, minVal):
"""
Adds to signal extrema according to mirror technique.
Number of added points depends on nbsym variable.
Input:
---------
S: Signal (1D numpy array).
T: Timeline (1D numpy array).
maxPos: sorted time positions of maxima.
maxVal: signal values at maxPos positions.
minPos: sorted time positions of minima.
minVal: signal values at minPos positions.
Output:
---------
minExtrema: Position (1st row) and values (2nd row) of minima.
maxExtrema: Position (1st row) and values (2nd row) of maxima.
"""
# Find indices for time array of extrema
indmin = np.array([np.nonzero(T==t)[0] for t in minPos]).flatten()
indmax = np.array([np.nonzero(T==t)[0] for t in maxPos]).flatten()
# Local variables
nbsym = self.nbsym
endMin, endMax = len(minPos), len(maxPos)
####################################
# Left bound - mirror nbsym points to the left
if indmax[0] < indmin[0]:
if S[0] > S[indmin[0]]:
lmax = indmax[1:min(endMax,nbsym+1)][::-1]
lmin = indmin[0:min(endMin,nbsym+0)][::-1]
lsym = indmax[0]
else:
lmax = indmax[0:min(endMax,nbsym)][::-1]
lmin = np.append(indmin[0:min(endMin,nbsym-1)][::-1],0)
lsym = 0
else:
if S[0] < S[indmax[0]]:
lmax = indmax[0:min(endMax,nbsym+0)][::-1]
lmin = indmin[1:min(endMin,nbsym+1)][::-1]
lsym = indmin[0]
else:
lmax = np.append(indmax[0:min(endMax,nbsym-1)][::-1],0)
lmin = indmin[0:min(endMin,nbsym)][::-1]
lsym = 0
####################################
# Right bound - mirror nbsym points to the right
if indmax[-1] < indmin[-1]:
if S[-1] < S[indmax[-1]]:
rmax = indmax[max(endMax-nbsym,0):][::-1]
rmin = indmin[max(endMin-nbsym-1,0):-1][::-1]
rsym = indmin[-1]
else:
rmax = np.append(indmax[max(endMax-nbsym+1,0):], len(S)-1)[::-1]
rmin = indmin[max(endMin-nbsym,0):][::-1]
rsym = len(S)-1
else:
if S[-1] > S[indmin[-1]]:
rmax = indmax[max(endMax-nbsym-1,0):-1][::-1]
rmin = indmin[max(endMin-nbsym,0):][::-1]
rsym = indmax[-1]
else:
rmax = indmax[max(endMax-nbsym,0):][::-1]
rmin = np.append(indmin[max(endMin-nbsym+1,0):], len(S)-1)[::-1]
rsym = len(S)-1
# In case any array is missing
if not lmin.size: lmin = indmin
if not rmin.size: rmin = indmin
if not lmax.size: lmax = indmax
if not rmax.size: rmax = indmax
# Mirror points
tlmin = 2*T[lsym]-T[lmin]
tlmax = 2*T[lsym]-T[lmax]
trmin = 2*T[rsym]-T[rmin]
trmax = 2*T[rsym]-T[rmax]
# If the mirrored points do not extend beyond the passed time range, mirror again about the boundary.
if tlmin[0] > T[0] or tlmax[0] > T[0]:
if lsym == indmax[0]:
lmax = indmax[0:min(endMax,nbsym)][::-1]
else:
lmin = indmin[0:min(endMin,nbsym)][::-1]
if lsym == 0:
raise Exception('bug')
lsym = 0
tlmin = 2*T[lsym]-T[lmin]
tlmax = 2*T[lsym]-T[lmax]
if trmin[-1] < T[-1] or trmax[-1] < T[-1]:
if rsym == indmax[-1]:
rmax = indmax[max(endMax-nbsym,0):][::-1]
else:
rmin = indmin[max(endMin-nbsym,0):][::-1]
if rsym == len(S)-1:
raise Exception('bug')
rsym = len(S)-1
trmin = 2*T[rsym]-T[rmin]
trmax = 2*T[rsym]-T[rmax]
zlmax = S[lmax]
zlmin = S[lmin]
zrmax = S[rmax]
zrmin = S[rmin]
tmin = np.append(tlmin, np.append(T[indmin], trmin))
tmax = np.append(tlmax, np.append(T[indmax], trmax))
zmin = np.append(zlmin, np.append(S[indmin], zrmin))
zmax = np.append(zlmax, np.append(S[indmax], zrmax))
maxExtrema = np.array([tmax, zmax], dtype=self.DTYPE)
minExtrema = np.array([tmin, zmin], dtype=self.DTYPE)
# Make doubly sure that each extremum is significant
maxExtrema = np.delete(maxExtrema, np.where(maxExtrema[0,1:]==maxExtrema[0,:-1]),axis=1)
minExtrema = np.delete(minExtrema, np.where(minExtrema[0,1:]==minExtrema[0,:-1]),axis=1)
return maxExtrema, minExtrema
def splinePoints(self, T, extrema, splineKind):
"""
Constructs spline over given points.
Input:
---------
T: Time array.
extrema: Position (1st row) and values (2nd row) of points.
splineKind: Type of spline.
Output:
---------
T: Position array.
spline: Spline over the given points.
"""
kind = splineKind.lower()
t = T[np.r_[T>=extrema[0,0]] & np.r_[T<=extrema[0,-1]]]
if t.dtype != self.DTYPE: self.logger.error('t.dtype: '+str(t.dtype))
if extrema.dtype != self.DTYPE: self.logger.error('extrema.dtype: '+str(extrema.dtype))
if kind == "akima":
return t, akima(extrema[0], extrema[1], t)
elif kind == 'cubic':
if extrema.shape[1]>3:
return t, interp1d(extrema[0], extrema[1], kind=kind)(t).astype(self.DTYPE)
else:
return self.cubicSpline_3points(T, extrema)
elif kind in ['slinear', 'quadratic', 'linear']:
return t, interp1d(extrema[0], extrema[1], kind=kind)(t).astype(self.DTYPE)
else:
raise ValueError("No such interpolation method!")
def cubicSpline_3points(self, T, extrema):
"""
Apparently scipy.interpolate.interp1d does not support
cubic splines for fewer than 4 points.
"""
x0, x1, x2 = extrema[0]
y0, y1, y2 = extrema[1]
x1x0, x2x1 = x1-x0, x2-x1
y1y0, y2y1 = y1-y0, y2-y1
_x1x0, _x2x1 = 1./x1x0, 1./x2x1
m11, m12, m13= 2*_x1x0, _x1x0, 0
m21, m22, m23 = _x1x0, 2.*(_x1x0+_x2x1), _x2x1
m31, m32, m33 = 0, _x2x1, 2.*_x2x1
v1 = 3*y1y0*_x1x0*_x1x0
v3 = 3*y2y1*_x2x1*_x2x1
v2 = v1+v3
M = np.array([[m11,m12,m13],[m21,m22,m23],[m31,m32,m33]])
v = np.array([v1,v2,v3])
k = np.linalg.solve(M, v)
a1 = k[0]*x1x0 - y1y0
b1 =-k[1]*x1x0 + y1y0
a2 = k[1]*x2x1 - y2y1
b2 =-k[2]*x2x1 + y2y1
t = T[np.r_[T>=x0] & np.r_[T<=x2]]
t1 = (T[np.r_[T>=x0]&np.r_[T< x1]] - x0)/x1x0
t2 = (T[np.r_[T>=x1]&np.r_[T<=x2]] - x1)/x2x1
t11, t22 = 1.-t1, 1.-t2
q1 = t11*y0 + t1*y1 + t1*t11*(a1*t11 + b1*t1)
q2 = t22*y1 + t2*y2 + t2*t22*(a2*t22 + b2*t2)
q = np.append(q1,q2)
return t, q.astype(self.DTYPE)
@classmethod
def findExtrema(cls, t, s):
"""
Finds extrema and zero-crossings.
Input:
---------
S: Signal.
T: Time array.
Output:
---------
localMaxPos: Time positions of maxima.
localMaxVal: Values of signal at localMaxPos positions.
localMinPos: Time positions of minima.
localMinVal: Values of signal at localMinPos positions.
indzer: Indexes of zero crossings.
"""
# Finds indexes of zero-crossings
s1, s2 = s[:-1], s[1:]
indzer = np.nonzero(s1*s2<0)[0]
if
|
np.any(s==0)
|
numpy.any
|
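A minimal standalone sketch (not part of the dataset row above) of the boundary "mirror" technique described in the preparePoints docstring: extrema positions are reflected about a symmetry point, i.e. t_mirror = 2*t_sym - t. All names below are illustrative.
import numpy as np

T = np.arange(10, dtype=float)          # toy timeline
extrema_idx = np.array([2, 5, 8])       # toy extrema positions (indices into T)
t_sym = T[0]                            # reflect about the left boundary
t_mirror = 2 * t_sym - T[extrema_idx]   # mirrored positions lie left of T[0]
print(t_mirror)                         # -> [-2. -5. -8.]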
import torch
import numpy as np
import csv
from base_config import args, result_template, evaluation_folder
from base_read_data import domain_slot_type_map, tokenizer, domain_slot_list, approximate_equal_test
use_variant = args['use_variant']
def batch_eval(batch_predict_label_dict, batch):
result = {}
for domain_slot in domain_slot_list:
confusion_mat = np.zeros([5, len(batch_predict_label_dict[domain_slot])]) # 5 rows: tp, tn, fp, fn, plfp
predict_result = batch_predict_label_dict[domain_slot]
# Note: the cumulative label should be used here
label_result = [item[domain_slot] if domain_slot in item else 'none' for item in batch[4]]
assert len(label_result) == len(predict_result)
for idx in range(len(predict_result)):
predict, label = predict_result[idx], label_result[idx]
equal = approximate_equal_test(predict, label, use_variant)
if label != 'none' and predict != 'none' and equal:
confusion_mat[0, idx] = 1
elif label == 'none' and predict == 'none':
confusion_mat[1, idx] = 1
elif label == 'none' and predict != 'none':
confusion_mat[2, idx] = 1
elif label != 'none' and predict == 'none':
confusion_mat[3, idx] = 1
elif label != 'none' and predict != 'none' and not equal:
confusion_mat[4, idx] = 1
else:
raise ValueError(' ')
result[domain_slot] = confusion_mat
return result
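# Illustrative helper (not in the original file): how a (label, predict) pair maps onto
# the five rows of the confusion matrix built in batch_eval above.
def _confusion_row(label, predict, equal):
    if label != 'none' and predict != 'none' and equal:
        return 0  # true positive
    if label == 'none' and predict == 'none':
        return 1  # true negative
    if label == 'none' and predict != 'none':
        return 2  # false positive
    if label != 'none' and predict == 'none':
        return 3  # false negative
    return 4      # positive label, wrong predicted value
# e.g. _confusion_row('hotel-east', 'hotel-west', equal=False) -> 4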
def comprehensive_eval(result_list, data_type, process_name, epoch):
data_size = -1
reorganized_result_dict, slot_result_dict, domain_result_dict = {}, {}, {}
for domain_slot in domain_slot_list:
reorganized_result_dict[domain_slot] = []
for batch_result in result_list:
for domain_slot in batch_result:
reorganized_result_dict[domain_slot].append(batch_result[domain_slot])
for domain_slot in domain_slot_list:
reorganized_result_dict[domain_slot] = np.concatenate(reorganized_result_dict[domain_slot], axis=1)
data_size = len(reorganized_result_dict[domain_slot][0])
general_result = np.ones(data_size)
for domain_slot in domain_slot_list:
domain_result_dict[domain_slot.strip().split('-')[0]] = np.ones(data_size)
# data structure of reorganized_result: {domain_slot_name: ndarray}, ndarray shape [five prediction types, sample_size]
# tp, tn, fp, fn, plfp (positive label false prediction)
for domain_slot in domain_slot_list:
slot_tp, slot_tn = reorganized_result_dict[domain_slot][0, :], reorganized_result_dict[domain_slot][1, :]
slot_correct = np.logical_or(slot_tn, slot_tp)
general_result *= slot_correct
domain = domain_slot.strip().split('-')[0]
domain_result_dict[domain] *= slot_correct
general_acc = np.sum(general_result) / len(general_result)
domain_acc_dict = {}
for domain in domain_result_dict:
domain_acc_dict[domain] = np.sum(domain_result_dict[domain]) / len(domain_result_dict[domain])
write_rows = []
for config_item in args:
write_rows.append([config_item, args[config_item]])
result_rows = []
head = ['category', 'accuracy', 'recall', 'precision', 'tp', 'tn', 'fp', 'fn', 'plfp']
result_rows.append(head)
general_acc = str(round(general_acc*100, 2)) + "%"
result_rows.append(['general', general_acc])
for domain in domain_acc_dict:
result_rows.append([domain, str(round(domain_acc_dict[domain]*100, 2))+"%"])
for domain_slot in domain_slot_list:
result = reorganized_result_dict[domain_slot]
tp, tn, fp, fn, plfp = result[0, :], result[1, :], result[2, :], result[3, :], result[4, :]
recall = str(round(100*np.sum(tp) / (np.sum(tp) + np.sum(fn) +
|
np.sum(plfp)
|
numpy.sum
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 1 16:41:11 2019
@author: ConnorK
"""
import numpy as np
from math import sqrt
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import mean_squared_error
import csv
class RegressionModel():
def __init__(self):
model = self.run_regression()
self.model = model
def run_regression(self):
## Read data
with open('node_data.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
node_position = []
node_degree = []
node_centrality = []
node_centrality_ratio = []
node_infection = []
ball_dist = []
for row in csv_reader:
if line_count == 0:
node_position = list(map(float,row))
if line_count == 1:
node_degree = list(map(float,row))
if line_count == 2:
node_centrality = list(map(float,row))
if line_count % 2 == 1 and line_count > 2:
node_infection.append(list(map(float,row)))
ratio = np.multiply(np.array(node_infection[-1]),
|
np.array(node_centrality)
|
numpy.array
|
#Align GiSAXS sample
import numpy as np
def run_giwaxs(t=1): #2020C1
# define names of samples on sample bar
sample_list = ['5_1_MeDPP_glassOTS_PhMe_none','5_2_MeDPP_glassOTS_PhMe_60minVSADCM','SC1_8_60minVSADCM','SC1_8_PEDOTPSS']
x_list = [47200.000,37200.000,23700.000,15700.000]
assert len(x_list) == len(sample_list), 'Sample name/position list is borked'
angle_arc = np.array([0.08, 0.1, 0.15, 0.2]) # incident angles
waxs_angle_array =
|
np.linspace(0, 19.5, 4)
|
numpy.linspace
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import copy
from itertools import combinations_with_replacement
import time
from pathlib import Path
import numpy as np
from monty.serialization import dumpfn, loadfn
from pymatgen.core.structure import Molecule
from pymatgen.analysis.graphs import MoleculeGraph, MolGraphSplitError
from pymatgen.analysis.local_env import OpenBabelNN, metal_edge_extender
def combine_mol_graphs(molgraph_1: MoleculeGraph, molgraph_2: MoleculeGraph) -> MoleculeGraph:
"""
Create a combined MoleculeGraph based on two initial MoleculeGraphs.
Args:
molgraph_1 (MoleculeGraph)
molgraph_2 (MoleculeGraph)
Returns:
copy_1 (MoleculeGraph)
"""
# This isn't strictly necessary, but we center both molecules and shift the second.
# For 3D structure generation, having the two molecules appropriately separated is
# helpful
radius_1 = np.amax(molgraph_1.molecule.distance_matrix)
radius_2 =
|
np.amax(molgraph_2.molecule.distance_matrix)
|
numpy.amax
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 30 16:40:47 2019
@author: aimachine
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 30 14:38:04 2019
@author: aimachine
"""
import numpy as np
import os
#from IPython.display import clear_output
from stardist.models import Config3D, StarDist3D
from stardist import Rays_GoldenSpiral,calculate_extents
from scipy.ndimage import binary_fill_holes
from scipy.ndimage.measurements import find_objects
from scipy.ndimage import binary_dilation
from csbdeep.utils import normalize
import glob
import cv2
from csbdeep.io import load_training_data
from csbdeep.utils import axes_dict
from csbdeep.models import Config, CARE
from tifffile import imread
from tensorflow.keras.utils import Sequence
from csbdeep.data import RawData, create_patches
from skimage.measure import label, regionprops
from scipy import ndimage
from tqdm import tqdm
import matplotlib.pyplot as plt
from pathlib import Path
from tifffile import imread, imwrite
from csbdeep.utils import plot_history
def _raise(e):
raise e
def _fill_label_holes(lbl_img, **kwargs):
lbl_img_filled = np.zeros_like(lbl_img)
for l in (set(np.unique(lbl_img)) - set([0])):
mask = lbl_img==l
mask_filled = binary_fill_holes(mask,**kwargs)
lbl_img_filled[mask_filled] = l
return lbl_img_filled
def fill_label_holes(lbl_img, **kwargs):
"""Fill small holes in label image."""
# TODO: refactor 'fill_label_holes' and 'edt_prob' to share code
def grow(sl,interior):
return tuple(slice(s.start-int(w[0]),s.stop+int(w[1])) for s,w in zip(sl,interior))
def shrink(interior):
return tuple(slice(int(w[0]),(-1 if w[1] else None)) for w in interior)
objects = find_objects(lbl_img)
lbl_img_filled = np.zeros_like(lbl_img)
for i,sl in enumerate(objects,1):
if sl is None: continue
interior = [(s.start>0,s.stop<sz) for s,sz in zip(sl,lbl_img.shape)]
shrink_slice = shrink(interior)
grown_mask = lbl_img[grow(sl,interior)]==i
mask_filled = binary_fill_holes(grown_mask,**kwargs)[shrink_slice]
lbl_img_filled[sl][mask_filled] = i
return lbl_img_filled
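# Minimal usage sketch (toy data, an assumption rather than the original pipeline):
# fill_label_holes closes a one-pixel hole inside a single labelled object.
def _demo_fill_label_holes():
    lbl = np.zeros((5, 5), dtype=np.uint16)
    lbl[1:4, 1:4] = 1
    lbl[2, 2] = 0                        # poke a hole into the object
    assert fill_label_holes(lbl)[2, 2] == 1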
def dilate_label_holes(lbl_img, iterations):
lbl_img_filled = np.zeros_like(lbl_img)
for l in (range(np.min(lbl_img), np.max(lbl_img) + 1)):
mask = lbl_img==l
mask_filled = binary_dilation(mask,iterations = iterations)
lbl_img_filled[mask_filled] = l
return lbl_img_filled
def erode_labels(segmentation, erosion_iterations= 2):
# create empty list where the eroded masks can be saved to
list_of_eroded_masks = list()
regions = regionprops(segmentation)
erode = np.zeros(segmentation.shape)
def erode_mask(segmentation_labels, label_id, erosion_iterations):
only_current_label_id = np.where(segmentation_labels == label_id, 1, 0)
eroded = ndimage.binary_erosion(only_current_label_id, iterations = erosion_iterations)
relabeled_eroded = np.where(eroded == 1, label_id, 0)
return(relabeled_eroded)
for i in range(len(regions)):
label_id = regions[i].label
erode = erode + erode_mask(segmentation, label_id, erosion_iterations)
# convert list of numpy arrays to stacked numpy array
return erode
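# Minimal sketch (toy data, not from the original training code): erode_labels shrinks
# every labelled object by the requested number of binary-erosion iterations while
# keeping its label id.
def _demo_erode_labels():
    seg = np.zeros((7, 7), dtype=np.uint16)
    seg[1:6, 1:6] = 3                    # one 5x5 object with label 3
    eroded = erode_labels(seg, erosion_iterations=1)
    assert eroded[3, 3] == 3 and eroded[1, 1] == 0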
class SmartSeeds3D(object):
def __init__(self, base_dir, npz_filename, model_name, model_dir, n_patches_per_image, raw_dir = '/Raw/', real_mask_dir = '/real_mask/', binary_mask_dir = '/binary_mask/',
val_raw_dir = '/val_raw/', val_real_mask_dir = '/val_real_mask/', n_channel_in = 1, downsample_factor = 1, backbone = 'resnet', load_data_sequence = True, train_unet = True, train_star = True, generate_npz = True,
validation_split = 0.01, erosion_iterations = 2, patch_x=256, patch_y=256, patch_z = 16, grid_x = 1, grid_y = 1, annisotropy = (1,1,1), use_gpu = True, batch_size = 4, depth = 3, kern_size = 3, startfilter = 48, n_rays = 16, epochs = 400, learning_rate = 0.0001):
self.npz_filename = npz_filename
self.base_dir = base_dir
self.downsample_factor = downsample_factor
self.model_dir = model_dir
self.backbone = backbone
self.raw_dir = raw_dir
self.real_mask_dir = real_mask_dir
self.val_raw_dir = val_raw_dir
self.val_real_mask_dir = val_real_mask_dir
self.binary_mask_dir = binary_mask_dir
self.generate_npz = generate_npz
self.annisotropy = annisotropy
self.train_unet = train_unet
self.train_star = train_star
self.model_name = model_name
self.epochs = epochs
self.learning_rate = learning_rate
self.depth = depth
self.n_channel_in = n_channel_in
self.n_rays = n_rays
self.erosion_iterations = erosion_iterations
self.kern_size = kern_size
self.patch_x = patch_x
self.patch_y = patch_y
self.patch_z = patch_z
self.grid_x = grid_x
self.grid_y = grid_y
self.validation_split = validation_split
self.batch_size = batch_size
self.use_gpu = use_gpu
self.startfilter = startfilter
self.n_patches_per_image = n_patches_per_image
self.load_data_sequence = load_data_sequence
self.Train()
class DataSequencer(Sequence):
def __init__(self, files, axis_norm, Normalize = True, labelMe = False):
super().__init__()
self.files = files
self.axis_norm = axis_norm
self.labelMe = labelMe
self.Normalize = Normalize
def __len__(self):
return len(self.files)
def __getitem__(self, i):
#Read Raw images
if self.Normalize == True:
x = read_float(self.files[i])
x = normalize(x,1,99.8,axis= self.axis_norm)
x = x
if self.labelMe == True:
#Read Label images
x = read_int(self.files[i])
x = x
return x
def Train(self):
Raw = sorted(glob.glob(self.base_dir + self.raw_dir + '*.tif'))
Path(self.base_dir + self.binary_mask_dir).mkdir(exist_ok=True)
Path(self.base_dir + self.real_mask_dir).mkdir(exist_ok=True)
RealMask = sorted(glob.glob(self.base_dir + self.real_mask_dir + '*.tif'))
ValRaw = sorted(glob.glob(self.base_dir + self.val_raw_dir + '*.tif'))
ValRealMask = sorted(glob.glob(self.base_dir + self.val_real_mask_dir + '*.tif'))
Mask = sorted(glob.glob(self.base_dir + self.binary_mask_dir + '*.tif'))
print('Instance segmentation masks:', len(RealMask))
print('Semantic segmentation masks:', len(Mask))
if self.train_star and len(Mask) > 0 and len(RealMask) < len(Mask):
print('Making labels')
Mask = sorted(glob.glob(self.base_dir + self.binary_mask_dir + '*.tif'))
for fname in Mask:
image = imread(fname)
Name = os.path.basename(os.path.splitext(fname)[0])
if np.max(image) == 1:
image = image * 255
Binaryimage = label(image)
imwrite((self.base_dir + self.real_mask_dir + Name + '.tif'), Binaryimage.astype('uint16'))
if self.train_unet and len(RealMask) > 0 and len(Mask) < len(RealMask):
print('Generating Binary images')
RealfilesMask = sorted(glob.glob(self.base_dir + self.real_mask_dir + '*tif'))
for fname in RealfilesMask:
image = imread(fname)
if self.erosion_iterations > 0:
image = erode_labels(image.astype('uint16'), self.erosion_iterations)
Name = os.path.basename(os.path.splitext(fname)[0])
Binaryimage = image > 0
imwrite((self.base_dir + self.binary_mask_dir + Name + '.tif'), Binaryimage.astype('uint16'))
if self.generate_npz:
raw_data = RawData.from_folder (
basepath = self.base_dir,
source_dirs = [self.raw_dir],
target_dir = self.binary_mask_dir,
axes = 'ZYX',
)
X, Y, XY_axes = create_patches (
raw_data = raw_data,
patch_size = (self.patch_z,self.patch_y,self.patch_x),
n_patches_per_image = self.n_patches_per_image,
save_file = self.base_dir + self.npz_filename + '.npz',
)
# Training UNET model
if self.train_unet:
print('Training UNET model')
load_path = self.base_dir + self.npz_filename + '.npz'
(X,Y), (X_val,Y_val), axes = load_training_data(load_path, validation_split=self.validation_split, verbose=True)
c = axes_dict(axes)['C']
n_channel_in, n_channel_out = X.shape[c], Y.shape[c]
config = Config(axes, n_channel_in, n_channel_out, unet_n_depth= self.depth,train_epochs= self.epochs, train_batch_size = self.batch_size, unet_n_first = self.startfilter, train_loss = 'mse', unet_kern_size = self.kern_size, train_learning_rate = self.learning_rate, train_reduce_lr={'patience': 5, 'factor': 0.5})
print(config)
vars(config)
model = CARE(config , name = 'UNET' + self.model_name, basedir = self.model_dir)
if os.path.exists(self.model_dir + 'UNET' + self.model_name + '/' + 'weights_now.h5'):
print('Loading checkpoint model')
model.load_weights(self.model_dir + 'UNET' + self.model_name + '/' + 'weights_now.h5')
if os.path.exists(self.model_dir + 'UNET' + self.model_name + '/' + 'weights_last.h5'):
print('Loading checkpoint model')
model.load_weights(self.model_dir + 'UNET' + self.model_name + '/' + 'weights_last.h5')
if os.path.exists(self.model_dir + 'UNET' + self.model_name + '/' + 'weights_best.h5'):
print('Loading checkpoint model')
model.load_weights(self.model_dir + 'UNET' + self.model_name + '/' + 'weights_best.h5')
history = model.train(X,Y, validation_data=(X_val,Y_val))
print(sorted(list(history.history.keys())))
plt.figure(figsize=(16,5))
plot_history(history,['loss','val_loss'],['mse','val_mse','mae','val_mae'])
if self.train_star:
print('Training StarDist3D model with', self.backbone, 'backbone')
self.axis_norm = (0,1,2)
if self.load_data_sequence == False:
assert len(Raw) > 1, "not enough training data"
print(len(Raw))
rng = np.random.RandomState(42)
ind = rng.permutation(len(Raw))
X_train = list(map(read_float,Raw))
Y_train = list(map(read_int,RealMask))
self.Y = [label(DownsampleData(y, self.downsample_factor)) for y in tqdm(Y_train)]
self.X = [normalize(DownsampleData(x, self.downsample_factor),1,99.8,axis=self.axis_norm) for x in tqdm(X_train)]
n_val = max(1, int(round(0.15 * len(ind))))
ind_train, ind_val = ind[:-n_val], ind[-n_val:]
self.X_val, self.Y_val = [self.X[i] for i in ind_val] , [self.Y[i] for i in ind_val]
self.X_trn, self.Y_trn = [self.X[i] for i in ind_train], [self.Y[i] for i in ind_train]
print('number of images: %3d' % len(self.X))
print('- training: %3d' % len(self.X_trn))
print('- validation: %3d' % len(self.X_val))
if self.load_data_sequence:
self.X_trn = self.DataSequencer(Raw, self.axis_norm, Normalize = True, labelMe = False)
self.Y_trn = self.DataSequencer(RealMask, self.axis_norm, Normalize = False, labelMe = True)
self.X_val = self.DataSequencer(ValRaw, self.axis_norm, Normalize = True, labelMe = False)
self.Y_val = self.DataSequencer(ValRealMask, self.axis_norm, Normalize = False, labelMe = True)
self.train_sample_cache = False
print(Config3D.__doc__)
extents = calculate_extents(self.Y_trn)
self.annisotropy = tuple(np.max(extents) / extents)
rays = Rays_GoldenSpiral(self.n_rays, anisotropy=self.annisotropy)
if self.backbone == 'resnet':
conf = Config3D (
rays = rays,
anisotropy = self.annisotropy,
backbone = self.backbone,
train_epochs = self.epochs,
train_learning_rate = self.learning_rate,
resnet_n_blocks = self.depth,
train_checkpoint = self.model_dir + self.model_name +'.h5',
resnet_kernel_size = (self.kern_size, self.kern_size, self.kern_size),
train_patch_size = (self.patch_z, self.patch_x, self.patch_y ),
train_batch_size = self.batch_size,
resnet_n_filter_base = self.startfilter,
train_dist_loss = 'mse',
grid = (1,self.grid_y,self.grid_x),
use_gpu = self.use_gpu,
n_channel_in = self.n_channel_in
)
if self.backbone == 'unet':
conf = Config3D (
rays = rays,
anisotropy = self.annisotropy,
backbone = self.backbone,
train_epochs = self.epochs,
train_learning_rate = self.learning_rate,
unet_n_depth = self.depth,
train_checkpoint = self.model_dir + self.model_name +'.h5',
unet_kernel_size = (self.kern_size, self.kern_size, self.kern_size),
train_patch_size = (self.patch_z, self.patch_x, self.patch_y ),
train_batch_size = self.batch_size,
unet_n_filter_base = self.startfilter,
train_dist_loss = 'mse',
grid = (1,self.grid_y,self.grid_x),
use_gpu = self.use_gpu,
n_channel_in = self.n_channel_in,
train_sample_cache = False
)
print(conf)
vars(conf)
Starmodel = StarDist3D(conf, name=self.model_name, basedir=self.model_dir)
print(Starmodel._axes_tile_overlap('ZYX'), os.path.exists(self.model_dir + self.model_name + '/' + 'weights_now.h5'))
if os.path.exists(self.model_dir + self.model_name + '/' + 'weights_now.h5'):
print('Loading checkpoint model')
Starmodel.load_weights(self.model_dir + self.model_name + '/' + 'weights_now.h5')
if os.path.exists(self.model_dir + self.model_name + '/' + 'weights_last.h5'):
print('Loading checkpoint model')
Starmodel.load_weights(self.model_dir + self.model_name + '/' + 'weights_last.h5')
if os.path.exists(self.model_dir + self.model_name + '/' + 'weights_best.h5'):
print('Loading checkpoint model')
Starmodel.load_weights(self.model_dir + self.model_name + '/' + 'weights_best.h5')
historyStar = Starmodel.train(self.X_trn, self.Y_trn, validation_data=(self.X_val,self.Y_val), epochs = self.epochs)
print(sorted(list(historyStar.history.keys())))
plt.figure(figsize=(16,5))
plot_history(historyStar,['loss','val_loss'],['dist_relevant_mae','val_dist_relevant_mae','dist_relevant_mse','val_dist_relevant_mse'])
def read_float(fname):
return imread(fname).astype('float32')
def read_int(fname):
return imread(fname).astype('uint16')
def DownsampleData(image, downsample_factor):
scale_percent = int(100/downsample_factor) # percent of original size
width = int(image.shape[2] * scale_percent / 100)
height = int(image.shape[1] * scale_percent / 100)
dim = (width, height)
smallimage =
|
np.zeros([image.shape[0], height,width])
|
numpy.zeros
|
# Copyright (C) 2019 GreenWaves Technologies
# All rights reserved.
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
import logging
import numpy as np
from execution.kernels.conv2d import conv2d
from execution.kernels.linear import linear
from execution.kernels.misc import activation, concat
from execution.kernels.pool import av_pool, max_pool
from graph.dim import (Conv2DFilterDim, DilationDim, Dim, FcFilterDim, PadDim,
PoolFilterDim, StrideDim)
from graph.types import (ActivationParameters, ConcatParameters,
Conv2DParameters, FcParameters, PoolingParameters)
from quantization.qtype import QType
from quantization.quantization_record import (FilterQuantizationRecord,
QuantizationRecord)
def test_conf2d_normal():
weights = np.arange(9).reshape([1, 1, 3, 3])
filt = Conv2DFilterDim(3, 3, 1, 1)
stride = StrideDim(1)
pad = PadDim(0)
dilation = DilationDim(1)
params = Conv2DParameters("test", filt, stride, pad, dilation)
input_ = np.arange(16).reshape([1, 4, 4])
in_dims = Dim.named(c=1, h=4, w=4).impose_order(['c', 'h', 'w'])
out_dims = params.get_output_size([in_dims])
details = {}
output_ = conv2d(params, in_dims, out_dims[0], input_, weights, None, details=details)
# assert details['max_acc'] == 438.0 and details['min_acc'] == 258.0
assert np.array_equal(output_, [[[258, 294], [402, 438]]])
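# Independent cross-check (an assumption, not part of the original test suite): the
# reference numbers above equal a plain 'valid' cross-correlation of the same input
# and kernel; scipy is used only for this sketch.
def test_conf2d_reference_numbers():
    from scipy.signal import correlate2d
    w = np.arange(9).reshape(3, 3)
    x = np.arange(16).reshape(4, 4)
    assert np.array_equal(correlate2d(x, w, mode='valid'), [[258, 294], [402, 438]])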
def test_conf2d_depth():
# TF Lite depthwise convolution
weights = np.arange(9).reshape([3, 3])
weights = np.repeat(weights, 2).reshape([1, 3, 3, 2])
filt = Conv2DFilterDim(3, 3, 2, 1).impose_order(["in_c", "h", "w", "out_c"])
stride = StrideDim(1)
pad = PadDim(0)
dilation = DilationDim(1)
params = Conv2DParameters("test", filt, stride, pad, dilation, groups=1,
multiplier=2, tf_depthwise=True)
input_ = np.arange(16).reshape([1, 4, 4])
in_dims = Dim.named(c=1, h=4, w=4).impose_order(['c', 'h', 'w'])
out_dims = params.get_output_size([in_dims])
output_ = conv2d(params, in_dims, out_dims[0], input_, weights, None)
assert np.array_equal(output_, [[[258, 294], [402, 438]], [[258, 294], [402, 438]]])
def test_conf2d_depth_q():
calc_q = QType(32, 9, True)
biases_q = acc_q = out_q = QType(16, 4, True)
weights_q = QType(16, 4, True)
in_q = QType(16, 5, True)
# TF Lite depthwise convolution
biases = np.full([2], 0.5)
qbiases = biases_q.quantize(biases)
weights = np.full([3, 3], 0.5)
weights = np.repeat(weights, 2).reshape([1, 3, 3, 2])
qweights = weights_q.quantize(weights)
filt = Conv2DFilterDim(3, 3, 2, 1).impose_order(["in_c", "h", "w", "out_c"])
stride = StrideDim(1)
pad = PadDim(0)
dilation = DilationDim(1)
params = Conv2DParameters("test", filt, stride, pad, dilation,
groups=1, multiplier=2, tf_depthwise=True)
qrec = FilterQuantizationRecord(in_qs=[in_q], out_qs=[out_q], weights_q=weights_q,
biases_q=biases_q, acc_q=acc_q, calc_q=calc_q)
input_ = np.full([1, 4, 4], 2)
qinput_ = in_q.quantize(input_)
in_dims = Dim.named(c=1, h=4, w=4).impose_order(['c', 'h', 'w'])
out_dims = params.get_output_size([in_dims])
output_ = conv2d(params, in_dims, out_dims[0], input_, weights, biases)
qoutput_ = conv2d(params, in_dims, out_dims[0], qinput_, qweights, qbiases, qrec=qrec)
dqoutput_ = out_q.dequantize(qoutput_)
assert np.array_equal(output_, dqoutput_)
def test_conf2d_depth2():
# TF Lite depthwise convolution
weights = np.arange(9).reshape([3, 3])
weights = np.repeat(weights, 4).reshape([1, 3, 3, 4])
filt = Conv2DFilterDim(3, 3, 4, 1).impose_order(["in_c", "h", "w", "out_c"])
stride = StrideDim(1)
pad = PadDim(0)
dilation = DilationDim(1)
params = Conv2DParameters("test", filt, stride, pad, dilation, groups=2,
multiplier=2, tf_depthwise=True)
input_ = np.arange(16).reshape([4, 4])
input_ = np.concatenate((input_, input_)).reshape([2, 4, 4])
in_dims = Dim.named(c=2, h=4, w=4).impose_order(['c', 'h', 'w'])
out_dims = params.get_output_size([in_dims])
output_ = conv2d(params, in_dims, out_dims[0], input_, weights, None)
assert np.array_equal(output_, [[[258, 294], [402, 438]], [[258, 294], [402, 438]],\
[[258, 294], [402, 438]], [[258, 294], [402, 438]]])
def test_conf2d_q():
calc_q = QType(32, 9, True)
biases_q = acc_q = out_q = QType(16, 4, True)
weights_q = QType(16, 4, True)
in_q = QType(16, 5, True)
biases = np.full([1], 0.5)
qbiases = biases_q.quantize(biases)
weights = np.full([3, 3], 0.5)
weights = np.repeat(weights, 2).reshape([2, 3, 3, 1])
qweights = weights_q.quantize(weights)
filt = Conv2DFilterDim(3, 3, 1, 2).impose_order(["in_c", "h", "w", "out_c"])
stride = StrideDim(1)
pad = PadDim(0)
dilation = DilationDim(1)
params = Conv2DParameters("test", filt, stride, pad, dilation)
qrec = FilterQuantizationRecord(in_qs=[in_q], out_qs=[out_q], weights_q=weights_q,
biases_q=biases_q, acc_q=acc_q, calc_q=calc_q)
input_ = np.full([2, 4, 4], 2)
qinput_ = in_q.quantize(input_)
in_dims = Dim.named(c=2, h=4, w=4).impose_order(['c', 'h', 'w'])
out_dims = params.get_output_size([in_dims])
output_ = conv2d(params, in_dims, out_dims[0], input_, weights, biases)
qoutput_ = conv2d(params, in_dims, out_dims[0], qinput_, qweights, qbiases, qrec=qrec)
dqoutput_ = out_q.dequantize(qoutput_)
assert np.array_equal(output_, dqoutput_)
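# Standalone sketch (an assumption about the fixed-point convention these quantized
# tests rely on, not the library API): quantize by scaling with 2**q fractional bits
# and rounding, dequantize by dividing back.
def _fixed_point_roundtrip_demo():
    q = 4                                    # fractional bits
    x = np.full([3, 3], 0.5)
    xq = np.round(x * 2 ** q).astype(np.int16)
    assert np.allclose(xq / 2 ** q, x)       # 0.5 is exactly representable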
def test_conf2d_pad():
weights = np.arange(9).reshape([1, 1, 3, 3])
filt = Conv2DFilterDim(3, 3, 1, 1)
stride = StrideDim(1)
pad = PadDim.same()
dilation = DilationDim(1)
params = Conv2DParameters("test", filt, stride, pad, dilation)
input_ = np.arange(16).reshape([1, 4, 4])
in_dims = Dim.named(c=1, h=4, w=4).impose_order(['c', 'h', 'w'])
out_dims = params.get_output_size([in_dims])
output_ = conv2d(params, in_dims, out_dims[0], input_, weights, None)
assert np.array_equal(output_, [[[73, 121, 154, 103], [171, 258, 294, 186],\
[279, 402, 438, 270], [139, 187, 202, 113]]])
def test_conf2d_2_out_c_pad():
weights = np.arange(9).reshape([1, 3, 3])
weights = np.append(weights, weights, axis=0).reshape([2, 1, 3, 3])
filt = Conv2DFilterDim(3, 3, 2, 1)
stride = StrideDim(1)
pad = PadDim.same()
dilation = DilationDim(1)
params = Conv2DParameters("test", filt, stride, pad, dilation)
input_ = np.arange(16).reshape([1, 4, 4])
in_dims = Dim.named(c=1, h=4, w=4).impose_order(['c', 'h', 'w'])
out_dims = params.get_output_size([in_dims])
output_ = conv2d(params, in_dims, out_dims[0], input_, weights, None)
assert np.array_equal(output_, [[[73, 121, 154, 103], [171, 258, 294, 186],\
[279, 402, 438, 270], [139, 187, 202, 113]], [[73, 121, 154, 103], [171, 258, 294, 186],\
[279, 402, 438, 270], [139, 187, 202, 113]]])
def test_conf2d_2_in_2_out_c():
weights = np.arange(4).reshape([1, 2, 2])
weights = np.append(weights, weights, axis=0)
weights = np.append(weights, weights, axis=0)
weights = weights.reshape([2, 2, 2, 2])
filt = Conv2DFilterDim(2, 2, 2, 2)
stride = StrideDim(1)
pad = PadDim.valid()
dilation = DilationDim(1)
params = Conv2DParameters("test", filt, stride, pad, dilation)
input_ = np.arange(9).reshape([1, 3, 3])
input_ = np.append(input_, input_, axis=0)
in_dims = Dim.named(c=2, h=3, w=3).impose_order(['c', 'h', 'w'])
out_dims = params.get_output_size([in_dims])
output_ = conv2d(params, in_dims, out_dims[0], input_, weights, None, None)
assert np.array_equal(output_, [[[38., 50.], [74., 86.]],\
[[38., 50.], [74., 86.]]])
def test_conf2d_pad_dilate():
weights = np.arange(9).reshape([1, 1, 3, 3])
filt = Conv2DFilterDim(3, 3, 1, 1)
stride = StrideDim(1)
pad = PadDim.same()
dilation = DilationDim(2)
params = Conv2DParameters("test", filt, stride, pad, dilation)
input_ = np.arange(16).reshape([1, 4, 4])
in_dims = Dim.named(c=1, h=4, w=4).impose_order(['c', 'h', 'w'])
out_dims = params.get_output_size([in_dims])
output_ = conv2d(params, in_dims, out_dims[0], input_, weights, None, None)
assert np.array_equal(output_, [[[266., 206.], [98., 66.]]])
def test_conf2d_q2(caplog):
caplog.set_level(logging.INFO)
weights_q = QType(16, 1, True)
weights = weights_q.quantize(np.full([1, 1, 2, 2], 1.0))
filt = Conv2DFilterDim(2, 2, 1, 1)
stride = StrideDim(1)
pad = PadDim.valid()
dilation = DilationDim(1)
params = Conv2DParameters("test", filt, stride, pad, dilation)
in_q = QType(16, 0, True)
calc_q = QType(weights_q.bits + in_q.bits, weights_q.q + in_q.q, True)
qrec = FilterQuantizationRecord(in_qs=[in_q], out_qs=[in_q], weights_q=weights_q,
acc_q=calc_q, calc_q=calc_q)
input_ = in_q.quantize(np.full([1, 2, 2], 1.0))
in_dims = Dim.named(c=1, h=2, w=2).impose_order(['c', 'h', 'w'])
out_dims = params.get_output_size([in_dims])
output_ = conv2d(params, in_dims, out_dims[0], input_, weights, None, qrec=qrec)
output_ = in_q.dequantize(output_)
assert np.array_equal(output_, [[[4.]]])
def test_av_pool_normal():
filt = PoolFilterDim(2, 2)
stride = StrideDim(1)
pad = PadDim(0)
params = PoolingParameters("test", filt, stride, pad, pool_type="average")
input_ =
|
np.arange(9)
|
numpy.arange
|
# -*- coding: utf-8 -*-
# File: config.py
import numpy as np
# mode flags ---------------------
MODE_MASK = True
# dataset -----------------------
BASEDIR = '/path/to/your/COCO/DIR'
TRAIN_DATASET = ['train2014', 'valminusminival2014']
VAL_DATASET = 'minival2014' # only support evaluation on single dataset
NUM_CLASS = 81
CLASS_NAMES = [] # NUM_CLASS strings. Will be populated later by coco loader
# basemodel ----------------------
RESNET_NUM_BLOCK = [3, 4, 6, 3] # for resnet50
# RESNET_NUM_BLOCK = [3, 4, 23, 3] # for resnet101
FREEZE_AFFINE = False # do not train affine parameters inside BN
# schedule -----------------------
BASE_LR = 1e-2
WARMUP = 1000 # in steps
STEPS_PER_EPOCH = 500
# LR_SCHEDULE = [120000, 160000, 180000] # "1x" schedule in detectron
# LR_SCHEDULE = [150000, 230000, 280000] # roughly a "1.5x" schedule
LR_SCHEDULE = [240000, 320000, 360000] # "2x" schedule in detectron
# image resolution --------------------
SHORT_EDGE_SIZE = 800
MAX_SIZE = 1333
# Alternative (worse & faster) setting: 600, 1024
# anchors -------------------------
ANCHOR_STRIDE = 16
ANCHOR_STRIDES_FPN = (4, 8, 16, 32, 64) # strides for each FPN level. Must be the same length as ANCHOR_SIZES
ANCHOR_SIZES = (32, 64, 128, 256, 512) # sqrtarea of the anchor box
ANCHOR_RATIOS = (0.5, 1., 2.)
NUM_ANCHOR = len(ANCHOR_SIZES) * len(ANCHOR_RATIOS)
POSITIVE_ANCHOR_THRES = 0.7
NEGATIVE_ANCHOR_THRES = 0.3
BBOX_DECODE_CLIP = np.log(MAX_SIZE / 16.0) # to avoid too large numbers.
# rpn training -------------------------
RPN_FG_RATIO = 0.5 # fg ratio among selected RPN anchors
RPN_BATCH_PER_IM = 256 # total (across FPN levels) number of anchors that are marked valid
RPN_MIN_SIZE = 0
RPN_PROPOSAL_NMS_THRESH = 0.7
TRAIN_PRE_NMS_TOPK = 12000
TRAIN_POST_NMS_TOPK = 2000
CROWD_OVERLAP_THRES = 0.7 # boxes overlapping crowd will be ignored.
# fastrcnn training ---------------------
FASTRCNN_BATCH_PER_IM = 512
FASTRCNN_BBOX_REG_WEIGHTS =
|
np.array([10, 10, 5, 5], dtype='float32')
|
numpy.array
|
# import the necessary packages
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from spaces.DiscreteSpace import DiscreteSpace
class DeterministicMDP:
"""This represents a deterministic MDP"""
def __init__(self, name, state_tensor_shape, action_space, state_space, transition_function, reward_function, initial_state):
"""Construct a new general deterministic MDP.
Args:
state_tensor_shape: A list [d_0, ..., d_{N-1}] containing the shape of the state tensor
action_space: The action space to use, has to be derived from DiscreteSpace as well.
state_space: The state space to use, it has to be derived from DiscreteSpace as well.
transition_function: The function, which maps state, actions to states matrix.
reward_function: The reward matrix, stating the reward for each state, action pair.
initial_state: The initial state
"""
# check if the parameters are indeed the correct instances
assert isinstance(action_space, DiscreteSpace)
assert isinstance(state_space, DiscreteSpace)
assert isinstance(initial_state, int) and 0 <= initial_state < state_space.get_size()
# save the number of states
self.state_tensor_shape = state_tensor_shape
self.dim = len(state_tensor_shape)
self.initial_state = initial_state
self.action_space = action_space
self.state_space = state_space
self.name = name
self.q_function = None
self.optimal_reward = None
with tf.variable_scope("env_{}".format(name)):
if isinstance(reward_function, np.ndarray):
# Do some assertions on the passed reward and transition functions.
# They need to have the height of the state space and the width of the action space.
state_action_shape = (state_space.get_size(), action_space.get_size())
assert np.shape(transition_function) == state_action_shape
assert np.shape(reward_function) == state_action_shape
# check if transition function is valid
for i in range(np.size(transition_function, 0)):
for j in range(np.size(transition_function, 1)):
assert 0 <= transition_function[i, j] < state_space.get_size()
# save passed parameters
self.transition = tf.constant(transition_function, dtype=tf.int64)
self.rewards = tf.constant(reward_function, dtype=tf.float64)
self.reward_function = reward_function
self.transition_function = transition_function
else:
self.transition = transition_function
self.rewards = reward_function
# Create the current state vector as well as the operation to reset it to the initial state
init = tf.constant(self.initial_state, shape=state_tensor_shape, dtype=tf.int64)
self.current_states = tf.get_variable("current_state", dtype=tf.int64, initializer=init)
self.cum_rewards = tf.get_variable("cum_rewards", state_tensor_shape, dtype=tf.float64, initializer=tf.zeros_initializer)
self.eps_rewards = tf.get_variable("eps_rewards", state_tensor_shape, dtype=tf.float64, initializer=tf.zeros_initializer)
reset_state = tf.assign(self.current_states, init)
zero_const = tf.constant(0.0, shape=state_tensor_shape, dtype=tf.float64)
reset_cum_rewards = tf.assign(self.cum_rewards, zero_const)
reset_eps_rewards = tf.assign(self.eps_rewards, zero_const)
self.reset_op = tf.group(reset_state, reset_cum_rewards, reset_eps_rewards)
def get_current_state(self):
return self.current_states
def get_rewards(self):
return self.eps_rewards
def perform_actions(self, actions):
# access the state action values inside of the transition function
selection = tf.stack([self.current_states, actions], axis=self.dim)
next_state = tf.gather_nd(self.transition, selection)
rewards = tf.gather_nd(self.rewards, selection)
ass_curr_state = tf.assign(self.current_states, next_state)
ass_coll_rewards = tf.assign_add(self.cum_rewards, rewards)
ass_eps_rewards = tf.assign(self.eps_rewards, rewards)
# save the reward and update state
return tf.group(ass_curr_state, ass_coll_rewards, ass_eps_rewards), next_state
def clone(self, new_name):
return DeterministicMDP(new_name, self.state_tensor_shape,
self.action_space, self.state_space,
self.transition_function, self.reward_function,
self.initial_state)
def get_optimal(self, steps, discount):
"""This gets the optimal reward using value iteration."""
if self.q_function is None:
state_size = self.state_space.get_size()
action_size = self.action_space.get_size()
# init q function
q_shape = (state_size, action_size)
q_function = -np.ones(q_shape)
next_q_function = np.zeros(q_shape)
# repeat until converged
while np.max(np.abs(q_function - next_q_function)) >= 0.001:
# create next bootstrapped q function
q_function = next_q_function
bootstrapped_q_function = np.empty(q_shape)
# iterate over all fields
for s in range(state_size):
for a in range(action_size):
next_state = self.transition_function[s, a]
bootstrapped_q_function[s, a] = np.max(q_function[next_state, :])
# update the q function correctly
next_q_function = self.reward_function + discount * bootstrapped_q_function
# create new environment and simulate
optimal_policy = np.argmax(q_function, axis=1)
reward = 0
current_state = self.initial_state
# run for specified number of steps
for k in range(steps):
reward += self.reward_function[current_state, optimal_policy[current_state]]
current_state = self.transition_function[current_state, optimal_policy[current_state]]
self.optimal_reward = reward
self.q_function = q_function
# init q function
q_shape = (state_size, action_size)
q_function = -np.ones(q_shape)
next_q_function =
|
np.zeros(q_shape)
|
numpy.zeros
|
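A standalone toy version (illustrative only, not the original class) of the Q-value iteration described in get_optimal above, written with plain numpy arrays for a 2-state, 2-action deterministic MDP. All names are made up for the sketch.
import numpy as np

transition = np.array([[1, 0], [0, 1]])      # next state for each (state, action)
reward = np.array([[0.0, 1.0], [1.0, 0.0]])  # reward for each (state, action)
discount = 0.9
q = np.zeros_like(reward)
for _ in range(200):
    # deterministic Bellman backup: Q(s,a) = r(s,a) + gamma * max_a' Q(s',a')
    q = reward + discount * np.max(q[transition], axis=-1)
print(np.argmax(q, axis=1))                  # greedy policy per state -> [1 0]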
from pyquad import quad_grid
import numpy as np
from scipy.integrate import quad
from scipy.optimize import fsolve
from astropy import cosmology as cosmo
import inspect
from numba import cfunc
from numba.types import intc, CPointer, float64
from scipy import LowLevelCallable
from scipy import special
import autofit as af
from autoastro.util import cosmology_util
from autoarray import decorator_util
from autoarray.structures import arrays, grids
from autofit.tools import text_util
from autoastro import dimensions as dim
from autoastro.profiles import geometry_profiles
from autoastro.profiles import mass_profiles as mp
from autoastro import exc
def jit_integrand(integrand_function):
jitted_function = decorator_util.jit(nopython=True, cache=True)(integrand_function)
no_args = len(inspect.getfullargspec(integrand_function).args)
wrapped = None
if no_args == 4:
# noinspection PyUnusedLocal
def wrapped(n, xx):
return jitted_function(xx[0], xx[1], xx[2], xx[3])
elif no_args == 5:
# noinspection PyUnusedLocal
def wrapped(n, xx):
return jitted_function(xx[0], xx[1], xx[2], xx[3], xx[4])
elif no_args == 6:
# noinspection PyUnusedLocal
def wrapped(n, xx):
return jitted_function(xx[0], xx[1], xx[2], xx[3], xx[4], xx[5])
elif no_args == 7:
# noinspection PyUnusedLocal
def wrapped(n, xx):
return jitted_function(xx[0], xx[1], xx[2], xx[3], xx[4], xx[5], xx[6])
elif no_args == 8:
# noinspection PyUnusedLocal
def wrapped(n, xx):
return jitted_function(
xx[0], xx[1], xx[2], xx[3], xx[4], xx[5], xx[6], xx[7]
)
elif no_args == 9:
# noinspection PyUnusedLocal
def wrapped(n, xx):
return jitted_function(
xx[0], xx[1], xx[2], xx[3], xx[4], xx[5], xx[6], xx[7], xx[8]
)
elif no_args == 10:
# noinspection PyUnusedLocal
def wrapped(n, xx):
return jitted_function(
xx[0], xx[1], xx[2], xx[3], xx[4], xx[5], xx[6], xx[7], xx[8], xx[9]
)
elif no_args == 11:
# noinspection PyUnusedLocal
def wrapped(n, xx):
return jitted_function(
xx[0],
xx[1],
xx[2],
xx[3],
xx[4],
xx[5],
xx[6],
xx[7],
xx[8],
xx[9],
xx[10],
)
cf = cfunc(float64(intc, CPointer(float64)))
return LowLevelCallable(cf(wrapped).ctypes)
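# Standalone sketch (an assumption, kept separate from the original helper): the same
# scipy LowLevelCallable pattern with a bare numba cfunc, without the decorator_util.jit
# wrapper used above. Uses only names this module already imports.
def _lowlevel_quad_demo():
    @cfunc(float64(intc, CPointer(float64)))
    def integrand(n, xx):
        # xx[0] is the integration variable, xx[1] the extra argument passed via args=
        return xx[0] ** 2 + xx[1]
    value, _ = quad(LowLevelCallable(integrand.ctypes), 0.0, 1.0, args=(1.0,))
    return value  # ~ 4/3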
class DarkProfile:
pass
# noinspection PyAbstractClass
class AbstractEllipticalGeneralizedNFW(
mp.EllipticalMassProfile, mp.MassProfile, DarkProfile
):
epsrel = 1.49e-5
@af.map_types
def __init__(
self,
centre: dim.Position = (0.0, 0.0),
axis_ratio: float = 1.0,
phi: float = 0.0,
kappa_s: float = 0.05,
inner_slope: float = 1.0,
scale_radius: dim.Length = 1.0,
):
"""
The elliptical NFW profile, used to fit the dark matter halo of the lens.
Parameters
----------
centre: (float, float)
The (y,x) arc-second coordinates of the profile centre.
axis_ratio : float
Ratio of profiles ellipse's minor and major axes (b/a).
phi : float
Rotational angle of profiles ellipse counter-clockwise from positive x-axis.
kappa_s : float
The overall normalization of the dark matter halo \
(kappa_s = (rho_s * scale_radius)/lensing_critical_density)
inner_slope : float
The inner slope of the dark matter halo
scale_radius : float
The arc-second radius where the average density within this radius is 200 times the critical density of \
the Universe.
"""
super(AbstractEllipticalGeneralizedNFW, self).__init__(
centre=centre, axis_ratio=axis_ratio, phi=phi
)
super(mp.MassProfile, self).__init__()
self.kappa_s = kappa_s
self.scale_radius = scale_radius
self.inner_slope = inner_slope
def tabulate_integral(self, grid, tabulate_bins):
"""Tabulate an integral over the convergence of deflection potential of a mass profile. This is used in \
the GeneralizedNFW profile classes to speed up the integration procedure.
Parameters
-----------
grid : aa.Grid
The grid of (y,x) arc-second coordinates the potential / deflection_stacks are computed on.
tabulate_bins : int
The number of bins to tabulate the inner integral of this profile.
"""
eta_min = 1.0e-4
eta_max = 1.05 * np.max(self.grid_to_elliptical_radii(grid))
minimum_log_eta = np.log10(eta_min)
maximum_log_eta = np.log10(eta_max)
bin_size = (maximum_log_eta - minimum_log_eta) / (tabulate_bins - 1)
return eta_min, eta_max, minimum_log_eta, maximum_log_eta, bin_size
@grids.convert_coordinates_to_grid
@geometry_profiles.transform_grid
@geometry_profiles.move_grid_to_radial_minimum
def convergence_from_grid(self, grid):
""" Calculate the projected convergence at a given set of arc-second gridded coordinates.
Parameters
----------
grid : aa.Grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
grid_eta = self.grid_to_elliptical_radii(grid=grid)
return self.convergence_func(grid_radius=grid_eta)
@property
def ellipticity_rescale(self):
return 1.0 - ((1.0 - self.axis_ratio) / 2.0)
def coord_func_f(self, grid_radius):
if isinstance(grid_radius, np.ndarray):
return self.coord_func_f_jit(
grid_radius=grid_radius,
f=np.ones(shape=grid_radius.shape[0], dtype="complex64"),
)
else:
return self.coord_func_f_float_jit(grid_radius=grid_radius)
@staticmethod
@decorator_util.jit()
def coord_func_f_jit(grid_radius, f):
for index in range(f.shape[0]):
if np.real(grid_radius[index]) > 1.0:
f[index] = (
1.0 / np.sqrt(np.square(grid_radius[index]) - 1.0)
) * np.arccos(np.divide(1.0, grid_radius[index]))
elif np.real(grid_radius[index]) < 1.0:
f[index] = (
1.0 / np.sqrt(1.0 - np.square(grid_radius[index]))
) * np.arccosh(np.divide(1.0, grid_radius[index]))
return f
@staticmethod
@decorator_util.jit()
def coord_func_f_float_jit(grid_radius):
if np.real(grid_radius) > 1.0:
return (1.0 / np.sqrt(np.square(grid_radius) - 1.0)) * np.arccos(
np.divide(1.0, grid_radius)
)
elif np.real(grid_radius) < 1.0:
return (1.0 / np.sqrt(1.0 - np.square(grid_radius))) * np.arccosh(
np.divide(1.0, grid_radius)
)
else:
return 1.0
def coord_func_g(self, grid_radius):
f_r = self.coord_func_f(grid_radius=grid_radius)
if isinstance(grid_radius, np.ndarray):
return self.coord_func_g_jit(
grid_radius=grid_radius,
f_r=f_r,
g=np.zeros(shape=grid_radius.shape[0], dtype="complex64"),
)
else:
return self.coord_func_g_float_jit(grid_radius=grid_radius, f_r=f_r)
@staticmethod
@decorator_util.jit()
def coord_func_g_jit(grid_radius, f_r, g):
for index in range(f_r.shape[0]):
if np.real(grid_radius[index]) > 1.0:
g[index] = (1.0 - f_r[index]) / (np.square(grid_radius[index]) - 1.0)
elif np.real(grid_radius[index]) < 1.0:
g[index] = (f_r[index] - 1.0) / (1.0 - np.square(grid_radius[index]))
else:
g[index] = 1.0 / 3.0
return g
@staticmethod
@decorator_util.jit()
def coord_func_g_float_jit(grid_radius, f_r):
if np.real(grid_radius) > 1.0:
return (1.0 - f_r) / (np.square(grid_radius) - 1.0)
elif np.real(grid_radius) < 1.0:
return (f_r - 1.0) / (1.0 - np.square(grid_radius))
else:
return 1.0 / 3.0
def coord_func_h(self, grid_radius):
return np.log(grid_radius / 2.0) + self.coord_func_f(grid_radius=grid_radius)
def rho_at_scale_radius_for_units(
self,
redshift_object,
redshift_source,
unit_length="arcsec",
unit_mass="solMass",
cosmology=cosmo.Planck15,
):
"""The Cosmic average density is defined at the redshift of the profile."""
kpc_per_arcsec = cosmology_util.kpc_per_arcsec_from_redshift_and_cosmology(
redshift=redshift_object, cosmology=cosmology
)
critical_surface_density = cosmology_util.critical_surface_density_between_redshifts_from_redshifts_and_cosmology(
redshift_0=redshift_object,
redshift_1=redshift_source,
cosmology=cosmology,
unit_length=self.unit_length,
unit_mass=unit_mass,
)
rho_at_scale_radius = (
self.kappa_s * critical_surface_density / self.scale_radius
)
rho_at_scale_radius = dim.MassOverLength3(
value=rho_at_scale_radius, unit_length=self.unit_length, unit_mass=unit_mass
)
return rho_at_scale_radius.convert(
unit_length=unit_length,
unit_mass=unit_mass,
kpc_per_arcsec=kpc_per_arcsec,
critical_surface_density=critical_surface_density,
)
def delta_concentration_for_units(
self,
redshift_object,
redshift_source,
unit_length="arcsec",
unit_mass="solMass",
redshift_of_cosmic_average_density="profile",
cosmology=cosmo.Planck15,
):
if redshift_of_cosmic_average_density == "profile":
redshift_calc = redshift_object
elif redshift_of_cosmic_average_density == "local":
redshift_calc = 0.0
else:
raise exc.UnitsException(
"The redshift of the cosmic average density has been specified as an invalid "
"string. Must be (local | profile)"
)
cosmic_average_density = cosmology_util.cosmic_average_density_from_redshift_and_cosmology(
redshift=redshift_calc,
cosmology=cosmology,
unit_length=unit_length,
unit_mass=unit_mass,
)
rho_scale_radius = self.rho_at_scale_radius_for_units(
unit_length=unit_length,
unit_mass=unit_mass,
redshift_object=redshift_object,
redshift_source=redshift_source,
cosmology=cosmology,
)
return rho_scale_radius / cosmic_average_density
def concentration_for_units(
self,
redshift_profile,
redshift_source,
unit_length="arcsec",
unit_mass="solMass",
redshift_of_cosmic_average_density="profile",
cosmology=cosmo.Planck15,
):
delta_concentration = self.delta_concentration_for_units(
redshift_object=redshift_profile,
redshift_source=redshift_source,
unit_length=unit_length,
unit_mass=unit_mass,
redshift_of_cosmic_average_density=redshift_of_cosmic_average_density,
cosmology=cosmology,
)
return fsolve(
func=self.concentration_func, x0=10.0, args=(delta_concentration,)
)[0]
@staticmethod
def concentration_func(concentration, delta_concentration):
return (
200.0
/ 3.0
* (
concentration
* concentration
* concentration
/ (np.log(1 + concentration) - concentration / (1 + concentration))
)
- delta_concentration
)
def radius_at_200_for_units(
self,
redshift_object,
redshift_source,
unit_length="arcsec",
unit_mass="solMass",
redshift_of_cosmic_average_density="profile",
cosmology=cosmo.Planck15,
):
kpc_per_arcsec = cosmology_util.kpc_per_arcsec_from_redshift_and_cosmology(
redshift=redshift_object, cosmology=cosmology
)
concentration = self.concentration_for_units(
redshift_profile=redshift_object,
redshift_source=redshift_source,
unit_length=unit_length,
unit_mass=unit_mass,
redshift_of_cosmic_average_density=redshift_of_cosmic_average_density,
cosmology=cosmology,
)
radius_at_200 = dim.Length(
value=concentration * self.scale_radius, unit_length=self.unit_length
)
return radius_at_200.convert(
unit_length=unit_length, kpc_per_arcsec=kpc_per_arcsec
)
def mass_at_200_for_units(
self,
redshift_object,
redshift_source,
unit_length="arcsec",
unit_mass="solMass",
redshift_of_cosmic_average_density="profile",
cosmology=cosmo.Planck15,
):
if redshift_of_cosmic_average_density == "profile":
redshift_calc = redshift_object
elif redshift_of_cosmic_average_density == "local":
redshift_calc = 0.0
else:
raise exc.UnitsException(
"The redshift of the cosmic average density has been specified as an invalid "
"string. Must be (local | profile)"
)
cosmic_average_density = cosmology_util.cosmic_average_density_from_redshift_and_cosmology(
redshift=redshift_calc,
cosmology=cosmology,
unit_length=unit_length,
unit_mass=unit_mass,
)
critical_surface_density = cosmology_util.critical_surface_density_between_redshifts_from_redshifts_and_cosmology(
redshift_0=redshift_object,
redshift_1=redshift_source,
cosmology=cosmology,
unit_length=self.unit_length,
unit_mass=unit_mass,
)
radius_at_200 = self.radius_at_200_for_units(
redshift_object=redshift_object,
redshift_source=redshift_source,
unit_length=unit_length,
unit_mass=unit_mass,
redshift_of_cosmic_average_density=redshift_of_cosmic_average_density,
cosmology=cosmology,
)
mass_at_200 = dim.Mass(
200.0
* ((4.0 / 3.0) * np.pi)
* cosmic_average_density
* (radius_at_200 ** 3.0),
unit_mass=unit_mass,
)
return mass_at_200.convert(
unit_mass=unit_mass, critical_surface_density=critical_surface_density
)
def summarize_in_units(
self,
radii,
prefix="",
whitespace=80,
unit_length="arcsec",
unit_mass="solMass",
redshift_profile=None,
redshift_source=None,
redshift_of_cosmic_average_density="profile",
cosmology=cosmo.Planck15,
):
summary = super().summarize_in_units(
radii=radii,
prefix=prefix,
unit_length=unit_length,
unit_mass=unit_mass,
redshift_profile=redshift_profile,
redshift_source=redshift_source,
cosmology=cosmology,
whitespace=whitespace,
)
rho_at_scale_radius = self.rho_at_scale_radius_for_units(
unit_length=unit_length,
unit_mass=unit_mass,
redshift_object=redshift_profile,
redshift_source=redshift_source,
cosmology=cosmology,
)
summary += [
text_util.label_value_and_unit_string(
label=prefix + "rho_at_scale_radius",
value=rho_at_scale_radius,
unit=unit_mass + "/" + unit_length + "3",
whitespace=whitespace,
)
]
delta_concentration = self.delta_concentration_for_units(
unit_length=unit_length,
unit_mass=unit_mass,
redshift_object=redshift_profile,
redshift_source=redshift_source,
redshift_of_cosmic_average_density=redshift_of_cosmic_average_density,
cosmology=cosmology,
)
summary += [
text_util.label_and_value_string(
label=prefix + "delta_concentration",
value=delta_concentration,
whitespace=whitespace,
)
]
concentration = self.concentration_for_units(
unit_length=unit_length,
unit_mass=unit_mass,
redshift_profile=redshift_profile,
redshift_source=redshift_source,
redshift_of_cosmic_average_density=redshift_of_cosmic_average_density,
cosmology=cosmology,
)
summary += [
text_util.label_and_value_string(
label=prefix + "concentration",
value=concentration,
whitespace=whitespace,
)
]
radius_at_200 = self.radius_at_200_for_units(
unit_length=unit_length,
unit_mass=unit_mass,
redshift_object=redshift_profile,
redshift_source=redshift_source,
redshift_of_cosmic_average_density=redshift_of_cosmic_average_density,
cosmology=cosmology,
)
summary += [
text_util.label_value_and_unit_string(
label=prefix + "radius_at_200x_cosmic_density",
value=radius_at_200,
unit=unit_length,
whitespace=whitespace,
)
]
mass_at_200 = self.mass_at_200_for_units(
unit_length=unit_length,
unit_mass=unit_mass,
redshift_object=redshift_profile,
redshift_source=redshift_source,
redshift_of_cosmic_average_density=redshift_of_cosmic_average_density,
cosmology=cosmology,
)
summary += [
text_util.label_value_and_unit_string(
label=prefix + "mass_at_200x_cosmic_density",
value=mass_at_200,
unit=unit_mass,
whitespace=whitespace,
)
]
return summary
@property
def unit_mass(self):
return "angular"
class EllipticalGeneralizedNFW(AbstractEllipticalGeneralizedNFW):
@grids.convert_coordinates_to_grid
@geometry_profiles.transform_grid
@geometry_profiles.move_grid_to_radial_minimum
def potential_from_grid(self, grid, tabulate_bins=1000):
"""
Calculate the potential at a given set of arc-second gridded coordinates.
Parameters
----------
grid : aa.Grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
tabulate_bins : int
The number of bins to tabulate the inner integral of this profile.
"""
@jit_integrand
def deflection_integrand(x, kappa_radius, scale_radius, inner_slope):
return (x + kappa_radius / scale_radius) ** (inner_slope - 3) * (
(1 - np.sqrt(1 - x ** 2)) / x
)
eta_min, eta_max, minimum_log_eta, maximum_log_eta, bin_size = self.tabulate_integral(
grid, tabulate_bins
)
potential_grid = np.zeros(grid.sub_shape_1d)
deflection_integral = np.zeros((tabulate_bins,))
for i in range(tabulate_bins):
eta = 10.0 ** (minimum_log_eta + (i - 1) * bin_size)
integral = quad(
deflection_integrand,
a=0.0,
b=1.0,
args=(eta, self.scale_radius, self.inner_slope),
epsrel=EllipticalGeneralizedNFW.epsrel,
)[0]
deflection_integral[i] = (
(eta / self.scale_radius) ** (2 - self.inner_slope)
) * (
(1.0 / (3 - self.inner_slope))
* special.hyp2f1(
3 - self.inner_slope,
3 - self.inner_slope,
4 - self.inner_slope,
-(eta / self.scale_radius),
)
+ integral
)
for i in range(grid.sub_shape_1d):
potential_grid[i] = (2.0 * self.kappa_s * self.axis_ratio) * quad(
self.potential_func,
a=0.0,
b=1.0,
args=(
grid[i, 0],
grid[i, 1],
self.axis_ratio,
minimum_log_eta,
maximum_log_eta,
tabulate_bins,
deflection_integral,
),
epsrel=EllipticalGeneralizedNFW.epsrel,
)[0]
return potential_grid
@grids.convert_coordinates_to_grid
@grids.grid_interpolate
@geometry_profiles.cache
@geometry_profiles.transform_grid
@geometry_profiles.move_grid_to_radial_minimum
def deflections_from_grid(self, grid, tabulate_bins=1000):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid : aa.Grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
tabulate_bins : int
The number of bins to tabulate the inner integral of this profile.
"""
@jit_integrand
def surface_density_integrand(x, kappa_radius, scale_radius, inner_slope):
return (
(3 - inner_slope)
* (x + kappa_radius / scale_radius) ** (inner_slope - 4)
* (1 - np.sqrt(1 - x * x))
)
def calculate_deflection_component(npow, index):
deflection_grid = 2.0 * self.kappa_s * self.axis_ratio * grid[:, index]
deflection_grid *= quad_grid(
self.deflection_func,
0.0,
1.0,
grid,
args=(
npow,
self.axis_ratio,
minimum_log_eta,
maximum_log_eta,
tabulate_bins,
surface_density_integral,
),
epsrel=EllipticalGeneralizedNFW.epsrel,
)[0]
return deflection_grid
eta_min, eta_max, minimum_log_eta, maximum_log_eta, bin_size = self.tabulate_integral(
grid, tabulate_bins
)
surface_density_integral = np.zeros((tabulate_bins,))
for i in range(tabulate_bins):
eta = 10.0 ** (minimum_log_eta + (i - 1) * bin_size)
integral = quad(
surface_density_integrand,
a=0.0,
b=1.0,
args=(eta, self.scale_radius, self.inner_slope),
epsrel=EllipticalGeneralizedNFW.epsrel,
)[0]
surface_density_integral[i] = (
(eta / self.scale_radius) ** (1 - self.inner_slope)
) * (((1 + eta / self.scale_radius) ** (self.inner_slope - 3)) + integral)
deflection_y = calculate_deflection_component(1.0, 0)
deflection_x = calculate_deflection_component(0.0, 1)
return self.rotate_grid_from_profile(
np.multiply(1.0, np.vstack((deflection_y, deflection_x)).T)
)
def convergence_func(self, grid_radius):
def integral_y(y, eta):
return (y + eta) ** (self.inner_slope - 4) * (1 - np.sqrt(1 - y ** 2))
grid_radius = (1.0 / self.scale_radius) * grid_radius
for index in range(grid_radius.shape[0]):
integral_y_value = quad(
integral_y,
a=0.0,
b=1.0,
args=grid_radius[index],
epsrel=EllipticalGeneralizedNFW.epsrel,
)[0]
grid_radius[index] = (
2.0
* self.kappa_s
* (grid_radius[index] ** (1 - self.inner_slope))
* (
(1 + grid_radius[index]) ** (self.inner_slope - 3)
+ ((3 - self.inner_slope) * integral_y_value)
)
)
return grid_radius
@staticmethod
# TODO : Decorator needs to know that potential_integral is 1D arrays
# @jit_integrand
def potential_func(
u,
y,
x,
axis_ratio,
minimum_log_eta,
maximum_log_eta,
tabulate_bins,
potential_integral,
):
eta_u = np.sqrt((u * ((x ** 2) + (y ** 2 / (1 - (1 - axis_ratio ** 2) * u)))))
bin_size = (maximum_log_eta - minimum_log_eta) / (tabulate_bins - 1)
i = 1 + int((np.log10(eta_u) - minimum_log_eta) / bin_size)
r1 = 10.0 ** (minimum_log_eta + (i - 1) * bin_size)
r2 = r1 * 10.0 ** bin_size
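        # Linearly interpolate the tabulated integral between the bracketing log-spaced radii r1 and r2.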
phi = potential_integral[i] + (
potential_integral[i + 1] - potential_integral[i]
) * (eta_u - r1) / (r2 - r1)
return eta_u * (phi / u) / (1.0 - (1.0 - axis_ratio ** 2) * u) ** 0.5
@staticmethod
# TODO : Decorator needs to know that surface_density_integral is 1D arrays
# @jit_integrand
def deflection_func(
u,
y,
x,
npow,
axis_ratio,
minimum_log_eta,
maximum_log_eta,
tabulate_bins,
surface_density_integral,
):
eta_u = np.sqrt((u * ((x ** 2) + (y ** 2 / (1 - (1 - axis_ratio ** 2) * u)))))
bin_size = (maximum_log_eta - minimum_log_eta) / (tabulate_bins - 1)
i = 1 + int((np.log10(eta_u) - minimum_log_eta) / bin_size)
r1 = 10.0 ** (minimum_log_eta + (i - 1) * bin_size)
r2 = r1 * 10.0 ** bin_size
kap = surface_density_integral[i] + (
surface_density_integral[i + 1] - surface_density_integral[i]
) * (eta_u - r1) / (r2 - r1)
return kap / (1.0 - (1.0 - axis_ratio ** 2) * u) ** (npow + 0.5)
class SphericalGeneralizedNFW(EllipticalGeneralizedNFW):
@af.map_types
def __init__(
self,
centre: dim.Position = (0.0, 0.0),
kappa_s: float = 0.05,
inner_slope: float = 1.0,
scale_radius: dim.Length = 1.0,
):
"""
        The spherical generalized NFW profile, used to fit the dark matter halo of the lens.
Parameters
----------
centre: (float, float)
The (y,x) arc-second coordinates of the profile centre.
kappa_s : float
The overall normalization of the dark matter halo \
(kappa_s = (rho_s * scale_radius)/lensing_critical_density)
inner_slope : float
The inner slope of the dark matter halo.
scale_radius : float
            The arc-second radius where the average density within this radius is 200 times the critical density of \
            the Universe.
"""
super(SphericalGeneralizedNFW, self).__init__(
centre=centre,
axis_ratio=1.0,
phi=0.0,
kappa_s=kappa_s,
inner_slope=inner_slope,
scale_radius=scale_radius,
)
@grids.convert_coordinates_to_grid
@grids.grid_interpolate
@geometry_profiles.cache
@geometry_profiles.transform_grid
@geometry_profiles.move_grid_to_radial_minimum
def deflections_from_grid(self, grid, **kwargs):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid : aa.Grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
eta = np.multiply(1.0 / self.scale_radius, self.grid_to_grid_radii(grid))
deflection_grid = np.zeros(grid.sub_shape_1d)
for i in range(grid.sub_shape_1d):
deflection_grid[i] = np.multiply(
4.0 * self.kappa_s * self.scale_radius, self.deflection_func_sph(eta[i])
)
return self.grid_to_grid_cartesian(grid, deflection_grid)
@staticmethod
def deflection_integrand(y, eta, inner_slope):
return (y + eta) ** (inner_slope - 3) * ((1 - np.sqrt(1 - y ** 2)) / y)
def deflection_func_sph(self, eta):
integral_y_2 = quad(
self.deflection_integrand,
a=0.0,
b=1.0,
args=(eta, self.inner_slope),
epsrel=1.49e-6,
)[0]
return eta ** (2 - self.inner_slope) * (
(1.0 / (3 - self.inner_slope))
* special.hyp2f1(
3 - self.inner_slope, 3 - self.inner_slope, 4 - self.inner_slope, -eta
)
+ integral_y_2
)
class SphericalTruncatedNFW(AbstractEllipticalGeneralizedNFW):
@af.map_types
def __init__(
self,
centre: dim.Position = (0.0, 0.0),
kappa_s: float = 0.05,
scale_radius: dim.Length = 1.0,
truncation_radius: dim.Length = 2.0,
):
super(SphericalTruncatedNFW, self).__init__(
centre=centre,
axis_ratio=1.0,
phi=0.0,
kappa_s=kappa_s,
inner_slope=1.0,
scale_radius=scale_radius,
)
self.truncation_radius = truncation_radius
self.tau = self.truncation_radius / self.scale_radius
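        # tau is the truncation radius expressed in units of the scale radius.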
def coord_func_k(self, grid_radius):
return np.log(
np.divide(
grid_radius,
np.sqrt(np.square(grid_radius) + np.square(self.tau)) + self.tau,
)
)
def coord_func_l(self, grid_radius):
f_r = self.coord_func_f(grid_radius=grid_radius)
g_r = self.coord_func_g(grid_radius=grid_radius)
k_r = self.coord_func_k(grid_radius=grid_radius)
return np.divide(self.tau ** 2.0, (self.tau ** 2.0 + 1.0) ** 2.0) * (
((self.tau ** 2.0 + 1.0) * g_r)
+ (2 * f_r)
- (np.pi / (np.sqrt(self.tau ** 2.0 + grid_radius ** 2.0)))
+ (
(
(self.tau ** 2.0 - 1.0)
/ (self.tau * (
|
np.sqrt(self.tau ** 2.0 + grid_radius ** 2.0)
|
numpy.sqrt
|
import torch
import numpy as np
from tqdm.auto import tqdm
from torch import nn
from torch.utils.data import TensorDataset, DataLoader
from pu import PUNNLoss
def train_epoch(x, y, model, loss_func, optimizer, use_gpu):
optimizer.zero_grad()
if use_gpu:
x, y = x.cuda(), y.cuda()
g = model(x)
loss = loss_func(g, y.reshape(-1, 1))
loss.backward()
optimizer.step()
return loss.item()
def train_pn(model, train_dataloader, test_dataloader, optimizer, epochs, use_gpu):
loss_func = lambda g, y: torch.mean(torch.log(1 + torch.exp(-y * g)))
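    # Logistic loss mean(log(1 + exp(-y * g))), written for labels y in {-1, +1}.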
if use_gpu:
model.cuda()
# Train
model.train()
pbar = tqdm(range(epochs), desc="Train PN")
for e in pbar:
for x, y in train_dataloader:
loss = train_epoch(x, y, model, loss_func, optimizer, use_gpu)
pbar.set_postfix({"loss": loss})
pbar.close()
# Eval
model.eval()
prob_list = []
with torch.no_grad():
for (x,) in test_dataloader:
if use_gpu:
x = x.cuda()
p = torch.sigmoid(model(x))
prob_list.append(p.data.cpu().numpy())
prob = np.concatenate(prob_list, axis=0)
return model, prob
def test_pu(model, dataloader, quant, use_gpu, pi=0):
theta = 0
p_list, y_list = [], []
with torch.no_grad():
for x, y in dataloader:
if use_gpu:
x = x.cuda()
p = model(x)
p_list.append(p.data.cpu().numpy())
y_list.append(y.numpy())
y = np.concatenate(y_list, axis=0)
prob = np.concatenate(p_list, axis=0)
if quant is True:
temp = np.copy(prob).flatten()
temp = np.sort(temp)
        theta = temp[int(np.floor(len(prob) * (1 - pi)))]
pred = np.zeros(len(prob))
pred[(prob > theta).flatten()] = 1
accuracy = np.mean(pred == y)
precision = np.sum((pred == y)[pred == 1]) / np.sum(pred == 1)
recall = np.sum((pred == y)[y == 1]) / np.sum(y == 1)
return accuracy, precision, recall
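# --- Added illustration (not part of the original script) --------------------
# test_pu's quant=True branch labels the top pi-fraction of scores as positive by
# thresholding at the (1 - pi) empirical quantile. A minimal self-contained sketch
# of that rule; the helper name and the index guard are ours, not original code.
def _quantile_threshold_sketch(scores, pi):
    sorted_scores = np.sort(np.asarray(scores).flatten())
    idx = min(int(np.floor(len(sorted_scores) * (1 - pi))), len(sorted_scores) - 1)
    theta = sorted_scores[idx]
    return (np.asarray(scores).flatten() > theta).astype(int)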
def train_pu(pi, model, train_dataloader, test_dataloader, optimizer, epochs, use_gpu):
loss_list = []
acc_list = []
acc_quant_list = []
pre_list = []
rec_list = []
pre_quant_list = []
rec_quant_list = []
loss_func = PUNNLoss(pi)
if use_gpu:
model.cuda()
pbar = tqdm(range(epochs), desc="Train PU")
for e in pbar:
loss_step = 0
count = 0
# Train
model.train()
for x, y in train_dataloader:
loss = train_epoch(x, y, model, loss_func, optimizer, use_gpu)
loss_step += loss
count += 1
loss_step /= count
loss_list.append(loss_step)
# Eval
model.eval()
acc, pre, rec = test_pu(model, test_dataloader, False, use_gpu)
acc_quant, pre_quant, rec_quant = test_pu(
model, test_dataloader, True, use_gpu, pi
)
acc_list.append(acc)
pre_list.append(pre)
rec_list.append(rec)
acc_quant_list.append(acc_quant)
pre_quant_list.append(pre_quant)
rec_quant_list.append(rec_quant)
pbar.set_postfix({"loss": loss_step, "acc": acc, "acc_quant": acc_quant})
pbar.close()
loss_list = np.array(loss_list)
acc_list = np.array(acc_list)
pre_list = np.array(pre_list)
rec_list = np.array(rec_list)
acc_quant_list = np.array(acc_quant_list)
pre_quant_list = np.array(pre_quant_list)
rec_quant_list = np.array(rec_quant_list)
return (
model,
loss_list,
acc_list,
pre_list,
rec_list,
acc_quant_list,
pre_quant_list,
rec_quant_list,
)
def train_model(
pn_model,
pu_model,
pn_optimizer,
pu_optimizer,
x_train,
y_train,
x_test,
y_test,
pdata,
epochs=100,
batch_size=64,
use_gpu=True,
):
# Data Process
y_train[y_train == -1] = 0
y_test[y_test == -1] = 0
pi = np.mean(y_train)
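    # Class prior pi is estimated as the fraction of positive (label 1) examples in y_train.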
x =
|
np.concatenate([x_train, x_test], axis=0)
|
numpy.concatenate
|
import numpy as np
import sys
import numpy.random as rand
from scipy.stats import bernoulli
import pdb
import matplotlib.pyplot as plt
import copy
import pandas as pd
def unison_shuffled_copies(a, b, c):
assert len(a) == len(b)
p = np.random.permutation(len(a))
return a[p], b[p], c[p]
def cb_backdoor(index_n_labels,p,qyu,N):
pu_y = np.array([qyu, 1-qyu])
if qyu < 0:
pu_y = np.array([1+qyu, -qyu])
filename = np.array(index_n_labels['filename'].tolist())
Y_all = np.array(index_n_labels['label'].tolist())
U_all = np.array(index_n_labels['conf'].tolist())
la_all = pd.DataFrame(data={'Y_all':Y_all, 'U_all':U_all})
Y = rand.binomial(1,p,N)
U = rand.binomial(1,pu_y[Y])
yr = np.unique(Y); ur = np.unique(U);
ur_r = np.unique(U_all); yr_r = np.unique(Y_all)
la = pd.DataFrame(data={'Y':Y,'U':U})
Ns = []; Ns_real = []; idn = []; idx = []
for y in yr:
for u in ur:
ns = len(la.index[(la['Y']==y) & (la['U']==u)].tolist())
Ns.append(ns)
idn += la.index[(la['Y']==y) & (la['U']==u)].tolist()
Ns_real.append(len(la_all.index[(la_all['Y_all']==yr_r[y]) & (la_all['U_all']==ur_r[u])].tolist()))
idx += la_all.index[(la_all['Y_all']==yr_r[y]) & (la_all['U_all']==ur_r[u])].tolist()[:ns]
Y = Y[idn]; U = U[idn]
U = np.array(U, dtype=int); Y = np.array(Y, dtype=int) ## to make sure that they can be used as indices in later part of the code
## Step 1: estimate f(u,y), f(y) and f(u|y)
Nyu,_,_ = np.histogram2d(Y,U, bins = [len(yr),len(ur)])
pyu_emp = Nyu/N
pu_emp = np.sum(pyu_emp, axis=0)
py_emp = np.sum(pyu_emp, axis=1)
py_u_emp = pyu_emp/pu_emp
## Step 2: for each y in range of values of Y variable
i = np.arange(0,len(idx)) # indices
w = np.zeros(len(idx)) # weights for the indices
i_new = []
for m in range(len(yr)):
j = np.where(Y==yr[m])[0]
w[j] = (((Y==yr[m])/py_u_emp[m,U])/N)[j]
# Step 3: Resample Indices according to weight w
i_new = i_new + list(rand.choice(j,size=j.shape[0],replace=True,p=w[j]))
i_new.sort()
# Step 4: New indices for unbiased data
idx = np.array(idx, dtype=int)
idx_new = idx[i_new]
# confounded data
filename_conf = filename[idx]
Y_conf = Y; U_conf = U
filename_conf,Y_conf,U_conf = unison_shuffled_copies(filename_conf,Y_conf,U_conf)
labels_conf = np.array([filename_conf, Y_conf, U_conf]).transpose(1,0)
# unconfounded data
filename_deconf = filename[idx_new]
Y_deconf = Y[i_new]; U_deconf = U[i_new]
filename_deconf,Y_deconf,U_deconf = unison_shuffled_copies(filename_deconf,Y_deconf,U_deconf)
labels_deconf = np.array([filename_deconf, Y_deconf, U_deconf]).transpose(1,0)
return labels_conf, labels_deconf
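# --- Added usage sketch (not part of the original file) ----------------------
# cb_backdoor expects a DataFrame with 'filename', 'label' and 'conf' columns and
# returns confounded and deconfounded (resampled) label arrays. The parameter
# values below are illustrative assumptions only.
def _example_cb_backdoor(index_n_labels):
    labels_conf, labels_deconf = cb_backdoor(index_n_labels, p=0.5, qyu=0.95, N=10000)
    return labels_conf, labels_deconf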
def cb_frontdoor(index_n_labels,p,qyu,qzy,N):
pz_y = np.array([1-qzy, qzy]) # p(z/y) (correlated)
if qyu<0:
pu_y = np.array([1+qyu, -qyu])
else:
pu_y = np.array([qyu, 1-qyu])
filename = np.array(index_n_labels['filename'].tolist())
Y_all = np.array(index_n_labels['label'].tolist())
U_all = np.array(index_n_labels['conf'].tolist())
la_all = pd.DataFrame(data={'Y_all':Y_all, 'U_all':U_all})
Y = rand.binomial(1,p,N)
Z = rand.binomial(1,pz_y[Y])
U = rand.binomial(1,pu_y[Y])
yr = np.unique(Y); ur = np.unique(U); zr = np.unique(Z)
ur_r = np.unique(U_all); yr_r = np.unique(Y_all)
la = pd.DataFrame(data={'Y':Y,'U':U,'Z':Z})
Ns = []; Ns_real = []; idn = []; idx = []
for y in yr:
for u in ur:
## since we are sampling from z
ns = len(la.index[(la['Z']==y) & (la['U']==u)].tolist())
Ns.append(ns)
idn += la.index[(la['Z']==y) & (la['U']==u)].tolist()
Ns_real.append(len(la_all.index[(la_all['Y_all']==yr_r[y]) & (la_all['U_all']==ur_r[u])].tolist()))
idx += la_all.index[(la_all['Y_all']==yr_r[y]) & (la_all['U_all']==ur_r[u])].tolist()[:ns]
Y = Y[idn]; U = U[idn]; Z = Z[idn]
## to make sure that they can be used as indices in later part of the code
U = np.array(U, dtype=int); Y = np.array(Y, dtype=int) ; Z = np.array(Z, dtype=int)
Nyz,_,_ = np.histogram2d(Y,Z,bins= [len(yr),len(zr)])
pyz_emp = Nyz/N
pz_emp = np.sum(pyz_emp, axis=0)
py_emp = np.sum(pyz_emp, axis=1)
pz_y_emp = np.transpose(pyz_emp)/py_emp
## Step 2: for each y in range of values of Y variable
i = np.arange(0,N) # indices
k = 0
w = np.zeros(N) # weights for the indices
i_new = []
Y_new = []
for m in range(len(yr)):
j = np.where(Y==yr[m])[0]
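        # Front-door weights: w_i is proportional to p(z_i | y=m) / p(z_i | y_i), built from the empirical conditionals above.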
w = (pz_y_emp[Z,m]/pz_y_emp[Z,Y]/N)
# Step 3: Resample Indices according to weight w
i_new = i_new + list(rand.choice(N,size=j.shape[0],replace=True,p=w))
Y_new += [m]*j.shape[0]
i_new.sort()
# Step 4: New indices for unbiased data
idx = np.array(idx)
idx_new = idx[i_new]
# confounded data
filename_conf = filename[idx]
Y_conf = Y; Z_conf = Z; U_conf = U
labels_conf = np.array([filename_conf, Y_conf, U_conf, Z_conf]).transpose(1,0)
# unconfounded data
filename_deconf = filename[idx_new]
Y_deconf = np.array(Y_new); Z_deconf = Z[i_new]; U_deconf = U[i_new]
labels_deconf = np.array([filename_deconf, Y_deconf, U_deconf, Z_deconf]).transpose(1,0)
    ## sanity check (these distributions should change suitably for deconfounded data)
# Nyu,_,_ = np.histogram2d(Y_deconf,U_deconf,bins=[len(yr),len(ur)])
# pyu_emp = Nyu/N
# pu_emp = np.sum(pyu_emp, axis=0)
# py_emp = np.sum(pyu_emp, axis=1)
# py_u_emp = np.transpose(pyu_emp)/py_emp
# ## estimate f(z,y,u) to get f(z/y,u)
# mat = np.array([Z_deconf,Y_deconf,U_deconf]).transpose(1,0)
# H, [by, bu, bz]= np.histogramdd(mat,bins=[len(yr),len(ur),len(zr)])
# iz, iy, iu = np.where(H)
# pzyu_emp = H/N
# pu_emp = np.sum(np.sum(pzyu_emp, axis=0),axis=0)
# pz_emp = np.sum(np.sum(pzyu_emp, axis=2),axis=1)
# py_emp = np.sum(np.sum(pzyu_emp, axis=2),axis=0)
# pyu_emp = np.sum(pzyu_emp, axis=0)
# pz_yu_emp = pzyu_emp/np.expand_dims(pyu_emp, axis=0)
# pdb.set_trace()
return labels_conf, labels_deconf
## the case with both confounding and mediator
def cb_front_n_back(index_n_labels,p,qyu,qzy,N):
pz_y = np.array([1-qzy, qzy]) # p(z/y) (correlated)
if qyu<0:
pu_y = np.array([1+qyu, -qyu])
else:
pu_y = np.array([qyu, 1-qyu])
filename = np.array(index_n_labels['filename'].tolist())
Y_all = np.array(index_n_labels['label'].tolist())
U_all = np.array(index_n_labels['conf'].tolist())
la_all = pd.DataFrame(data={'Y_all':Y_all, 'U_all':U_all})
Y = rand.binomial(1,p,N)
Z = rand.binomial(1,pz_y[Y])
U = rand.binomial(1,pu_y[Y])
yr = np.unique(Y); ur = np.unique(U); zr = np.unique(Z)
ur_r = np.unique(U_all); yr_r = np.unique(Y_all)
la = pd.DataFrame(data={'Y':Y,'U':U,'Z':Z})
Ns = []; Ns_real = []; idn = []; idx = []
for y in yr:
for u in ur:
## since we are sampling from z
ns = len(la.index[(la['Z']==y) & (la['U']==u)].tolist())
Ns.append(ns)
idn += la.index[(la['Z']==y) & (la['U']==u)].tolist()
Ns_real.append(len(la_all.index[(la_all['Y_all']==yr_r[y]) & (la_all['U_all']==ur_r[u])].tolist()))
idx += la_all.index[(la_all['Y_all']==yr_r[y]) & (la_all['U_all']==ur_r[u])].tolist()[:ns]
Y = Y[idn]; U = U[idn]; Z = Z[idn]
## to make sure that they can be used as indices in later part of the code
U = np.array(U, dtype=int); Y = np.array(Y, dtype=int) ; Z = np.array(Z, dtype=int)
## Step 1: estimate f(z,y), f(y) and f(z|y)
Nyz,_,_ = np.histogram2d(Y,Z,bins=[len(yr),len(zr)])
pyz_emp = Nyz/N
pz_emp = np.sum(pyz_emp, axis=0)
py_emp = np.sum(pyz_emp, axis=1)
pz_y_emp = np.transpose(pyz_emp)/py_emp
Nuz,_,_ = np.histogram2d(U,Z,bins=[len(ur),len(zr)])
puz_emp = Nuz/N
pz_emp = np.sum(puz_emp, axis=0)
pu_emp = np.sum(puz_emp, axis=1)
pz_u_emp = np.transpose(puz_emp)/pu_emp
## Step 2: for each y in range of values of Y variable
i = np.arange(0,N) # indices
k = 0
w = np.zeros(N) # weights for the indices
i_new = []
Y_new = []
for m in range(len(yr)):
j = np.where(Y==yr[m])[0]
w = (pz_y_emp[Z,m]/(pz_u_emp[Z,U])/N) ## conditional distribution is done by taking samples from p(x,y,z)
## and normalising by p(y,u)
#
# print(sum(w))
        w = w/sum(w) ## renormalise: floating point can leave the weights summing to slightly less than 1
# Step 3: Resample Indices according to weight w
i_new = i_new + list(rand.choice(N,size=j.shape[0],replace=True,p=w))
Y_new += [m]*j.shape[0]
i_new.sort()
idx = np.array(idx)
idx_new = idx[i_new]
# confounded data
filename_conf = filename[idx]
Y_conf = Y; Z_conf = Z; U_conf = U
labels_conf = np.array([filename_conf, Y_conf, U_conf, Z_conf]).transpose(1,0)
# unconfounded data
filename_deconf = filename[idx_new]
Y_deconf = np.array(Y_new); Z_deconf = Z[i_new]; U_deconf = U[i_new]
labels_deconf = np.array([filename_deconf, Y_deconf, U_deconf, Z_deconf]).transpose(1,0)
    ## sanity check (these distributions should change suitably for deconfounded data)
# Nyu,_,_ = np.histogram2d(Y_deconf,U_deconf,bins=[len(yr),len(ur)])
# pyu_emp = Nyu/N
# pu_emp = np.sum(pyu_emp, axis=0)
# py_emp = np.sum(pyu_emp, axis=1)
# py_u_emp = np.transpose(pyu_emp)/py_emp
# ## estimate f(z,y,u) to get f(z/y,u)
# mat = np.array([Z_deconf,Y_deconf,U_deconf]).transpose(1,0)
# H, [by, bu, bz]= np.histogramdd(mat,bins=[len(yr),len(ur),len(zr)])
# iz, iy, iu = np.where(H)
# pzyu_emp = H/N
# pu_emp = np.sum(np.sum(pzyu_emp, axis=0),axis=0)
# pz_emp = np.sum(np.sum(pzyu_emp, axis=2),axis=1)
# py_emp = np.sum(np.sum(pzyu_emp, axis=2),axis=0)
# pyu_emp = np.sum(pzyu_emp, axis=0)
# pz_yu_emp = pzyu_emp/np.expand_dims(pyu_emp, axis=0)
# pdb.set_trace()
return labels_conf, labels_deconf
def cb_par_front_n_back(index_n_labels,p,qyu,qzy,N):
pz_y = np.array([1-qzy, qzy]) # p(z/y) (correlated)
if qyu<0:
pv_y = np.array([-qyu, 1+qyu])
pu_y = np.array([1+qyu, -qyu])
else:
pv_y = np.array([1-qyu, qyu])
pu_y = np.array([qyu, 1-qyu])
filename = np.array(index_n_labels['filename'].tolist())
Y_all = np.array(index_n_labels['label'].tolist())
U_all = np.array(index_n_labels['conf'].tolist())
la_all = pd.DataFrame(data={'Y_all':Y_all, 'U_all':U_all})
Y = rand.binomial(1,p,N)
Z = rand.binomial(1,pz_y[Y])
U = rand.binomial(1,pu_y[Y])
yr = np.unique(Y); ur = np.unique(U); zr = np.unique(Z)
ur_r = np.unique(U_all); yr_r = np.unique(Y_all)
    la = pd.DataFrame(data={'Y':Y,'U':U,'Z':Z})
Ns = []; Ns_real = []; idn = []; idx = []
for y in yr:
for u in ur:
## since we are sampling from z
ns = len(la.index[(la['Z']==y) & (la['U']==u)].tolist())
Ns.append(ns)
idn += la.index[(la['Z']==y) & (la['U']==u)].tolist()
Ns_real.append(len(la_all.index[(la_all['Y_all']==yr_r[y]) & (la_all['U_all']==ur_r[u])].tolist()))
idx += la_all.index[(la_all['Y_all']==yr_r[y]) & (la_all['U_all']==ur_r[u])].tolist()[:ns]
Y = Y[idn]; U = U[idn]; Z = Z[idn]
## to make sure that they can be used as indices in later part of the code
U = np.array(U, dtype=int); Y = np.array(Y, dtype=int) ; Z = np.array(Z, dtype=int)
## Step 1: estimate f(z,y), f(y) and f(z|y)
Nyz,_,_ = np.histogram2d(Y,Z,bins=[len(yr),len(zr)])
pyz_emp = Nyz/N
pz_emp = np.sum(pyz_emp, axis=0)
py_emp = np.sum(pyz_emp, axis=1)
pz_y_emp = np.transpose(pyz_emp)/py_emp
## estimate the f(z,y,u), f(y) and f()
mat = np.array([Z,Y,U]).transpose(1,0)
H, [by, bu, bz]= np.histogramdd(mat,bins=[len(yr),len(ur),len(zr)])
iz, iy, iu = np.where(H)
pzyu_emp = H/N
pu_emp = np.sum(np.sum(pzyu_emp, axis=0),axis=0)
pz_emp = np.sum(np.sum(pzyu_emp, axis=2),axis=1)
py_emp = np.sum(np.sum(pzyu_emp, axis=2),axis=0)
pyu_emp =
|
np.sum(pzyu_emp, axis=0)
|
numpy.sum
|
import numpy as np
import warnings
__all__ = [
# 'label_binarize',
# 'LabelBinarizer',
'LabelEncoder',
# 'MultiLabelBinarizer',
]
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit') and callable(x.fit):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
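# --- Added example (not part of the original module) -------------------------
# column_or_1d flattens an (n, 1) column vector to shape (n,); a quick
# self-contained illustration using an assumed toy array:
def _column_or_1d_example():
    y_col = np.arange(3).reshape(-1, 1)  # shape (3, 1)
    return column_or_1d(y_col)  # -> array([0, 1, 2]) with shape (3,)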
def _encode_check_unknown(values, uniques, return_mask=False):
"""
Helper function to check for unknowns in values to be encoded.
Uses pure python method for object dtype, and numpy method for
all other dtypes.
Parameters
----------
values : array
Values to check for unknowns.
uniques : array
Allowed uniques values.
return_mask : bool, default False
If True, return a mask of the same shape as `values` indicating
the valid values.
Returns
-------
diff : list
The unique values present in `values` and not in `uniques` (the
unknown values).
valid_mask : boolean array
Additionally returned if ``return_mask=True``.
"""
if values.dtype == object:
uniques_set = set(uniques)
diff = list(set(values) - uniques_set)
if return_mask:
if diff:
valid_mask = np.array([val in uniques_set for val in values])
else:
valid_mask = np.ones(len(values), dtype=bool)
return diff, valid_mask
else:
return diff
else:
unique_values = np.unique(values)
diff = list(np.setdiff1d(unique_values, uniques, assume_unique=True))
if return_mask:
if diff:
valid_mask =
|
np.in1d(values, uniques)
|
numpy.in1d
|
"""Triangle/Tetrahedron Meshing
This module contains the class definition for the TriMesh class.
"""
# --------------------------------------------------------------------------- #
# #
# Import Modules #
# #
# --------------------------------------------------------------------------- #
from __future__ import division
from __future__ import print_function
import meshpy.tet
import meshpy.triangle
import numpy as np
import pygmsh as pg
from matplotlib import collections
from matplotlib import patches
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from microstructpy import _misc
__all__ = ['TriMesh']
__author__ = 'Kenneth (Kip) Hart'
# --------------------------------------------------------------------------- #
# #
# TriMesh Class #
# #
# --------------------------------------------------------------------------- #
class TriMesh(object):
"""Triangle/Tetrahedron mesh.
The TriMesh class contains the points, facets, and elements in a triangle/
tetrahedron mesh, also called an unstructured grid.
The points attribute is an Nx2 or Nx3 list of points in the mesh.
The elements attribute contains the Nx3 or Nx4 list of the points at
the corners of each triangle/tetrahedron. A list of facets can also be
included, though it is optional and does not need to include every facet
in the mesh. Attributes can also be assigned to the elements and facets,
though they are also optional.
Args:
points (list, numpy.ndarray): List of coordinates in the mesh.
elements (list, numpy.ndarray): List of indices of the points at
the corners of each element. The shape should be Nx3 in 2D or
Nx4 in 3D.
element_attributes (list, numpy.ndarray): *(optional)* A number
associated with each element.
Defaults to None.
facets (list, numpy.ndarray): *(optional)* A list of facets in the
mesh. The shape should be Nx2 in 2D or Nx3 in 3D.
Defaults to None.
facet_attributes (list, numpy.ndarray): *(optional)* A number
associated with each facet.
Defaults to None.
"""
# ----------------------------------------------------------------------- #
# Constructors #
# ----------------------------------------------------------------------- #
def __init__(self, points, elements, element_attributes=None, facets=None,
facet_attributes=None):
self.points = points
self.elements = elements
self.element_attributes = element_attributes
self.facets = facets
self.facet_attributes = facet_attributes
@classmethod
def from_file(cls, filename):
"""Read TriMesh from file.
This function reads in a triangular mesh from a file and creates an
instance from that file. Currently the only supported file type
is the output from :meth:`.write` with the ``format='str'`` option.
Args:
filename (str): Name of file to read from.
Returns:
TriMesh: An instance of the class.
"""
with open(filename, 'r') as file:
stage = 0
pts = []
elems = []
elem_atts = []
facets = []
facet_atts = []
n_eas = 0
n_facets = 0
n_fas = 0
for line in file.readlines():
if 'Mesh Points'.lower() in line.lower():
n_pts = int(line.split(':')[1])
stage = 'points'
elif 'Mesh Elements'.lower() in line.lower():
n_elems = int(line.split(':')[1])
stage = 'elements'
elif 'Element Attributes'.lower() in line.lower():
n_eas = int(line.split(':')[1])
stage = 'element attributes'
elif 'Facets'.lower() in line.lower():
n_facets = int(line.split(':')[1])
stage = 'facets'
elif 'Facet Attributes'.lower() in line.lower():
n_fas = int(line.split(':')[1])
stage = 'facet attributes'
else:
if stage == 'points':
pts.append([float(x) for x in line.split(',')])
elif stage == 'elements':
elems.append([int(kp) for kp in line.split(',')])
elif stage == 'element attributes':
elem_atts.append(_misc.from_str(line))
elif stage == 'facets':
if n_facets > 0:
facets.append([int(kp) for kp in line.split(',')])
elif stage == 'facet attributes':
if n_fas > 0:
facet_atts.append(_misc.from_str(line))
else:
pass
# check the inputs
assert len(pts) == n_pts
assert len(elems) == n_elems
assert len(elem_atts) == n_eas
assert len(facets) == n_facets
assert len(facet_atts) == n_fas
return cls(pts, elems, elem_atts, facets, facet_atts)
@classmethod
def from_polymesh(cls, polymesh, phases=None, mesher='Triangle/Tetgen',
min_angle=0, max_volume=float('inf'),
max_edge_length=float('inf'), mesh_size=float('inf')):
"""Create TriMesh from PolyMesh.
        This constructor creates a triangle/tetrahedron mesh from a polygon
mesh (:class:`.PolyMesh`). Polygons of the same seed number are
merged and the element attribute is set to the seed number it is
within. The facets between seeds are saved to the mesh and the index
of the facet is stored in the facet attributes.
Since the PolyMesh can include phase numbers for each region,
additional information about the phases can be included as an input.
The "phases" input should be a list of material phase dictionaries,
formatted according to the :ref:`phase_dict_guide` guide.
The minimum angle, maximum volume, and maximum edge length options
provide quality controls for the mesh. The phase type option can take
one of several values, described below.
* **crystalline**: granular, solid
* **amorphous**: glass, matrix
* **void**: crack, hole
The **crystalline** option creates a mesh where cells of the same seed
number are merged, but cells are not merged across seeds. _This is
the default material type._
The **amorphous** option creates a mesh where cells of the same
phase number are merged to create an amorphous region in the mesh.
Finally, the **void** option will merge neighboring void cells and
treat them as holes in the mesh.
Args:
polymesh (PolyMesh): A polygon/polyhedron mesh.
phases (list): *(optional)* A list of dictionaries containing
options for each phase.
Default is
``{'material_type': 'solid', 'max_volume': float('inf')}``.
mesher (str): {'Triangle/TetGen' | 'Triangle' | 'TetGen' | 'gmsh'}
specify the mesh generator. Default is 'Triangle/TetGen'.
min_angle (float): The minimum interior angle, in degrees, of an
element. This option is used with Triangle or TetGen and in 3D
is the minimum *dihedral* angle. Defaults to 0.
max_volume (float): The default maximum cell volume, used if one
is not set for each phase. This option is used with Triangle or
TetGen. Defaults to infinity, which turns off this control.
max_edge_length (float): The maximum edge length of elements
along grain boundaries. This option is used with Triangle
and gmsh. Defaults to infinity, which turns off this control.
mesh_size (float): The target size of the mesh elements. This
                option is used with gmsh. Default is infinity, which turns off
this control.
"""
key = mesher.lower().strip()
if key in ('triangle/tetgen', 'triangle', 'tetgen'):
tri_args = _call_meshpy(polymesh, phases, min_angle, max_volume,
max_edge_length)
elif key == 'gmsh':
tri_args = _call_gmsh(polymesh, phases, mesh_size, max_edge_length)
return cls(*tri_args)
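    # Added usage note (illustrative, values assumed): a typical call might look like
    #     tri_mesh = TriMesh.from_polymesh(poly_mesh, phases=phases,
    #                                      mesher='Triangle/TetGen',
    #                                      min_angle=20, max_volume=0.1)
    # where poly_mesh is a PolyMesh and phases follows the phase dictionary guide above.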
# ----------------------------------------------------------------------- #
# String and Representation Functions #
# ----------------------------------------------------------------------- #
def __str__(self):
nv = len(self.points)
nd = len(self.points[0])
pt_fmt = '\t'
pt_fmt += ', '.join(['{pt[' + str(i) + ']: e}' for i in range(nd)])
str_str = 'Mesh Points: ' + str(nv) + '\n'
str_str += ''.join([pt_fmt.format(pt=p) + '\n' for p in self.points])
str_str += 'Mesh Elements: ' + str(len(self.elements)) + '\n'
str_str += '\n'.join(['\t' + str(tuple(e))[1:-1] for e in
self.elements])
try:
str_str += '\nElement Attributes: '
str_str += str(len(self.element_attributes)) + '\n'
str_str += '\n'.join(['\t' + str(a) for a in
self.element_attributes])
except TypeError:
pass
try:
str_str += '\nFacets: ' + str(len(self.facets)) + '\n'
str_str += '\n'.join(['\t' + str(tuple(f))[1:-1] for f in
self.facets])
except TypeError:
pass
try:
str_str += '\nFacet Attributes: '
str_str += str(len(self.facet_attributes)) + '\n'
str_str += '\n'.join(['\t' + str(a) for a in
self.facet_attributes])
except TypeError:
pass
return str_str
def __repr__(self):
repr_str = 'TriMesh('
repr_str += ', '.join([repr(v) for v in (self.points, self.elements,
self.element_attributes, self.facets,
self.facet_attributes)])
repr_str += ')'
return repr_str
# ----------------------------------------------------------------------- #
# Write Function #
# ----------------------------------------------------------------------- #
def write(self, filename, format='txt', seeds=None, polymesh=None):
"""Write mesh to file.
This function writes the contents of the mesh to a file.
The format options are 'abaqus', 'tet/tri', 'txt', and 'vtk'.
See the :ref:`s_tri_file_io` section of the :ref:`c_file_formats`
guide for more details on these formats.
Args:
filename (str): The name of the file to write. In the cases of
TetGen/Triangle, this is the basename of the files.
format (str): {'abaqus' | 'tet/tri' | 'txt' | 'vtk'}
*(optional)* The format of the output file.
Default is 'txt'.
seeds (SeedList): *(optional)* List of seeds. If given, VTK files
                will also include the phase number of each element in the
mesh. This assumes the ``element_attributes``
field contains the seed number of each element.
polymesh (PolyMesh): *(optional)* Polygonal mesh used for
generating the triangular mesh. If given, will add surface
unions to Abaqus files - for easier specification of
boundary conditions.
""" # NOQA: E501
fmt = format.lower()
if fmt == 'abaqus':
# write top matter
abaqus = '*Heading\n'
abaqus += '** Job name: microstructure '
abaqus += 'Model name: microstructure_model\n'
abaqus += '** Generated by: MicroStructPy\n'
# write parts
abaqus += '**\n** PARTS\n**\n'
abaqus += '*Part, name=Part-1\n'
abaqus += '*Node\n'
abaqus += ''.join([str(i + 1) + ''.join([', ' + str(x) for x in
pt]) + '\n' for i, pt in
enumerate(self.points)])
n_dim = len(self.points[0])
elem_type = {2: 'CPS3', 3: 'C3D4'}[n_dim]
abaqus += '*Element, type=' + elem_type + '\n'
abaqus += ''.join([str(i + 1) + ''.join([', ' + str(kp + 1) for kp
in elem]) + '\n' for
i, elem in enumerate(self.elements)])
# Element sets - seed number
elset_n_per = 16
elem_atts = np.array(self.element_attributes)
for att in np.unique(elem_atts):
elset_name = 'Set-E-Seed-' + str(att)
elset_str = '*Elset, elset=' + elset_name + '\n'
elem_groups = [[]]
for elem_ind, elem_att in enumerate(elem_atts):
if ~np.isclose(elem_att, att):
continue
if len(elem_groups[-1]) >= elset_n_per:
elem_groups.append([])
elem_groups[-1].append(elem_ind + 1)
for group in elem_groups:
elset_str += ','.join([str(i) for i in group])
elset_str += '\n'
abaqus += elset_str
# Element Sets - phase number
if seeds is not None:
phase_nums = np.array([seed.phase for seed in seeds])
for phase_num in np.unique(phase_nums):
mask = phase_nums == phase_num
seed_nums = np.nonzero(mask)[0]
elset_name = 'Set-E-Material-' + str(phase_num)
elset_str = '*Elset, elset=' + elset_name + '\n'
groups = [[]]
for seed_num in seed_nums:
if seed_num not in elem_atts:
continue
if len(groups[-1]) >= elset_n_per:
groups.append([])
seed_elset_name = 'Set-E-Seed-' + str(seed_num)
groups[-1].append(seed_elset_name)
for group in groups:
elset_str += ','.join(group)
elset_str += '\n'
abaqus += elset_str
# Surfaces - Exterior and Interior
facets = np.array(self.facets)
facet_atts = np.array(self.facet_attributes)
face_ids = {2: [2, 3, 1], 3: [3, 4, 2, 1]}[n_dim]
for att in np.unique(facet_atts):
facet_name = 'Surface-' + str(att)
surf_str = '*Surface, name=' + facet_name + ', type=element\n'
att_facets = facets[facet_atts == att]
for facet in att_facets:
mask = np.isin(self.elements, facet)
n_match = mask.astype('int').sum(axis=1)
i_elem = np.argmax(n_match)
elem_id = i_elem + 1
i_missing = np.argmin(mask[i_elem])
face_id = face_ids[i_missing]
surf_str += str(elem_id) + ', S' + str(face_id) + '\n'
abaqus += surf_str
# Surfaces - Exterior
poly_neighbors = np.array(polymesh.facet_neighbors)
poly_mask = np.any(poly_neighbors < 0, axis=1)
neigh_nums = np.min(poly_neighbors, axis=1)
u_neighs = np.unique(neigh_nums[poly_mask])
for neigh_num in u_neighs:
mask = neigh_nums == neigh_num
facet_name = 'Ext-Surface-' + str(-neigh_num)
surf_str = '*Surface, name=' + facet_name + ', combine=union\n'
for i, flag in enumerate(mask):
if flag:
surf_str += 'Surface-' + str(i) + '\n'
abaqus += surf_str
# End Part
abaqus += '*End Part\n\n'
# Assembly
abaqus += '**\n'
abaqus += '** ASSEMBLY\n'
abaqus += '**\n'
abaqus += '*Assembly, name=assembly\n'
abaqus += '**\n'
# Instances
abaqus += '*Instance, name=I-Part-1, part=Part-1\n'
abaqus += '*End Instance\n'
# End Assembly
abaqus += '**\n'
abaqus += '*End Assembly\n'
with open(filename, 'w') as file:
file.write(abaqus)
elif fmt in ('str', 'txt'):
with open(filename, 'w') as file:
file.write(str(self) + '\n')
elif fmt == 'tet/tri':
# create boundary markers
bnd_mkrs = np.full(len(self.points), 0, dtype='int')
facet_arr = np.array(self.facets)
f_bnd_mkrs = np.full(len(self.facets), 0, dtype='int')
elem_arr = np.array(self.elements)
for elem in self.elements:
for i in range(len(elem)):
e_facet = np.delete(elem, i)
f_mask = np.full(elem_arr.shape[0], True)
for kp in e_facet:
f_mask &= np.any(elem_arr == kp, axis=-1)
if np.sum(f_mask) == 1:
bnd_mkrs[e_facet] = 1
f_mask = np.full(facet_arr.shape[0], True)
for kp in e_facet:
f_mask &= np.any(facet_arr == kp, axis=-1)
f_bnd_mkrs[f_mask] = 1
# write vertices
n_pts, n_dim = np.array(self.points).shape
nodes = ' '.join([str(n) for n in (n_pts, n_dim, 0, 1)]) + '\n'
nodes += ''.join([str(i) + ''.join([' ' + str(x) for x in pt]) +
' ' + str(bnd_mkrs[i]) + '\n' for i, pt in
enumerate(self.points)])
with open(filename + '.node', 'w') as file:
file.write(nodes)
# write elements
n_ele, n_kp = np.array(self.elements).shape
is_att = self.element_attributes is not None
n_att = int(is_att)
eles = ' '.join([str(n) for n in (n_ele, n_kp, n_att)]) + '\n'
for i, simplex in enumerate(self.elements):
e_str = ' '.join([str(kp) for kp in simplex])
if is_att:
e_str += ' ' + str(self.element_attributes[i])
e_str += '\n'
eles += e_str
with open(filename + '.ele', 'w') as file:
file.write(eles)
# Write edges/faces
if self.facets is not None:
ext = {2: '.edge', 3: '.face'}[n_dim]
n_facet, n_kp = np.array(self.facets).shape
                edge = ' '.join([str(n) for n in (n_facet, n_kp, 1)]) + '\n'
edge += ''.join([str(i) + ''.join([' ' + str(k) for k in f]) +
' ' + str(mkr) + '\n' for f, mkr in
zip(self.facets, f_bnd_mkrs)])
with open(filename + ext, 'w') as file:
file.write(edge)
elif fmt == 'vtk':
n_kp = len(self.elements[0])
mesh_type = {3: 'Triangular', 4: 'Tetrahedral'}[n_kp]
pt_fmt = '{: f} {: f} {: f}\n'
# write heading
vtk = '# vtk DataFile Version 2.0\n'
vtk += '{} mesh\n'.format(mesh_type)
vtk += 'ASCII\n'
vtk += 'DATASET UNSTRUCTURED_GRID\n'
# Write points
vtk += 'POINTS ' + str(len(self.points)) + ' float\n'
if len(self.points[0]) == 2:
vtk += ''.join([pt_fmt.format(x, y, 0) for x, y in
self.points])
else:
vtk += ''.join([pt_fmt.format(x, y, z) for x, y, z in
self.points])
# write elements
n_elem = len(self.elements)
cell_fmt = str(n_kp) + n_kp * ' {}' + '\n'
cell_sz = (1 + n_kp) * n_elem
vtk += '\nCELLS ' + str(n_elem) + ' ' + str(cell_sz) + '\n'
vtk += ''.join([cell_fmt.format(*el) for el in self.elements])
# write cell type
vtk += '\nCELL_TYPES ' + str(n_elem) + '\n'
cell_type = {3: '5', 4: '10'}[n_kp]
vtk += ''.join(n_elem * [cell_type + '\n'])
# write element attributes
try:
int(self.element_attributes[0])
att_type = 'int'
except TypeError:
att_type = 'float'
vtk += '\nCELL_DATA ' + str(n_elem) + '\n'
vtk += 'SCALARS element_attributes ' + att_type + ' 1 \n'
vtk += 'LOOKUP_TABLE element_attributes\n'
vtk += ''.join([str(a) + '\n' for a in self.element_attributes])
# Write phase numbers
if seeds is not None:
vtk += '\nSCALARS phase_numbers int 1 \n'
vtk += 'LOOKUP_TABLE phase_numbers\n'
vtk += ''.join([str(seeds[a].phase) + '\n' for a in
self.element_attributes])
with open(filename, 'w') as file:
file.write(vtk)
else:
e_str = 'Cannot write file type ' + str(format) + ' yet.'
raise NotImplementedError(e_str)
# ----------------------------------------------------------------------- #
# Plot Function #
# ----------------------------------------------------------------------- #
def plot(self, index_by='element', material=[], loc=0, **kwargs):
"""Plot the mesh.
This method plots the mesh using matplotlib.
In 2D, this creates a :class:`matplotlib.collections.PolyCollection`
and adds it to the current axes.
In 3D, it creates a
:class:`mpl_toolkits.mplot3d.art3d.Poly3DCollection` and
adds it to the current axes.
The keyword arguments are passed though to matplotlib.
Args:
index_by (str): *(optional)* {'element' | 'attribute'}
Flag for indexing into the other arrays passed into the
function. For example,
``plot(index_by='attribute', color=['blue', 'red'])`` will plot
the elements with ``element_attribute`` equal to 0 in blue, and
elements with ``element_attribute`` equal to 1 in red.
Note that in 3D the facets are plotted instead of the elements,
so kwarg lists must be based on ``facets`` and
``facet_attributes``. Defaults to 'element'.
material (list): *(optional)* Names of material phases. One entry
per material phase (the ``index_by`` argument is ignored).
If this argument is set, a legend is added to the plot with
one entry per material. Note that the ``element_attributes``
in 2D or the ``facet_attributes`` in 3D must be the material
numbers for the legend to be formatted properly.
loc (int or str): *(optional)* The location of the legend,
if 'material' is specified. This argument is passed directly
through to :func:`matplotlib.pyplot.legend`. Defaults to 0,
which is 'best' in matplotlib.
**kwargs: Keyword arguments that are passed through to matplotlib.
"""
n_dim = len(self.points[0])
if n_dim == 2:
ax = plt.gca()
else:
ax = plt.gcf().gca(projection=Axes3D.name)
n_obj = _misc.ax_objects(ax)
if n_obj > 0:
xlim = ax.get_xlim()
ylim = ax.get_ylim()
else:
xlim = [float('inf'), -float('inf')]
ylim = [float('inf'), -float('inf')]
if n_dim == 2:
_plot_2d(ax, self, index_by, **kwargs)
else:
if n_obj > 0:
zlim = ax.get_zlim()
else:
zlim = [float('inf'), -float('inf')]
xy = [np.array([self.points[kp] for kp in f]) for f in self.facets]
plt_kwargs = {}
for key, value in kwargs.items():
                if type(value) in (list, np.ndarray):
plt_value = []
for f_num, f_att in enumerate(self.facet_attributes):
if index_by == 'element':
ind = f_num
elif index_by == 'attribute':
ind = int(f_att)
else:
e_str = 'Cannot index by {}.'.format(index_by)
raise ValueError(e_str)
if ind < len(value):
v = value[ind]
else:
v = 'none'
plt_value.append(v)
else:
plt_value = value
plt_kwargs[key] = plt_value
pc = Poly3DCollection(xy, **plt_kwargs)
ax.add_collection(pc)
# Add legend
if material and index_by == 'attribute':
p_kwargs = [{'label': m} for m in material]
for key, value in kwargs.items():
                if type(value) not in (list, np.ndarray):
for kws in p_kwargs:
kws[key] = value
for i, m in enumerate(material):
                    if type(value) in (list, np.ndarray):
p_kwargs[i][key] = value[i]
else:
p_kwargs[i][key] = value
# Replace plural keywords
for p_kw in p_kwargs:
for kw in _misc.mpl_plural_kwargs:
if kw in p_kw:
p_kw[kw[:-1]] = p_kw[kw]
del p_kw[kw]
handles = [patches.Patch(**p_kw) for p_kw in p_kwargs]
ax.legend(handles=handles, loc=loc)
# Adjust Axes
mins = np.array(self.points).min(axis=0)
maxs = np.array(self.points).max(axis=0)
xlim = (min(xlim[0], mins[0]), max(xlim[1], maxs[0]))
ylim = (min(ylim[0], mins[1]), max(ylim[1], maxs[1]))
if n_dim == 2:
plt.axis('square')
plt.xlim(xlim)
plt.ylim(ylim)
elif n_dim == 3:
zlim = (min(zlim[0], mins[2]), max(zlim[1], maxs[2]))
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_zlim(zlim)
_misc.axisEqual3D(ax)
# --------------------------------------------------------------------------- #
# #
# RasterMesh Class #
# #
# --------------------------------------------------------------------------- #
class RasterMesh(TriMesh):
"""Raster mesh.
The RasterMesh class contains the points and elements in a raster mesh,
    also called a regular grid.
The points attribute is an Nx2 or Nx3 list of points in the mesh.
The elements attribute contains the Nx4 or Nx8 list of the points at
the corners of each pixel/voxel. A list of facets can also be
included, though it is optional and does not need to include every facet
in the mesh. Attributes can also be assigned to the elements and facets,
though they are also optional.
Args:
points (list, numpy.ndarray): List of coordinates in the mesh.
elements (list, numpy.ndarray): List of indices of the points at
the corners of each element. The shape should be Nx3 in 2D or
Nx4 in 3D.
element_attributes (list, numpy.ndarray): *(optional)* A number
associated with each element.
Defaults to None.
facets (list, numpy.ndarray): *(optional)* A list of facets in the
mesh. The shape should be Nx2 in 2D or Nx3 in 3D.
Defaults to None.
facet_attributes (list, numpy.ndarray): *(optional)* A number
associated with each facet.
Defaults to None.
"""
# ----------------------------------------------------------------------- #
# Constructors #
# ----------------------------------------------------------------------- #
# Inherited from TriMesh
@classmethod
def from_polymesh(cls, polymesh, mesh_size, phases=None):
"""Create RasterMesh from PolyMesh.
        This constructor creates a raster mesh from a polygon
mesh (:class:`.PolyMesh`). Polygons of the same seed number are
merged and the element attribute is set to the seed number it is
within. The facets between seeds are saved to the mesh and the index
of the facet is stored in the facet attributes.
Since the PolyMesh can include phase numbers for each region,
additional information about the phases can be included as an input.
The "phases" input should be a list of material phase dictionaries,
formatted according to the :ref:`phase_dict_guide` guide.
The mesh_size option determines the side length of each pixel/voxel.
Element attributes are sampled at the center of each pixel/voxel.
If an edge of a domain is not an integer multiple of the mesh_size, it
will be clipped. For example, if mesh_size is 3 and an edge has
bounds [0, 11], the sides of the pixels will be at 0, 3, 6, and 9 while
the centers of the pixels will be at 1.5, 4.5, 7.5.
The phase type option can take one of several values, described below.
* **crystalline**: granular, solid
* **amorphous**: glass, matrix
* **void**: crack, hole
The **crystalline** option creates a mesh where cells of the same seed
number are merged, but cells are not merged across seeds. _This is
the default material type._
The **amorphous** option creates a mesh where cells of the same
phase number are merged to create an amorphous region in the mesh.
Finally, the **void** option will merge neighboring void cells and
treat them as holes in the mesh.
Args:
polymesh (PolyMesh): A polygon/polyhedron mesh.
mesh_size (float): The side length of each pixel/voxel.
phases (list): *(optional)* A list of dictionaries containing
options for each phase.
Default is
``{'material_type': 'solid', 'max_volume': float('inf')}``.
"""
# 1. Create node and element grids
p_pts = np.array(polymesh.points)
mins = p_pts.min(axis=0)
maxs = p_pts.max(axis=0)
lens = (maxs - mins)*(1 + 1e-9)
sides = [lb + np.arange(0, dlen, mesh_size) for lb, dlen in
zip(mins, lens)]
mgrid = np.meshgrid(*sides)
nodes = np.array([g.flatten() for g in mgrid]).T
node_nums = np.arange(mgrid[0].size).reshape(mgrid[0].shape)
n_dim = len(mins)
if n_dim == 2:
m, n = node_nums.shape
kp1 = node_nums[:(m-1), :(n-1)].flatten()
kp2 = node_nums[1:m, :(n-1)].flatten()
kp3 = node_nums[1:m, 1:n].flatten()
kp4 = node_nums[:(m-1), 1:n].flatten()
elems = np.array([kp1, kp2, kp3, kp4]).T
elif n_dim == 3:
m, n, p = node_nums.shape
kp1 = node_nums[:(m-1), :(n-1), :(p-1)].flatten()
kp2 = node_nums[1:m, :(n-1), :(p-1)].flatten()
kp3 = node_nums[1:m, 1:n, :(p-1)].flatten()
kp4 = node_nums[:(m-1), 1:n, :(p-1)].flatten()
kp5 = node_nums[:(m-1), :(n-1), 1:p].flatten()
kp6 = node_nums[1:m, :(n-1), 1:p].flatten()
kp7 = node_nums[1:m, 1:n, 1:p].flatten()
kp8 = node_nums[:(m-1), 1:n, 1:p].flatten()
elems = np.array([kp1, kp2, kp3, kp4, kp5, kp6, kp7, kp8]).T
else:
raise NotImplementedError
# 2. Compute element centers
cens = nodes[elems[:, 0]] + 0.5 * mesh_size
# 3. For each region:
i_remain = np.arange(cens.shape[0])
elem_regs = np.full(cens.shape[0], -1)
elem_atts = np.full(cens.shape[0], -1)
for r_num, region in enumerate(polymesh.regions):
# A. Create a bounding box
r_kps = np.unique([k for f in region for k in polymesh.facets[f]])
r_pts = p_pts[r_kps]
r_mins = r_pts.min(axis=0)
r_maxs = r_pts.max(axis=0)
# B. Isolate element centers with box
r_i_remain = np.copy(i_remain)
for i, lb in enumerate(r_mins):
ub = r_maxs[i]
x = cens[r_i_remain, i]
in_range = (x >= lb) & (x <= ub)
r_i_remain = r_i_remain[in_range]
# C. For each facet, remove centers on the wrong side
# note: regions are convex, so mean pt is on correct side of facets
r_cen = r_pts.mean(axis=0)
for f in region:
f_kps = polymesh.facets[f]
f_pts = p_pts[f_kps]
u_in, f_cen = _facet_in_normal(f_pts, r_cen)
rel_pos = cens[r_i_remain] - f_cen
dp = rel_pos.dot(u_in)
inside = dp >= 0
r_i_remain = r_i_remain[inside]
# D. Assign remaining centers to region
elem_regs[r_i_remain] = r_num
elem_atts[r_i_remain] = polymesh.seed_numbers[r_num]
i_remain = np.setdiff1d(i_remain, r_i_remain)
# 4. Combine regions of the same seed number
if phases is not None:
conv_dict = _amorphous_seed_numbers(polymesh, phases)
elem_atts = np.array([conv_dict.get(s, s) for s in elem_atts])
# 5. Define remaining facets, inherit their attributes
facets = []
facet_atts = []
for f_num, f_neighs in enumerate(polymesh.facet_neighbors):
n1, n2 = f_neighs
if n1 >= 0:
e1 = elems[elem_regs == n1]
e2 = elems[elem_regs == n2]
# Shift +x
e1_s = e1[:, 1]
e2_s = e2[:, 0]
mask = np.isin(e1_s, e2_s)
for elem in e1[mask]:
if n_dim == 2:
facet = elem[[1, 2]]
else:
facet = elem[[1, 2, 6, 5]]
facets.append(facet)
facet_atts.append(f_num)
# Shift -x
e1_s = e1[:, 0]
e2_s = e2[:, 1]
mask = np.isin(e1_s, e2_s)
for elem in e1[mask]:
if n_dim == 2:
facet = elem[[3, 0]]
else:
facet = elem[[0, 4, 7, 3]]
facets.append(facet)
facet_atts.append(f_num)
# Shift +y
e1_s = e1[:, 3]
e2_s = e2[:, 0]
mask = np.isin(e1_s, e2_s)
for elem in e1[mask]:
if n_dim == 2:
facet = elem[[2, 3]]
else:
facet = elem[[2, 3, 7, 6]]
facets.append(facet)
facet_atts.append(f_num)
# Shift -y
e1_s = e1[:, 0]
e2_s = e2[:, 3]
mask = np.isin(e1_s, e2_s)
for elem in e1[mask]:
if n_dim == 2:
facet = elem[[0, 1]]
else:
facet = elem[[0, 1, 5, 4]]
facets.append(facet)
facet_atts.append(f_num)
if n_dim < 3:
continue
# Shift +z
e1_s = e1[:, 4]
                e2_s = e2[:, 0]
mask = np.isin(e1_s, e2_s)
for elem in e1[mask]:
facet = elem[[4, 5, 6, 7]]
facets.append(facet)
facet_atts.append(f_num)
# Shift -z
e1_s = e1[:, 0]
                e2_s = e2[:, 4]
mask = np.isin(e1_s, e2_s)
for elem in e1[mask]:
facet = elem[[0, 1, 2, 3]]
facets.append(facet)
facet_atts.append(f_num)
elif n1 == -1:
# -x face
e2 = elems[elem_regs == n2]
x2 = nodes[e2[:, 0], 0]
mask = np.isclose(x2, mins[0])
for elem in e2[mask]:
if n_dim == 2:
facet = elem[[3, 0]]
else:
facet = elem[[0, 4, 7, 3]]
facets.append(facet)
facet_atts.append(f_num)
elif n1 == -2:
# +x face
e2 = elems[elem_regs == n2]
x2 = nodes[e2[:, 1], 0]
mask = np.isclose(x2, maxs[0])
for elem in e2[mask]:
if n_dim == 2:
facet = elem[[1, 2]]
else:
facet = elem[[1, 2, 6, 5]]
facets.append(facet)
facet_atts.append(f_num)
elif n1 == -3:
# -y face
e2 = elems[elem_regs == n2]
x2 = nodes[e2[:, 0], 1]
mask = np.isclose(x2, mins[1])
for elem in e2[mask]:
if n_dim == 2:
facet = elem[[0, 1]]
else:
facet = elem[[0, 1, 5, 4]]
facets.append(facet)
facet_atts.append(f_num)
elif n1 == -4:
# +y face
e2 = elems[elem_regs == n2]
x2 = nodes[e2[:, 2], 1]
mask = np.isclose(x2, maxs[1])
for elem in e2[mask]:
if n_dim == 2:
facet = elem[[2, 3]]
else:
facet = elem[[2, 3, 7, 6]]
facets.append(facet)
facet_atts.append(f_num)
elif n1 == -5:
# -z face
e2 = elems[elem_regs == n2]
x2 = nodes[e2[:, 0], 2]
mask = np.isclose(x2, mins[2])
for elem in e2[mask]:
facet = elem[[0, 1, 2, 3]]
facets.append(facet)
facet_atts.append(f_num)
elif n1 == -6:
# +z face
e2 = elems[elem_regs == n2]
x2 = nodes[e2[:, 4], 2]
                mask = np.isclose(x2, maxs[2])
for elem in e2[mask]:
facet = elem[[4, 5, 6, 7]]
facets.append(facet)
facet_atts.append(f_num)
# 6. Remove voids and excess cells
if phases is not None:
att_rm = [-1]
for i, phase in enumerate(phases):
if phase.get('material_type', 'solid') in _misc.kw_void:
r_mask = np.array(polymesh.phase_numbers) == i
seeds = np.unique(np.array(polymesh.seed_numbers)[r_mask])
att_rm.extend(list(seeds))
# Remove elements
rm_mask = np.isin(elem_atts, att_rm)
elems = elems[~rm_mask]
elem_atts = elem_atts[~rm_mask]
# Re-number nodes
nodes_mask = np.isin(np.arange(nodes.shape[0]), elems)
n_remain = np.sum(nodes_mask)
node_n_conv = np.arange(nodes.shape[0])
node_n_conv[nodes_mask] = np.arange(n_remain)
nodes = nodes[nodes_mask]
elems = node_n_conv[elems]
if len(facets) > 0:
f_keep = np.all(nodes_mask[facets], axis=1)
facets = node_n_conv[np.array(facets)[f_keep, :]]
facet_atts = np.array(facet_atts)[f_keep]
return cls(nodes, elems, elem_atts, facets, facet_atts)
# ----------------------------------------------------------------------- #
# String and Representation Functions #
# ----------------------------------------------------------------------- #
# __str__ inherited from TriMesh
def __repr__(self):
repr_str = 'RasterMesh('
repr_str += ', '.join([repr(v) for v in (self.points, self.elements,
self.element_attributes, self.facets,
self.facet_attributes)])
repr_str += ')'
return repr_str
# ----------------------------------------------------------------------- #
# Write Function #
# ----------------------------------------------------------------------- #
def write(self, filename, format='txt', seeds=None, polymesh=None):
"""Write mesh to file.
This function writes the contents of the mesh to a file.
The format options are 'abaqus', 'txt', and 'vtk'.
See the :ref:`s_tri_file_io` section of the :ref:`c_file_formats`
guide for more details on these formats.
Args:
filename (str): The name of the file to write.
format (str): {'abaqus' | 'txt' | 'vtk'}
*(optional)* The format of the output file.
Default is 'txt'.
seeds (SeedList): *(optional)* List of seeds. If given, VTK files
                will also include the phase number of each element in the
mesh. This assumes the ``element_attributes``
field contains the seed number of each element.
polymesh (PolyMesh): *(optional)* Polygonal mesh used for
generating the raster mesh. If given, will add surface
unions to Abaqus files - for easier specification of
boundary conditions.
""" # NOQA: E501
fmt = format.lower()
if fmt == 'abaqus':
# write top matter
abaqus = '*Heading\n'
abaqus += '** Job name: microstructure '
abaqus += 'Model name: microstructure_model\n'
abaqus += '** Generated by: MicroStructPy\n'
# write parts
abaqus += '**\n** PARTS\n**\n'
abaqus += '*Part, name=Part-1\n'
abaqus += '*Node\n'
abaqus += ''.join([str(i + 1) + ''.join([', ' + str(x) for x in
pt]) + '\n' for i, pt in
enumerate(self.points)])
n_dim = len(self.points[0])
elem_type = {2: 'CPS4', 3: 'C3D8'}[n_dim]
abaqus += '*Element, type=' + elem_type + '\n'
abaqus += ''.join([str(i + 1) + ''.join([', ' + str(kp + 1) for kp
in elem]) + '\n' for
i, elem in enumerate(self.elements)])
# Element sets - seed number
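            # Element IDs are written at most 16 per data line (the usual Abaqus
            # limit on entries per data line for set definitions).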
elset_n_per = 16
elem_atts = np.array(self.element_attributes)
for att in np.unique(elem_atts):
elset_name = 'Set-E-Seed-' + str(att)
elset_str = '*Elset, elset=' + elset_name + '\n'
elem_groups = [[]]
for elem_ind, elem_att in enumerate(elem_atts):
                    if not np.isclose(elem_att, att):
continue
if len(elem_groups[-1]) >= elset_n_per:
elem_groups.append([])
elem_groups[-1].append(elem_ind + 1)
for group in elem_groups:
elset_str += ','.join([str(i) for i in group])
elset_str += '\n'
abaqus += elset_str
# Element Sets - phase number
if seeds is not None:
phase_nums = np.array([seed.phase for seed in seeds])
for phase_num in np.unique(phase_nums):
mask = phase_nums == phase_num
seed_nums = np.nonzero(mask)[0]
elset_name = 'Set-E-Material-' + str(phase_num)
elset_str = '*Elset, elset=' + elset_name + '\n'
groups = [[]]
for seed_num in seed_nums:
if seed_num not in elem_atts:
continue
if len(groups[-1]) >= elset_n_per:
groups.append([])
seed_elset_name = 'Set-E-Seed-' + str(seed_num)
groups[-1].append(seed_elset_name)
for group in groups:
elset_str += ','.join(group)
elset_str += '\n'
abaqus += elset_str
# Surfaces - Exterior and Interior
facets = np.array(self.facets)
facet_atts = np.array(self.facet_attributes)
face_ids = {2: [2, 3, 1], 3: [3, 4, 2, 1]}[n_dim]
for att in np.unique(facet_atts):
facet_name = 'Surface-' + str(att)
surf_str = '*Surface, name=' + facet_name + ', type=element\n'
att_facets = facets[facet_atts == att]
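                # Each facet is matched to the element that shares the most of its
                # nodes; the element node that is *not* on the facet then fixes the
                # Abaqus face label (S1, S2, ...).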
for facet in att_facets:
mask = np.isin(self.elements, facet)
n_match = mask.astype('int').sum(axis=1)
i_elem = np.argmax(n_match)
elem_id = i_elem + 1
i_missing = np.argmin(mask[i_elem])
face_id = face_ids[i_missing]
surf_str += str(elem_id) + ', S' + str(face_id) + '\n'
abaqus += surf_str
# Surfaces - Exterior
poly_neighbors = np.array(polymesh.facet_neighbors)
poly_mask = np.any(poly_neighbors < 0, axis=1)
neigh_nums = np.min(poly_neighbors, axis=1)
u_neighs = np.unique(neigh_nums[poly_mask])
for neigh_num in u_neighs:
mask = neigh_nums == neigh_num
facet_name = 'Ext-Surface-' + str(-neigh_num)
surf_str = '*Surface, name=' + facet_name + ', combine=union\n'
for i, flag in enumerate(mask):
if flag:
surf_str += 'Surface-' + str(i) + '\n'
abaqus += surf_str
# End Part
abaqus += '*End Part\n\n'
# Assembly
abaqus += '**\n'
abaqus += '** ASSEMBLY\n'
abaqus += '**\n'
abaqus += '*Assembly, name=assembly\n'
abaqus += '**\n'
# Instances
abaqus += '*Instance, name=I-Part-1, part=Part-1\n'
abaqus += '*End Instance\n'
# End Assembly
abaqus += '**\n'
abaqus += '*End Assembly\n'
with open(filename, 'w') as file:
file.write(abaqus)
elif fmt in ('str', 'txt'):
with open(filename, 'w') as file:
file.write(str(self) + '\n')
elif fmt == 'vtk':
n_kp = len(self.elements[0])
mesh_type = {4: 'Pixel', 8: 'Voxel'}[n_kp]
pt_fmt = '{: f} {: f} {: f}\n'
# write heading
vtk = '# vtk DataFile Version 2.0\n'
vtk += '{} mesh\n'.format(mesh_type)
vtk += 'ASCII\n'
vtk += 'DATASET UNSTRUCTURED_GRID\n'
# Write points
vtk += 'POINTS ' + str(len(self.points)) + ' float\n'
if len(self.points[0]) == 2:
vtk += ''.join([pt_fmt.format(x, y, 0) for x, y in
self.points])
else:
vtk += ''.join([pt_fmt.format(x, y, z) for x, y, z in
self.points])
# write elements
n_elem = len(self.elements)
cell_fmt = str(n_kp) + n_kp * ' {}' + '\n'
cell_sz = (1 + n_kp) * n_elem
vtk += '\nCELLS ' + str(n_elem) + ' ' + str(cell_sz) + '\n'
vtk += ''.join([cell_fmt.format(*el) for el in self.elements])
# write cell type
vtk += '\nCELL_TYPES ' + str(n_elem) + '\n'
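            # VTK cell type 9 is a quadrilateral and 12 is a hexahedron, matching
            # the corner ordering used for the elements above.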
cell_type = {4: '9', 8: '12'}[n_kp]
vtk += ''.join(n_elem * [cell_type + '\n'])
# write element attributes
try:
int(self.element_attributes[0])
att_type = 'int'
except TypeError:
att_type = 'float'
vtk += '\nCELL_DATA ' + str(n_elem) + '\n'
vtk += 'SCALARS element_attributes ' + att_type + ' 1 \n'
vtk += 'LOOKUP_TABLE element_attributes\n'
vtk += ''.join([str(a) + '\n' for a in self.element_attributes])
# Write phase numbers
if seeds is not None:
vtk += '\nSCALARS phase_numbers int 1 \n'
vtk += 'LOOKUP_TABLE phase_numbers\n'
vtk += ''.join([str(seeds[a].phase) + '\n' for a in
self.element_attributes])
with open(filename, 'w') as file:
file.write(vtk)
else:
e_str = 'Cannot write file type ' + str(format) + ' yet.'
raise NotImplementedError(e_str)
# ----------------------------------------------------------------------- #
# As Array Functions #
# ----------------------------------------------------------------------- #
@property
def mesh_size(self):
"""Side length of elements."""
e0 = self.elements[0]
s0 = np.array(self.points[e0[1]]) - np.array(self.points[e0[0]])
return np.linalg.norm(s0)
def as_array(self, element_attributes=True):
"""numpy.ndarray containing element attributes.
Array contains -1 where there are no elements (e.g. circular domains).
Args:
element_attributes (bool): *(optional)* Flag to return element
                attributes in the array. Set to True to return attributes and
set to False to return element indices. Defaults to True.
Returns:
            numpy.ndarray: Array of values of element attributes, or indices.
"""
# 1. Convert 1st node of each element into array indices
pts =
|
np.array(self.points)
|
numpy.array
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A collection of useful functions.
"""
from __future__ import (print_function, division)
from six.moves import range
import sys
import warnings
import math
from scipy.special import logsumexp
import numpy as np
import copy
from .results import Results
__all__ = ["unitcheck", "resample_equal", "mean_and_cov", "quantile",
"jitter_run", "resample_run", "simulate_run", "reweight_run",
"unravel_run", "merge_runs", "kl_divergence", "kld_error",
"_merge_two", "_get_nsamps_samples_n"]
SQRTEPS = math.sqrt(float(np.finfo(np.float64).eps))
def unitcheck(u, nonperiodic=None):
"""Check whether `u` is inside the unit cube. Given a masked array
`nonperiodic`, also allows periodic boundaries conditions to exceed
the unit cube."""
if nonperiodic is None:
# No periodic boundary conditions provided.
return np.all(u > 0.) and np.all(u < 1.)
else:
# Alternating periodic and non-periodic boundary conditions.
return (np.all(u[nonperiodic] > 0.) and
np.all(u[nonperiodic] < 1.) and
np.all(u[~nonperiodic] > -0.5) and
np.all(u[~nonperiodic] < 1.5))
def mean_and_cov(samples, weights):
"""
Compute the weighted mean and covariance of the samples.
Parameters
----------
samples : `~numpy.ndarray` with shape (nsamples, ndim)
2-D array containing data samples. This ordering is equivalent to
using `rowvar=False` in `~numpy.cov`.
weights : `~numpy.ndarray` with shape (nsamples,)
1-D array of sample weights.
Returns
-------
mean : `~numpy.ndarray` with shape (ndim,)
Weighted sample mean vector.
cov : `~numpy.ndarray` with shape (ndim, ndim)
Weighted sample covariance matrix.
Notes
-----
Implements the formulae found `here <https://goo.gl/emWFLR>`_.
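
    Examples
    --------
    A small illustrative sketch (values chosen only for demonstration):

    >>> x = np.array([[1., 2.], [3., 4.]])
    >>> w = np.array([0.5, 0.5])
    >>> mean, cov = mean_and_cov(x, w)  # mean -> [2., 3.], cov -> [[2., 2.], [2., 2.]]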
"""
# Compute the weighted mean.
mean = np.average(samples, weights=weights, axis=0)
# Compute the weighted covariance.
dx = samples - mean
wsum = np.sum(weights)
w2sum = np.sum(weights**2)
cov = wsum / (wsum**2 - w2sum) * np.einsum('i,ij,ik', weights, dx, dx)
return mean, cov
def resample_equal(samples, weights, rstate=None):
"""
Resample a new set of points from the weighted set of inputs
such that they all have equal weight.
Each input sample appears in the output array either
`floor(weights[i] * nsamples)` or `ceil(weights[i] * nsamples)` times,
with `floor` or `ceil` randomly selected (weighted by proximity).
Parameters
----------
samples : `~numpy.ndarray` with shape (nsamples,)
Set of unequally weighted samples.
weights : `~numpy.ndarray` with shape (nsamples,)
Corresponding weight of each sample.
rstate : `~numpy.random.RandomState`, optional
`~numpy.random.RandomState` instance.
Returns
-------
equal_weight_samples : `~numpy.ndarray` with shape (nsamples,)
New set of samples with equal weights.
Examples
--------
>>> x = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
>>> w = np.array([0.6, 0.2, 0.15, 0.05])
>>> utils.resample_equal(x, w)
array([[ 1., 1.],
[ 1., 1.],
[ 1., 1.],
[ 3., 3.]])
Notes
-----
Implements the systematic resampling method described in `<NAME>, and
Gustafsson (2006) <doi:10.1109/NSSPW.2006.4378824>`_.
"""
if rstate is None:
rstate = np.random
if abs(np.sum(weights) - 1.) > SQRTEPS: # same tol as in np.random.choice.
raise ValueError("Weights do not sum to 1.")
# Make N subdivisions and choose positions with a consistent random offset.
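    # Each position is u/N + i/N for i = 0..N-1, with a single random u shared by
    # all positions; stepping these through the cumulative weights copies each
    # sample a number of times proportional to its weight.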
nsamples = len(weights)
positions = (rstate.random() + np.arange(nsamples)) / nsamples
# Resample the data.
    idx = np.zeros(nsamples, dtype=int)
cumulative_sum = np.cumsum(weights)
i, j = 0, 0
while i < nsamples:
if positions[i] < cumulative_sum[j]:
idx[i] = j
i += 1
else:
j += 1
return samples[idx]
def quantile(x, q, weights=None):
"""
Compute (weighted) quantiles from an input set of samples.
Parameters
----------
x : `~numpy.ndarray` with shape (nsamps,)
Input samples.
q : `~numpy.ndarray` with shape (nquantiles,)
The list of quantiles to compute from `[0., 1.]`.
weights : `~numpy.ndarray` with shape (nsamps,), optional
The associated weight from each sample.
Returns
-------
quantiles : `~numpy.ndarray` with shape (nquantiles,)
The weighted sample quantiles computed at `q`.
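
    Examples
    --------
    A minimal illustrative sketch (added here, not from the original docs):

    >>> x = np.array([1., 2., 3., 4.])
    >>> med = quantile(x, [0.5])                        # unweighted -> array([2.5])
    >>> med_w = quantile(x, [0.5], weights=np.ones(4))  # equal weights -> [2.5]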
"""
# Initial check.
x = np.atleast_1d(x)
q = np.atleast_1d(q)
# Quantile check.
if np.any(q < 0.0) or np.any(q > 1.0):
raise ValueError("Quantiles must be between 0. and 1.")
if weights is None:
# If no weights provided, this simply calls `np.percentile`.
return np.percentile(x, list(100.0 * q))
else:
# If weights are provided, compute the weighted quantiles.
weights = np.atleast_1d(weights)
if len(x) != len(weights):
raise ValueError("Dimension mismatch: len(weights) != len(x).")
idx = np.argsort(x) # sort samples
sw = weights[idx] # sort weights
cdf = np.cumsum(sw)[:-1] # compute CDF
cdf /= cdf[-1] # normalize CDF
cdf = np.append(0, cdf) # ensure proper span
quantiles = np.interp(q, cdf, x[idx]).tolist()
return quantiles
def _get_nsamps_samples_n(res):
""" Helper function for calculating the number of samples
Parameters
----------
res : :class:`~dynesty.results.Results` instance
The :class:`~dynesty.results.Results` instance taken from a previous
nested sampling run.
Returns
-------
nsamps: int
The total number of samples
samples_n: array
Number of live points at a given iteration
"""
try:
# Check if the number of live points explicitly changes.
samples_n = res.samples_n
nsamps = len(samples_n)
except:
# If the number of live points is constant, compute `samples_n`.
niter = res.niter
nlive = res.nlive
nsamps = len(res.logvol)
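        # Two possibilities: either the run stopped after `niter` iterations, or
        # the final set of `nlive` live points was appended at the end, in which
        # case the effective number of live points ramps down from `nlive` to 1.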
if nsamps == niter:
samples_n = np.ones(niter, dtype='int') * nlive
elif nsamps == (niter + nlive):
samples_n = np.append(np.ones(niter, dtype='int') * nlive,
np.arange(1, nlive + 1)[::-1])
else:
raise ValueError("Final number of samples differs from number of "
"iterations and number of live points.")
return nsamps, samples_n
def jitter_run(res, rstate=None, approx=False):
"""
Probes **statistical uncertainties** on a nested sampling run by
explicitly generating a *realization* of the prior volume associated
with each sample (dead point). Companion function to :meth:`resample_run`
and :meth:`simulate_run`.
Parameters
----------
res : :class:`~dynesty.results.Results` instance
The :class:`~dynesty.results.Results` instance taken from a previous
nested sampling run.
rstate : `~numpy.random.RandomState`, optional
`~numpy.random.RandomState` instance.
approx : bool, optional
Whether to approximate all sets of uniform order statistics by their
associated marginals (from the Beta distribution). Default is `False`.
Returns
-------
new_res : :class:`~dynesty.results.Results` instance
A new :class:`~dynesty.results.Results` instance with corresponding
weights based on our "jittered" prior volume realizations.
"""
if rstate is None:
rstate = np.random
# Initialize evolution of live points over the course of the run.
nsamps, samples_n = _get_nsamps_samples_n(res)
logl = res.logl
# Simulate the prior volume shrinkage associated with our set of "dead"
# points. At each iteration, if the number of live points is constant or
# increasing, our prior volume compresses by the maximum value of a set
# of `K_i` uniformly distributed random numbers (i.e. as `Beta(K_i, 1)`).
# If instead the number of live points is decreasing, that means we're
# instead sampling down a set of uniform random variables
# (i.e. uniform order statistics).
nlive_flag = np.ones(nsamps, dtype='bool')
nlive_start, bounds = [], []
if not approx:
# Find all instances where the number of live points is either constant
# or increasing.
nlive_flag[1:] = np.diff(samples_n) >= 0
# For all the portions that are decreasing, find out where they start,
# where they end, and how many live points are present at that given
# iteration.
if np.any(~nlive_flag):
i = 0
while i < nsamps:
if not nlive_flag[i]:
bound = []
bound.append(i-1)
nlive_start.append(samples_n[i-1])
while i < nsamps and not nlive_flag[i]:
i += 1
bound.append(i)
bounds.append(bound)
i += 1
# The maximum out of a set of `K_i` uniformly distributed random variables
# has a marginal distribution of `Beta(K_i, 1)`.
t_arr = np.zeros(nsamps)
t_arr[nlive_flag] = rstate.beta(a=samples_n[nlive_flag], b=1)
# If we instead are sampling the set of uniform order statistics,
# we note that the jth largest value is marginally distributed as
# `Beta(j, K_i-j+1)`. The full joint distribution is::
#
# X_(j) / X_N = (Y_1 + ... + Y_j) / (Y_1 + ... + Y_{K+1})
#
# where X_(j) is the prior volume of the live point with the `j`-th
# *best* likelihood (i.e. prior volume shrinks as likelihood increases)
# and the `Y_i`'s are i.i.d. exponentially distributed random variables.
nunif = len(nlive_start)
for i in range(nunif):
nstart = nlive_start[i]
bound = bounds[i]
sn = samples_n[bound[0]:bound[1]]
y_arr = rstate.exponential(scale=1.0, size=nstart+1)
ycsum = y_arr.cumsum()
ycsum /= ycsum[-1]
uorder = ycsum[np.append(nstart, sn-1)]
rorder = uorder[1:] / uorder[:-1]
t_arr[bound[0]:bound[1]] = rorder
# These are the "compression factors" at each iteration. Let's turn
# these into associated ln(volumes).
logvol = np.log(t_arr).cumsum()
# Compute weights using quadratic estimator.
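    # The weights follow the trapezoid rule: each sample contributes
    # 0.5 * (L_i + L_{i-1}) * (X_{i-1} - X_i), evaluated in log space (`logdvols`
    # holds the log of half the volume difference and `np.logaddexp` combines the
    # two likelihoods). `h` accumulates the information, used for the logz error.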
h = 0.
logz = -1.e300
loglstar = -1.e300
logzvar = 0.
logvols_pad = np.concatenate(([0.], logvol))
    logdvols = logsumexp(a=np.c_[logvols_pad[:-1], logvols_pad[1:]],
                         axis=1, b=np.c_[np.ones(nsamps), -np.ones(nsamps)])
logdvols += math.log(0.5)
dlvs = -np.diff(np.append(0., res.logvol))
saved_logwt, saved_logz, saved_logzvar, saved_h = [], [], [], []
for i in range(nsamps):
loglstar_new = logl[i]
logdvol, dlv = logdvols[i], dlvs[i]
logwt = np.logaddexp(loglstar_new, loglstar) + logdvol
logz_new = np.logaddexp(logz, logwt)
lzterm = (math.exp(loglstar - logz_new) * loglstar +
math.exp(loglstar_new - logz_new) * loglstar_new)
h_new = (math.exp(logdvol) * lzterm +
math.exp(logz - logz_new) * (h + logz) -
logz_new)
dh = h_new - h
h = h_new
logz = logz_new
logzvar += dh * dlv
loglstar = loglstar_new
saved_logwt.append(logwt)
saved_logz.append(logz)
saved_logzvar.append(logzvar)
saved_h.append(h)
# Copy results.
new_res = Results([item for item in res.items()])
# Overwrite items with our new estimates.
new_res.logvol = np.array(logvol)
new_res.logwt = np.array(saved_logwt)
new_res.logz = np.array(saved_logz)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
new_res.logzerr = np.sqrt(np.array(saved_logzvar))
new_res.h = np.array(saved_h)
return new_res
def resample_run(res, rstate=None, return_idx=False):
"""
Probes **sampling uncertainties** on a nested sampling run using bootstrap
resampling techniques to generate a *realization* of the (expected) prior
volume(s) associated with each sample (dead point). This effectively
splits a nested sampling run with `K` particles (live points) into a
series of `K` "strands" (i.e. runs with a single live point) which are then
bootstrapped to construct a new "resampled" run. Companion function to
:meth:`jitter_run` and :meth:`simulate_run`.
Parameters
----------
res : :class:`~dynesty.results.Results` instance
The :class:`~dynesty.results.Results` instance taken from a previous
nested sampling run.
rstate : `~numpy.random.RandomState`, optional
`~numpy.random.RandomState` instance.
return_idx : bool, optional
Whether to return the list of resampled indices used to construct
the new run. Default is `False`.
Returns
-------
new_res : :class:`~dynesty.results.Results` instance
A new :class:`~dynesty.results.Results` instance with corresponding
samples and weights based on our "bootstrapped" samples and
(expected) prior volumes.
"""
if rstate is None:
rstate = np.random
    # Check whether the final set of live points was added to the
# run.
nsamps = len(res.ncall)
try:
# Check if the number of live points explicitly changes.
samples_n = res.samples_n
samples_batch = res.samples_batch
batch_bounds = res.batch_bounds
added_final_live = True
except:
# If the number of live points is constant, compute `samples_n` and
# set up the `added_final_live` flag.
nlive = res.nlive
niter = res.niter
if nsamps == niter:
samples_n = np.ones(niter, dtype='int') * nlive
added_final_live = False
elif nsamps == (niter + nlive):
samples_n = np.append(np.ones(niter, dtype='int') * nlive,
np.arange(1, nlive + 1)[::-1])
added_final_live = True
else:
raise ValueError("Final number of samples differs from number of "
"iterations and number of live points.")
samples_batch = np.zeros(len(samples_n), dtype='int')
batch_bounds = np.array([(-np.inf, np.inf)])
batch_llmin = batch_bounds[:, 0]
# Identify unique particles that make up each strand.
ids = np.unique(res.samples_id)
# Split the set of strands into two groups: a "baseline" group that
# contains points initially sampled from the prior, which gives information
# on the evidence, and an "add-on" group, which gives additional
# information conditioned on our baseline strands.
base_ids = []
addon_ids = []
for i in ids:
sbatch = samples_batch[res.samples_id == i]
if np.any(batch_llmin[sbatch] == -np.inf):
base_ids.append(i)
else:
addon_ids.append(i)
nbase, nadd = len(base_ids), len(addon_ids)
base_ids, addon_ids = np.array(base_ids), np.array(addon_ids)
# Resample strands.
if nbase > 0 and nadd > 0:
live_idx = np.append(base_ids[rstate.randint(0, nbase, size=nbase)],
addon_ids[rstate.randint(0, nadd, size=nadd)])
elif nbase > 0:
live_idx = base_ids[rstate.randint(0, nbase, size=nbase)]
elif nadd > 0:
raise ValueError("The provided `Results` does not include any points "
"initially sampled from the prior!")
else:
raise ValueError("The provided `Results` does not appear to have "
"any particles!")
# Find corresponding indices within the original run.
samp_idx = np.arange(len(res.ncall))
samp_idx = np.concatenate([samp_idx[res.samples_id == idx]
for idx in live_idx])
# Derive new sample size.
nsamps = len(samp_idx)
# Sort the loglikelihoods (there will be duplicates).
logls = res.logl[samp_idx]
idx_sort = np.argsort(logls)
samp_idx = samp_idx[idx_sort]
logl = res.logl[samp_idx]
if added_final_live:
# Compute the effective number of live points for each sample.
samp_n = np.zeros(nsamps, dtype='int')
uidxs, uidxs_n = np.unique(live_idx, return_counts=True)
for uidx, uidx_n in zip(uidxs, uidxs_n):
sel = (res.samples_id == uidx) # selection flag
sbatch = samples_batch[sel][0] # corresponding batch ID
lower = batch_llmin[sbatch] # lower bound
upper = max(res.logl[sel]) # upper bound
# Add number of live points between endpoints equal to number of
# times the strand has been resampled.
samp_n[(logl > lower) & (logl < upper)] += uidx_n
# At the endpoint, divide up the final set of points into `uidx_n`
# (roughly) equal chunks and have live points decrease across them.
endsel = (logl == upper)
endsel_n = np.count_nonzero(endsel)
chunk = endsel_n / uidx_n # define our chunk
counters = np.array(np.arange(endsel_n) / chunk, dtype='int')
nlive_end = counters[::-1] + 1 # decreasing number of live points
samp_n[endsel] += nlive_end # add live point sequence
else:
# If we didn't add the final set of live points, the run has a constant
# number of live points and can simply be re-ordered.
samp_n = samples_n[samp_idx]
# Assign log(volume) to samples.
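    # With n live points, each iteration shrinks the prior volume by an expected
    # factor of n / (n + 1), so ln(X_i) is the running sum of ln(n_i / (n_i + 1)).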
logvol = np.cumsum(np.log(samp_n / (samp_n + 1.)))
# Computing weights using quadratic estimator.
h = 0.
logz = -1.e300
loglstar = -1.e300
logzvar = 0.
logvols_pad = np.concatenate(([0.], logvol))
    logdvols = logsumexp(a=np.c_[logvols_pad[:-1], logvols_pad[1:]],
                         axis=1, b=np.c_[np.ones(nsamps), -np.ones(nsamps)])
logdvols += math.log(0.5)
dlvs = logvols_pad[:-1] - logvols_pad[1:]
saved_logwt, saved_logz, saved_logzvar, saved_h = [], [], [], []
for i in range(nsamps):
loglstar_new = logl[i]
logdvol, dlv = logdvols[i], dlvs[i]
logwt = np.logaddexp(loglstar_new, loglstar) + logdvol
logz_new = np.logaddexp(logz, logwt)
lzterm = (math.exp(loglstar - logz_new) * loglstar +
math.exp(loglstar_new - logz_new) * loglstar_new)
h_new = (math.exp(logdvol) * lzterm +
math.exp(logz - logz_new) * (h + logz) -
logz_new)
dh = h_new - h
h = h_new
logz = logz_new
logzvar += dh * dlv
loglstar = loglstar_new
saved_logwt.append(logwt)
saved_logz.append(logz)
saved_logzvar.append(logzvar)
saved_h.append(h)
# Compute sampling efficiency.
eff = 100. * len(res.ncall[samp_idx]) / sum(res.ncall[samp_idx])
# Copy results.
new_res = Results([item for item in res.items()])
# Overwrite items with our new estimates.
new_res.niter = len(res.ncall[samp_idx])
new_res.ncall = res.ncall[samp_idx]
new_res.eff = eff
new_res.samples = res.samples[samp_idx]
new_res.samples_id = res.samples_id[samp_idx]
new_res.samples_it = res.samples_it[samp_idx]
new_res.samples_u = res.samples_u[samp_idx]
new_res.samples_n = samp_n
new_res.logwt = np.array(saved_logwt)
new_res.logl = logl
new_res.logvol = logvol
new_res.logz = np.array(saved_logz)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
new_res.logzerr = np.sqrt(np.array(saved_logzvar))
new_res.h = np.array(saved_h)
if return_idx:
return new_res, samp_idx
else:
return new_res
def simulate_run(res, rstate=None, return_idx=False, approx=False):
"""
Probes **combined uncertainties** (statistical and sampling) on a nested
sampling run by wrapping :meth:`jitter_run` and :meth:`resample_run`.
Parameters
----------
res : :class:`~dynesty.results.Results` instance
The :class:`~dynesty.results.Results` instance taken from a previous
nested sampling run.
rstate : `~numpy.random.RandomState`, optional
`~numpy.random.RandomState` instance.
return_idx : bool, optional
Whether to return the list of resampled indices used to construct
the new run. Default is `False`.
approx : bool, optional
Whether to approximate all sets of uniform order statistics by their
associated marginals (from the Beta distribution). Default is `False`.
Returns
-------
new_res : :class:`~dynesty.results.Results` instance
A new :class:`~dynesty.results.Results` instance with corresponding
samples and weights based on our "simulated" samples and
prior volumes.
"""
if rstate is None:
rstate = np.random
# Resample run.
new_res, samp_idx = resample_run(res, rstate=rstate, return_idx=True)
# Jitter run.
new_res = jitter_run(new_res, rstate=rstate, approx=approx)
if return_idx:
return new_res, samp_idx
else:
return new_res
def reweight_run(res, logp_new, logp_old=None):
"""
Reweight a given run based on a new target distribution.
Parameters
----------
res : :class:`~dynesty.results.Results` instance
The :class:`~dynesty.results.Results` instance taken from a previous
nested sampling run.
logp_new : `~numpy.ndarray` with shape (nsamps,)
New target distribution evaluated at the location of the samples.
    logp_old : `~numpy.ndarray` with shape (nsamps,), optional
Old target distribution evaluated at the location of the samples.
If not provided, the `logl` values from `res` will be used.
Returns
-------
new_res : :class:`~dynesty.results.Results` instance
A new :class:`~dynesty.results.Results` instance with corresponding
weights based on our reweighted samples.
"""
# Extract info.
if logp_old is None:
logp_old = res['logl']
logrwt = logp_new - logp_old # ln(reweight)
logvol = res['logvol']
logl = res['logl']
nsamps = len(logvol)
# Compute weights using quadratic estimator.
h = 0.
logz = -1.e300
loglstar = -1.e300
logzvar = 0.
logvols_pad = np.concatenate(([0.], logvol))
    logdvols = logsumexp(a=np.c_[logvols_pad[:-1], logvols_pad[1:]],
                         axis=1, b=np.c_[np.ones(nsamps), -np.ones(nsamps)])
logdvols += math.log(0.5)
dlvs = -np.diff(np.append(0., logvol))
saved_logwt, saved_logz, saved_logzvar, saved_h = [], [], [], []
for i in range(nsamps):
loglstar_new = logl[i]
logdvol, dlv = logdvols[i], dlvs[i]
logwt =
|
np.logaddexp(loglstar_new, loglstar)
|
numpy.logaddexp
|
from __future__ import (absolute_import, division, print_function)
"""
Module for plotting data on maps with matplotlib.
Contains the :class:`Basemap` class (which does most of the
heavy lifting), and the following functions:
:func:`interp`: bilinear interpolation between rectilinear grids.
:func:`maskoceans`: mask 'wet' points of an input array.
:func:`shiftgrid`: shifts global lat/lon grids east or west.
:func:`addcyclic`: Add cyclic (wraparound) point in longitude.
"""
from distutils.version import LooseVersion
try:
from urllib import urlretrieve
from urllib2 import urlopen
except ImportError:
from urllib.request import urlretrieve, urlopen
from matplotlib import __version__ as _matplotlib_version
try:
from inspect import cleandoc as dedent
except ImportError:
# Deprecated as of version 3.1. Not quite the same
# as textwrap.dedent.
from matplotlib.cbook import dedent
# check to make sure matplotlib is not too old.
_matplotlib_version = LooseVersion(_matplotlib_version)
_mpl_required_version = LooseVersion('0.98')
if _matplotlib_version < _mpl_required_version:
msg = dedent("""
your matplotlib is too old - basemap requires version %s or
higher, you have version %s""" %
(_mpl_required_version,_matplotlib_version))
raise ImportError(msg)
from matplotlib import rcParams, is_interactive
from matplotlib.collections import LineCollection, PolyCollection
from matplotlib.patches import Ellipse, Circle, Polygon, FancyArrowPatch
from matplotlib.lines import Line2D
from matplotlib.transforms import Bbox
import pyproj
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.image import imread
import sys, os, math
from .proj import Proj
import numpy as np
import numpy.ma as ma
import _geoslib
import functools
# basemap data files now installed in lib/matplotlib/toolkits/basemap/data
# check to see if environment variable BASEMAPDATA set to a directory,
# and if so look for the data there.
if 'BASEMAPDATA' in os.environ:
basemap_datadir = os.environ['BASEMAPDATA']
if not os.path.isdir(basemap_datadir):
raise RuntimeError('Path in environment BASEMAPDATA not a directory')
else:
from mpl_toolkits import basemap_data
basemap_datadir = os.path.abspath(list(basemap_data.__path__)[0])
__version__ = "1.3.1+dev"
# module variable that sets the default value for the 'latlon' kwarg.
# can be set to True by user so plotting functions can take lons,lats
# in degrees by default, instead of x,y (map projection coords in meters).
latlon_default = False
# supported map projections.
_projnames = {'cyl' : 'Cylindrical Equidistant',
'merc' : 'Mercator',
'tmerc' : 'Transverse Mercator',
'omerc' : 'Oblique Mercator',
'mill' : 'Miller Cylindrical',
'gall' : 'Gall Stereographic Cylindrical',
'cea' : 'Cylindrical Equal Area',
'lcc' : 'Lambert Conformal',
'laea' : 'Lambert Azimuthal Equal Area',
'nplaea' : 'North-Polar Lambert Azimuthal',
'splaea' : 'South-Polar Lambert Azimuthal',
'eqdc' : 'Equidistant Conic',
'aeqd' : 'Azimuthal Equidistant',
'npaeqd' : 'North-Polar Azimuthal Equidistant',
'spaeqd' : 'South-Polar Azimuthal Equidistant',
'aea' : 'Albers Equal Area',
'stere' : 'Stereographic',
'npstere' : 'North-Polar Stereographic',
'spstere' : 'South-Polar Stereographic',
'cass' : 'Cassini-Soldner',
'poly' : 'Polyconic',
'ortho' : 'Orthographic',
'geos' : 'Geostationary',
'nsper' : 'Near-Sided Perspective',
'sinu' : 'Sinusoidal',
'moll' : 'Mollweide',
'hammer' : 'Hammer',
'robin' : 'Robinson',
'kav7' : 'Kavrayskiy VII',
'eck4' : 'Eckert IV',
'vandg' : 'van der Grinten',
'mbtfpq' : 'McBryde-Thomas Flat-Polar Quartic',
'gnom' : 'Gnomonic',
'rotpole' : 'Rotated Pole',
}
supported_projections = []
for _items in _projnames.items():
supported_projections.append(" %-17s%-40s\n" % (_items))
supported_projections = ''.join(supported_projections)
_cylproj = ['cyl','merc','mill','gall','cea']
_pseudocyl = ['moll','robin','eck4','kav7','sinu','mbtfpq','vandg','hammer']
_dg2rad = math.radians(1.)
_rad2dg = math.degrees(1.)
# projection specific parameters.
projection_params = {'cyl' : 'corners only (no width/height)',
'merc' : 'corners plus lat_ts (no width/height)',
'tmerc' : 'lon_0,lat_0,k_0',
'omerc' : 'lon_0,lat_0,lat_1,lat_2,lon_1,lon_2,no_rot,k_0',
'mill' : 'corners only (no width/height)',
'gall' : 'corners only (no width/height)',
'cea' : 'corners only plus lat_ts (no width/height)',
'lcc' : 'lon_0,lat_0,lat_1,lat_2,k_0',
'laea' : 'lon_0,lat_0',
'nplaea' : 'bounding_lat,lon_0,lat_0,no corners or width/height',
'splaea' : 'bounding_lat,lon_0,lat_0,no corners or width/height',
'eqdc' : 'lon_0,lat_0,lat_1,lat_2',
'aeqd' : 'lon_0,lat_0',
'npaeqd' : 'bounding_lat,lon_0,lat_0,no corners or width/height',
'spaeqd' : 'bounding_lat,lon_0,lat_0,no corners or width/height',
'aea' : 'lon_0,lat_0,lat_1',
'stere' : 'lon_0,lat_0,lat_ts,k_0',
'npstere' : 'bounding_lat,lon_0,lat_0,no corners or width/height',
'spstere' : 'bounding_lat,lon_0,lat_0,no corners or width/height',
'cass' : 'lon_0,lat_0',
'poly' : 'lon_0,lat_0',
'ortho' : 'lon_0,lat_0,llcrnrx,llcrnry,urcrnrx,urcrnry,no width/height',
'geos' : 'lon_0,satellite_height,llcrnrx,llcrnry,urcrnrx,urcrnry,no width/height',
'nsper' : 'lon_0,satellite_height,llcrnrx,llcrnry,urcrnrx,urcrnry,no width/height',
'sinu' : 'lon_0,lat_0,no corners or width/height',
'moll' : 'lon_0,lat_0,no corners or width/height',
'hammer' : 'lon_0,lat_0,no corners or width/height',
'robin' : 'lon_0,lat_0,no corners or width/height',
'eck4' : 'lon_0,lat_0,no corners or width/height',
'kav7' : 'lon_0,lat_0,no corners or width/height',
'vandg' : 'lon_0,lat_0,no corners or width/height',
'mbtfpq' : 'lon_0,lat_0,no corners or width/height',
'gnom' : 'lon_0,lat_0',
'rotpole' : 'lon_0,o_lat_p,o_lon_p,corner lat/lon or corner x,y (no width/height)'
}
# create dictionary that maps epsg codes to Basemap kwargs.
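# Each non-comment line of the bundled 'epsg' file has the form
#   <code> +proj=... +key=value ... <>
# Only the parameters Basemap understands are kept: 'proj' becomes the
# 'projection' keyword, 'a'/'b' are folded into 'rsphere', and 'datum' is
# translated to an equivalent 'ellps'.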
epsgf = open(os.path.join(basemap_datadir, 'epsg'))
epsg_dict={}
for line in epsgf:
if line.startswith("#"):
continue
l = line.split()
code = l[0].strip("<>")
parms = ' '.join(l[1:-1])
_kw_args={}
for s in l[1:-1]:
try:
k,v = s.split('=')
except:
pass
k = k.strip("+")
if k=='proj':
if v == 'longlat': v = 'cyl'
if v not in _projnames:
continue
k='projection'
if k=='k':
k='k_0'
if k in ['projection','lat_1','lat_2','lon_0','lat_0',\
'a','b','k_0','lat_ts','ellps','datum']:
if k not in ['projection','ellps','datum']:
v = float(v)
_kw_args[k]=v
if 'projection' in _kw_args:
if 'a' in _kw_args:
if 'b' in _kw_args:
_kw_args['rsphere']=(_kw_args['a'],_kw_args['b'])
del _kw_args['b']
else:
_kw_args['rsphere']=_kw_args['a']
del _kw_args['a']
if 'datum' in _kw_args:
if _kw_args['datum'] == 'NAD83':
_kw_args['ellps'] = 'GRS80'
elif _kw_args['datum'] == 'NAD27':
_kw_args['ellps'] = 'clrk66'
elif _kw_args['datum'] == 'WGS84':
_kw_args['ellps'] = 'WGS84'
del _kw_args['datum']
# supported epsg projections.
# omerc not supported yet, since we can't handle
# alpha,gamma and lonc keywords.
if _kw_args['projection'] != 'omerc':
epsg_dict[code]=_kw_args
epsgf.close()
# The __init__ docstring is pulled out here because it is so long;
# Having it in the usual place makes it hard to get from the
# __init__ argument list to the code that uses the arguments.
_Basemap_init_doc = """
Sets up a basemap with a specified map projection,
and creates the coastline data structures in map projection
coordinates.
Calling a Basemap class instance with the arguments lon, lat will
convert lon/lat (in degrees) to x/y map projection coordinates
(in meters). The inverse transformation is done if the optional keyword
``inverse`` is set to True.
The desired projection is set with the projection keyword. Default is ``cyl``.
Supported values for the projection keyword are:
============== ====================================================
Value Description
============== ====================================================
%(supported_projections)s
============== ====================================================
For most map projections, the map projection region can either be
specified by setting these keywords:
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
llcrnrlon longitude of lower left hand corner of the desired map
domain (degrees).
llcrnrlat latitude of lower left hand corner of the desired map
domain (degrees).
urcrnrlon longitude of upper right hand corner of the desired map
domain (degrees).
urcrnrlat latitude of upper right hand corner of the desired map
domain (degrees).
============== ====================================================
or these
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
width width of desired map domain in projection coordinates
(meters).
height height of desired map domain in projection coordinates
(meters).
lon_0 center of desired map domain (in degrees).
lat_0 center of desired map domain (in degrees).
============== ====================================================
For ``sinu``, ``moll``, ``hammer``, ``npstere``, ``spstere``, ``nplaea``, ``splaea``,
``npaeqd``, ``spaeqd``, ``robin``, ``eck4``, ``kav7``, or ``mbtfpq``, the values of
llcrnrlon, llcrnrlat, urcrnrlon, urcrnrlat, width and height are ignored
(because either they are computed internally, or entire globe is
always plotted).
For the cylindrical projections (``cyl``, ``merc``, ``mill``, ``cea`` and ``gall``),
the default is to use
llcrnrlon=-180, llcrnrlat=-90, urcrnrlon=180 and urcrnrlat=90. For all other
projections except ``ortho``, ``geos`` and ``nsper``, either the lat/lon values of the
corners or width and height must be specified by the user.
For ``ortho``, ``geos`` and ``nsper``, the lat/lon values of the corners may be specified,
or the x/y values of the corners (llcrnrx,llcrnry,urcrnrx,urcrnry) in the
coordinate system of the global projection (with x=0,y=0 at the center
of the global projection). If the corners are not specified,
the entire globe is plotted.
For ``rotpole``, the lat/lon values of the corners on the unrotated sphere
may be provided as llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat, or the lat/lon
values of the corners on the rotated sphere can be given as
llcrnrx,llcrnry,urcrnrx,urcrnry.
Other keyword arguments:
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
resolution resolution of boundary database to use. Can be ``c``
(crude), ``l`` (low), ``i`` (intermediate), ``h``
(high), ``f`` (full) or None.
If None, no boundary data will be read in (and
class methods such as drawcoastlines will raise an
               exception if invoked).
Resolution drops off by roughly 80%% between datasets.
Higher res datasets are much slower to draw.
Default ``c``. Coastline data is from the GSHHS
(http://www.soest.hawaii.edu/wessel/gshhs/gshhs.html).
State, country and river datasets from the Generic
Mapping Tools (http://gmt.soest.hawaii.edu).
area_thresh coastline or lake with an area smaller than
area_thresh in km^2 will not be plotted.
Default 10000,1000,100,10,1 for resolution
``c``, ``l``, ``i``, ``h``, ``f``.
rsphere radius of the sphere used to define map projection
(default 6370997 meters, close to the arithmetic mean
radius of the earth). If given as a sequence, the
first two elements are interpreted as the radii
of the major and minor axes of an ellipsoid.
Note: sometimes an ellipsoid is specified by the
major axis and an inverse flattening parameter (if).
The minor axis (b) can be computed from the major
axis (a) and the inverse flattening parameter using
the formula if = a/(a-b).
ellps string describing ellipsoid ('GRS80' or 'WGS84',
for example). If both rsphere and ellps are given,
rsphere is ignored. Default None. See pyproj.pj_ellps
for allowed values.
suppress_ticks suppress automatic drawing of axis ticks and labels
in map projection coordinates. Default True,
so parallels and meridians can be labelled instead.
If parallel or meridian labelling is requested
(using drawparallels and drawmeridians methods),
               automatic tick labelling will be suppressed even if
suppress_ticks=False. suppress_ticks=False
is useful if you want to use your own custom tick
formatter, or if you want to let matplotlib label
the axes in meters using map projection
coordinates.
fix_aspect fix aspect ratio of plot to match aspect ratio
of map projection region (default True).
anchor determines how map is placed in axes rectangle
(passed to axes.set_aspect). Default is ``C``,
which means map is centered.
Allowed values are
``C``, ``SW``, ``S``, ``SE``, ``E``, ``NE``,
``N``, ``NW``, and ``W``.
celestial use astronomical conventions for longitude (i.e.
negative longitudes to the east of 0). Default False.
Implies resolution=None.
ax set default axes instance
(default None - matplotlib.pyplot.gca() may be used
to get the current axes instance).
If you do not want matplotlib.pyplot to be imported,
you can either set this to a pre-defined axes
instance, or use the ``ax`` keyword in each Basemap
method call that does drawing. In the first case,
all Basemap method calls will draw to the same axes
instance. In the second case, you can draw to
different axes with the same Basemap instance.
You can also use the ``ax`` keyword in individual
method calls to selectively override the default
axes instance.
============== ====================================================
The following keywords are map projection parameters which all default to
None. Not all parameters are used by all projections, some are ignored.
The module variable ``projection_params`` is a dictionary which
lists which parameters apply to which projections.
.. tabularcolumns:: |l|L|
================ ====================================================
Keyword Description
================ ====================================================
lat_ts latitude of true scale. Optional for stereographic,
cylindrical equal area and mercator projections.
default is lat_0 for stereographic projection.
default is 0 for mercator and cylindrical equal area
projections.
lat_1 first standard parallel for lambert conformal,
albers equal area and equidistant conic.
Latitude of one of the two points on the projection
centerline for oblique mercator. If lat_1 is not given, but
lat_0 is, lat_1 is set to lat_0 for lambert
conformal, albers equal area and equidistant conic.
lat_2 second standard parallel for lambert conformal,
albers equal area and equidistant conic.
Latitude of one of the two points on the projection
centerline for oblique mercator. If lat_2 is not
given it is set to lat_1 for lambert conformal,
albers equal area and equidistant conic.
lon_1 Longitude of one of the two points on the projection
centerline for oblique mercator.
lon_2 Longitude of one of the two points on the projection
centerline for oblique mercator.
k_0 Scale factor at natural origin (used
by 'tmerc', 'omerc', 'stere' and 'lcc').
no_rot only used by oblique mercator.
If set to True, the map projection coordinates will
not be rotated to true North. Default is False
(projection coordinates are automatically rotated).
lat_0 central latitude (y-axis origin) - used by all
projections.
lon_0 central meridian (x-axis origin) - used by all
projections.
o_lat_p latitude of rotated pole (only used by 'rotpole')
o_lon_p longitude of rotated pole (only used by 'rotpole')
boundinglat bounding latitude for pole-centered projections
(npstere,spstere,nplaea,splaea,npaeqd,spaeqd).
These projections are square regions centered
on the north or south pole.
The longitude lon_0 is at 6-o'clock, and the
latitude circle boundinglat is tangent to the edge
of the map at lon_0.
round cut off pole-centered projection at boundinglat
(so plot is a circle instead of a square). Only
relevant for npstere,spstere,nplaea,splaea,npaeqd
or spaeqd projections. Default False.
satellite_height height of satellite (in m) above equator -
only relevant for geostationary
and near-sided perspective (``geos`` or ``nsper``)
projections. Default 35,786 km.
================ ====================================================
Useful instance variables:
.. tabularcolumns:: |l|L|
================ ====================================================
Variable Name Description
================ ====================================================
projection map projection. Print the module variable
``supported_projections`` to see a list of allowed
values.
epsg EPSG code defining projection (see
http://spatialreference.org for a list of
EPSG codes and their definitions).
aspect map aspect ratio
(size of y dimension / size of x dimension).
llcrnrlon longitude of lower left hand corner of the
selected map domain.
llcrnrlat latitude of lower left hand corner of the
selected map domain.
urcrnrlon longitude of upper right hand corner of the
selected map domain.
urcrnrlat latitude of upper right hand corner of the
selected map domain.
llcrnrx x value of lower left hand corner of the
selected map domain in map projection coordinates.
llcrnry y value of lower left hand corner of the
selected map domain in map projection coordinates.
urcrnrx x value of upper right hand corner of the
selected map domain in map projection coordinates.
urcrnry y value of upper right hand corner of the
selected map domain in map projection coordinates.
rmajor equatorial radius of ellipsoid used (in meters).
rminor polar radius of ellipsoid used (in meters).
resolution resolution of boundary dataset being used (``c``
for crude, ``l`` for low, etc.).
If None, no boundary dataset is associated with the
Basemap instance.
proj4string the string describing the map projection that is
used by PROJ.4.
================ ====================================================
**Converting from Geographic (lon/lat) to Map Projection (x/y) Coordinates**
Calling a Basemap class instance with the arguments lon, lat will
convert lon/lat (in degrees) to x/y map projection
coordinates (in meters). If optional keyword ``inverse`` is
True (default is False), the inverse transformation from x/y
to lon/lat is performed.
For cylindrical equidistant projection (``cyl``), this
does nothing (i.e. x,y == lon,lat).
For non-cylindrical projections, the inverse transformation
always returns longitudes between -180 and 180 degrees. For
cylindrical projections (self.projection == ``cyl``, ``mill``,
``cea``, ``gall`` or ``merc``)
the inverse transformation will return longitudes between
self.llcrnrlon and self.llcrnrlon+360.
Input arguments lon, lat can be either scalar floats, sequences
or numpy arrays.
**Example Usage:**
>>> from mpl_toolkits.basemap import Basemap
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> # read in topo data (on a regular lat/lon grid)
>>> etopo = np.loadtxt('etopo20data.gz')
>>> lons = np.loadtxt('etopo20lons.gz')
>>> lats = np.loadtxt('etopo20lats.gz')
>>> # create Basemap instance for Robinson projection.
>>> m = Basemap(projection='robin',lon_0=0.5*(lons[0]+lons[-1]))
>>> # compute map projection coordinates for lat/lon grid.
>>> x, y = m(*np.meshgrid(lons,lats))
>>> # make filled contour plot.
>>> cs = m.contourf(x,y,etopo,30,cmap=plt.cm.jet)
>>> m.drawcoastlines() # draw coastlines
>>> m.drawmapboundary() # draw a line around the map region
>>> m.drawparallels(np.arange(-90.,120.,30.),labels=[1,0,0,0]) # draw parallels
>>> m.drawmeridians(np.arange(0.,420.,60.),labels=[0,0,0,1]) # draw meridians
>>> plt.title('Robinson Projection') # add a title
>>> plt.show()
[this example (simpletest.py) plus many others can be found in the
examples directory of source distribution. The "OO" version of this
example (which does not use matplotlib.pyplot) is called "simpletest_oo.py".]
""" % locals()
# unsupported projection error message.
_unsupported_projection = ["'%s' is an unsupported projection.\n"]
_unsupported_projection.append("The supported projections are:\n")
_unsupported_projection.append(supported_projections)
_unsupported_projection = ''.join(_unsupported_projection)
def _validated_ll(param, name, minval, maxval):
param = float(param)
if param > maxval or param < minval:
raise ValueError('%s must be between %f and %f degrees' %
(name, minval, maxval))
return param
def _validated_or_none(param, name, minval, maxval):
if param is None:
return None
return _validated_ll(param, name, minval, maxval)
def _insert_validated(d, param, name, minval, maxval):
if param is not None:
d[name] = _validated_ll(param, name, minval, maxval)
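# e.g. _insert_validated(projparams, lat_0, 'lat_0', -90, 90) adds 'lat_0' to the
# dict only when a value was given, raising ValueError if it is out of range.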
def _transform(plotfunc):
# shift data and longitudes to map projection region, then compute
# transformation to map projection coordinates.
@functools.wraps(plotfunc)
def with_transform(self,x,y,data,*args,**kwargs):
# input coordinates are latitude/longitude, not map projection coords.
if kwargs.pop('latlon', latlon_default):
# shift data to map projection region for
# cylindrical and pseudo-cylindrical projections.
if self.projection in _cylproj or self.projection in _pseudocyl:
x, data = self.shiftdata(x, data,
fix_wrap_around=plotfunc.__name__ not in ["scatter"])
# convert lat/lon coords to map projection coords.
x, y = self(x,y)
return plotfunc(self,x,y,data,*args,**kwargs)
return with_transform
def _transform1d(plotfunc):
# shift data and longitudes to map projection region, then compute
# transformation to map projection coordinates.
@functools.wraps(plotfunc)
def with_transform(self,x,y,*args,**kwargs):
x = np.asarray(x)
# input coordinates are latitude/longitude, not map projection coords.
if kwargs.pop('latlon', latlon_default):
# shift data to map projection region for
# cylindrical and pseudo-cylindrical projections.
if self.projection in _cylproj or self.projection in _pseudocyl:
if x.ndim == 1:
x = self.shiftdata(x, fix_wrap_around=plotfunc.__name__ not in ["scatter"])
elif x.ndim == 0:
if x > 180:
x = x - 360.
# convert lat/lon coords to map projection coords.
x, y = self(x,y)
return plotfunc(self,x,y,*args,**kwargs)
return with_transform
def _transformuv(plotfunc):
# shift data and longitudes to map projection region, then compute
# transformation to map projection coordinates. Works when call
# signature has two data arrays instead of one.
@functools.wraps(plotfunc)
def with_transform(self,x,y,u,v,*args,**kwargs):
# input coordinates are latitude/longitude, not map projection coords.
if kwargs.pop('latlon', latlon_default):
# shift data to map projection region for
# cylindrical and pseudo-cylindrical projections.
if self.projection in _cylproj or self.projection in _pseudocyl:
x1, u = self.shiftdata(x, u)
x, v = self.shiftdata(x, v)
# convert lat/lon coords to map projection coords.
x, y = self(x,y)
return plotfunc(self,x,y,u,v,*args,**kwargs)
return with_transform
class Basemap(object):
def __init__(self, llcrnrlon=None, llcrnrlat=None,
urcrnrlon=None, urcrnrlat=None,
llcrnrx=None, llcrnry=None,
urcrnrx=None, urcrnry=None,
width=None, height=None,
projection='cyl', resolution='c',
area_thresh=None, rsphere=6370997.0,
ellps=None, lat_ts=None,
lat_1=None, lat_2=None,
lat_0=None, lon_0=None,
lon_1=None, lon_2=None,
o_lon_p=None, o_lat_p=None,
k_0=None,
no_rot=False,
suppress_ticks=True,
satellite_height=35786000,
boundinglat=None,
fix_aspect=True,
anchor='C',
celestial=False,
round=False,
epsg=None,
ax=None):
# docstring is added after __init__ method definition
# set epsg code if given, set to 4326 for projection='cyl':
if epsg is not None:
self.epsg = epsg
elif projection == 'cyl':
self.epsg = 4326
# replace kwarg values with those implied by epsg code,
# if given.
if hasattr(self,'epsg'):
if str(self.epsg) not in epsg_dict:
raise ValueError('%s is not a supported EPSG code' %
self.epsg)
epsg_params = epsg_dict[str(self.epsg)]
for k in epsg_params:
if k == 'projection':
projection = epsg_params[k]
elif k == 'rsphere':
rsphere = epsg_params[k]
elif k == 'ellps':
ellps = epsg_params[k]
elif k == 'lat_1':
lat_1 = epsg_params[k]
elif k == 'lat_2':
lat_2 = epsg_params[k]
elif k == 'lon_0':
lon_0 = epsg_params[k]
elif k == 'lat_0':
lat_0 = epsg_params[k]
elif k == 'lat_ts':
lat_ts = epsg_params[k]
elif k == 'k_0':
k_0 = epsg_params[k]
        # fix aspect ratio to match aspect ratio of map projection
# region
self.fix_aspect = fix_aspect
# where to put plot in figure (default is 'C' or center)
self.anchor = anchor
# geographic or celestial coords?
self.celestial = celestial
# map projection.
self.projection = projection
# bounding lat (for pole-centered plots)
self.boundinglat = boundinglat
# is a round pole-centered plot desired?
self.round = round
# full disk projection?
self._fulldisk = False # default value
# set up projection parameter dict.
projparams = {}
projparams['proj'] = projection
# if ellps keyword specified, it over-rides rsphere.
if ellps is not None:
try:
elldict = pyproj.pj_ellps[ellps]
except KeyError:
raise ValueError(
'illegal ellps definition, allowed values are %s' %
pyproj.pj_ellps.keys())
projparams['a'] = elldict['a']
if 'b' in elldict:
projparams['b'] = elldict['b']
else:
projparams['b'] = projparams['a']*(1.0-(1.0/elldict['rf']))
else:
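            # rsphere may be a (semi-major, semi-minor) pair or a single radius;
            # indexing a scalar raises, which the except branch below handles.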
try:
if rsphere[0] > rsphere[1]:
projparams['a'] = rsphere[0]
projparams['b'] = rsphere[1]
else:
projparams['a'] = rsphere[1]
projparams['b'] = rsphere[0]
except:
if projection == 'tmerc':
# use bR_a instead of R because of obscure bug
# in proj4 for tmerc projection.
projparams['bR_a'] = rsphere
else:
projparams['R'] = rsphere
# set units to meters.
projparams['units']='m'
# check for sane values of lon_0, lat_0, lat_ts, lat_1, lat_2
lat_0 = _validated_or_none(lat_0, 'lat_0', -90, 90)
lat_1 = _validated_or_none(lat_1, 'lat_1', -90, 90)
lat_2 = _validated_or_none(lat_2, 'lat_2', -90, 90)
lat_ts = _validated_or_none(lat_ts, 'lat_ts', -90, 90)
lon_0 = _validated_or_none(lon_0, 'lon_0', -360, 720)
lon_1 = _validated_or_none(lon_1, 'lon_1', -360, 720)
lon_2 = _validated_or_none(lon_2, 'lon_2', -360, 720)
llcrnrlon = _validated_or_none(llcrnrlon, 'llcrnrlon', -360, 720)
urcrnrlon = _validated_or_none(urcrnrlon, 'urcrnrlon', -360, 720)
llcrnrlat = _validated_or_none(llcrnrlat, 'llcrnrlat', -90, 90)
urcrnrlat = _validated_or_none(urcrnrlat, 'urcrnrlat', -90, 90)
_insert_validated(projparams, lat_0, 'lat_0', -90, 90)
_insert_validated(projparams, lat_1, 'lat_1', -90, 90)
_insert_validated(projparams, lat_2, 'lat_2', -90, 90)
_insert_validated(projparams, lat_ts, 'lat_ts', -90, 90)
_insert_validated(projparams, lon_0, 'lon_0', -360, 720)
_insert_validated(projparams, lon_1, 'lon_1', -360, 720)
_insert_validated(projparams, lon_2, 'lon_2', -360, 720)
if projection in ['geos','nsper']:
projparams['h'] = satellite_height
# check for sane values of projection corners.
using_corners = (None not in [llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat])
if using_corners:
self.llcrnrlon = _validated_ll(llcrnrlon, 'llcrnrlon', -360, 720)
self.urcrnrlon = _validated_ll(urcrnrlon, 'urcrnrlon', -360, 720)
self.llcrnrlat = _validated_ll(llcrnrlat, 'llcrnrlat', -90, 90)
self.urcrnrlat = _validated_ll(urcrnrlat, 'urcrnrlat', -90, 90)
# for each of the supported projections,
# compute lat/lon of domain corners
# and set values in projparams dict as needed.
if projection in ['lcc', 'eqdc', 'aea']:
if projection == 'lcc' and k_0 is not None:
projparams['k_0']=k_0
# if lat_0 is given, but not lat_1,
# set lat_1=lat_0
if lat_1 is None and lat_0 is not None:
lat_1 = lat_0
projparams['lat_1'] = lat_1
if lat_1 is None or lon_0 is None:
raise ValueError('must specify lat_1 or lat_0 and lon_0 for %s basemap (lat_2 is optional)' % _projnames[projection])
if lat_2 is None:
projparams['lat_2'] = lat_1
if not using_corners:
using_cornersxy = (None not in [llcrnrx,llcrnry,urcrnrx,urcrnry])
if using_cornersxy:
llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat = _choosecornersllur(llcrnrx,llcrnry,urcrnrx,urcrnry,**projparams)
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
else:
if width is None or height is None:
raise ValueError('must either specify lat/lon values of corners (llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat) in degrees or width and height in meters')
if lon_0 is None or lat_0 is None:
raise ValueError('must specify lon_0 and lat_0 when using width, height to specify projection region')
llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat = _choosecorners(width,height,**projparams)
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
elif projection == 'stere':
if k_0 is not None:
projparams['k_0']=k_0
if lat_0 is None or lon_0 is None:
raise ValueError('must specify lat_0 and lon_0 for Stereographic basemap (lat_ts is optional)')
if not using_corners:
if width is None or height is None:
raise ValueError('must either specify lat/lon values of corners (llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat) in degrees or width and height in meters')
if lon_0 is None or lat_0 is None:
raise ValueError('must specify lon_0 and lat_0 when using width, height to specify projection region')
llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat = _choosecorners(width,height,**projparams)
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
elif projection in ['spstere', 'npstere',
'splaea', 'nplaea',
'spaeqd', 'npaeqd']:
if (projection == 'splaea' and boundinglat >= 0) or\
(projection == 'nplaea' and boundinglat <= 0):
msg='boundinglat cannot extend into opposite hemisphere'
raise ValueError(msg)
if boundinglat is None or lon_0 is None:
raise ValueError('must specify boundinglat and lon_0 for %s basemap' % _projnames[projection])
if projection[0] == 's':
sgn = -1
else:
sgn = 1
rootproj = projection[2:]
projparams['proj'] = rootproj
if rootproj == 'stere':
projparams['lat_ts'] = sgn * 90.
projparams['lat_0'] = sgn * 90.
self.llcrnrlon = lon_0 - sgn*45.
self.urcrnrlon = lon_0 + sgn*135.
proj = pyproj.Proj(projparams)
x,y = proj(lon_0,boundinglat)
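            # boundinglat is tangent to the map edge, so the square map's corner
            # lies on the diagonal, sqrt(2) times the pole-to-boundinglat distance.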
lon,self.llcrnrlat = proj(math.sqrt(2.)*y,0.,inverse=True)
self.urcrnrlat = self.llcrnrlat
if width is not None or height is not None:
sys.stdout.write('warning: width and height keywords ignored for %s projection' % _projnames[projection])
elif projection == 'laea':
if lat_0 is None or lon_0 is None:
raise ValueError('must specify lat_0 and lon_0 for Lambert Azimuthal basemap')
if not using_corners:
if width is None or height is None:
raise ValueError('must either specify lat/lon values of corners (llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat) in degrees or width and height in meters')
if lon_0 is None or lat_0 is None:
raise ValueError('must specify lon_0 and lat_0 when using width, height to specify projection region')
llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat = _choosecorners(width,height,**projparams)
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
elif projection in ['tmerc','gnom','cass','poly'] :
if projection == 'tmerc' and k_0 is not None:
projparams['k_0']=k_0
if projection == 'gnom' and 'R' not in projparams:
raise ValueError('gnomonic projection only works for perfect spheres - not ellipsoids')
if lat_0 is None or lon_0 is None:
                raise ValueError('must specify lat_0 and lon_0 for Transverse Mercator, Gnomonic, Cassini-Soldner and Polyconic basemap')
if not using_corners:
if width is None or height is None:
raise ValueError('must either specify lat/lon values of corners (llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat) in degrees or width and height in meters')
if lon_0 is None or lat_0 is None:
raise ValueError('must specify lon_0 and lat_0 when using width, height to specify projection region')
llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat = _choosecorners(width,height,**projparams)
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
elif projection == 'ortho':
if 'R' not in projparams:
raise ValueError('orthographic projection only works for perfect spheres - not ellipsoids')
if lat_0 is None or lon_0 is None:
raise ValueError('must specify lat_0 and lon_0 for Orthographic basemap')
if (lat_0 == 90 or lat_0 == -90) and\
None in [llcrnrx,llcrnry,urcrnrx,urcrnry]:
# for ortho plot centered on pole, set boundinglat to equator.
# (so meridian labels can be drawn in this special case).
self.boundinglat = 0
self.round = True
if width is not None or height is not None:
sys.stdout.write('warning: width and height keywords ignored for %s projection' % _projnames[self.projection])
if not using_corners:
llcrnrlon = -180.
llcrnrlat = -90.
urcrnrlon = 180
urcrnrlat = 90.
self._fulldisk = True
else:
self._fulldisk = False
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
# FIXME: won't work for points exactly on equator??
if np.abs(lat_0) < 1.e-2: lat_0 = 1.e-2
projparams['lat_0'] = lat_0
elif projection == 'geos':
if lat_0 is not None and lat_0 != 0:
raise ValueError('lat_0 must be zero for Geostationary basemap')
if lon_0 is None:
raise ValueError('must specify lon_0 for Geostationary basemap')
if width is not None or height is not None:
sys.stdout.write('warning: width and height keywords ignored for %s projection' % _projnames[self.projection])
if not using_corners:
llcrnrlon = -180.
llcrnrlat = -90.
urcrnrlon = 180
urcrnrlat = 90.
self._fulldisk = True
else:
self._fulldisk = False
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
elif projection == 'nsper':
if 'R' not in projparams:
raise ValueError('near-sided perspective projection only works for perfect spheres - not ellipsoids')
if lat_0 is None or lon_0 is None:
msg='must specify lon_0 and lat_0 for near-sided perspective Basemap'
raise ValueError(msg)
if width is not None or height is not None:
sys.stdout.write('warning: width and height keywords ignored for %s projection' % _projnames[self.projection])
if not using_corners:
llcrnrlon = -180.
llcrnrlat = -90.
urcrnrlon = 180
urcrnrlat = 90.
self._fulldisk = True
else:
self._fulldisk = False
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
elif projection in _pseudocyl:
if lon_0 is None:
raise ValueError('must specify lon_0 for %s projection' % _projnames[self.projection])
if width is not None or height is not None:
sys.stdout.write('warning: width and height keywords ignored for %s projection' % _projnames[self.projection])
llcrnrlon = lon_0-180.
llcrnrlat = -90.
urcrnrlon = lon_0+180
urcrnrlat = 90.
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
elif projection == 'omerc':
if k_0 is not None:
projparams['k_0']=k_0
if lat_1 is None or lon_1 is None or lat_2 is None or lon_2 is None:
raise ValueError('must specify lat_1,lon_1 and lat_2,lon_2 for Oblique Mercator basemap')
projparams['lat_1'] = lat_1
projparams['lon_1'] = lon_1
projparams['lat_2'] = lat_2
projparams['lon_2'] = lon_2
projparams['lat_0'] = lat_0
if no_rot:
projparams['no_rot']=''
#if not using_corners:
# raise ValueError, 'cannot specify map region with width and height keywords for this projection, please specify lat/lon values of corners'
if not using_corners:
if width is None or height is None:
raise ValueError('must either specify lat/lon values of corners (llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat) in degrees or width and height in meters')
if lon_0 is None or lat_0 is None:
raise ValueError('must specify lon_0 and lat_0 when using width, height to specify projection region')
llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat = _choosecorners(width,height,**projparams)
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
elif projection == 'aeqd':
if lat_0 is None or lon_0 is None:
raise ValueError('must specify lat_0 and lon_0 for Azimuthal Equidistant basemap')
if not using_corners:
if width is None or height is None:
self._fulldisk = True
llcrnrlon = -180.
llcrnrlat = -90.
urcrnrlon = 180
urcrnrlat = 90.
else:
self._fulldisk = False
if lon_0 is None or lat_0 is None:
raise ValueError('must specify lon_0 and lat_0 when using width, height to specify projection region')
if not self._fulldisk:
llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat = _choosecorners(width,height,**projparams)
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
elif projection in _cylproj:
if projection == 'merc' or projection == 'cea':
if lat_ts is None:
lat_ts = 0.
projparams['lat_ts'] = lat_ts
if not using_corners:
llcrnrlat = -90.
urcrnrlat = 90.
if lon_0 is not None:
llcrnrlon = lon_0-180.
urcrnrlon = lon_0+180.
else:
llcrnrlon = -180.
urcrnrlon = 180
if projection == 'merc':
                    # clip plot region to be within 89.99S and 89.99N
# (mercator is singular at poles)
if llcrnrlat < -89.99: llcrnrlat = -89.99
if llcrnrlat > 89.99: llcrnrlat = 89.99
if urcrnrlat < -89.99: urcrnrlat = -89.99
if urcrnrlat > 89.99: urcrnrlat = 89.99
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
if width is not None or height is not None:
sys.stdout.write('warning: width and height keywords ignored for %s projection' % _projnames[self.projection])
if lon_0 is not None:
projparams['lon_0'] = lon_0
else:
projparams['lon_0']=0.5*(llcrnrlon+urcrnrlon)
elif projection == 'rotpole':
if lon_0 is None or o_lon_p is None or o_lat_p is None:
msg='must specify lon_0,o_lat_p,o_lon_p for rotated pole Basemap'
raise ValueError(msg)
if width is not None or height is not None:
sys.stdout.write('warning: width and height keywords ignored for %s projection' % _projnames[self.projection])
projparams['lon_0']=lon_0
projparams['o_lon_p']=o_lon_p
projparams['o_lat_p']=o_lat_p
projparams['o_proj']='longlat'
projparams['proj']='ob_tran'
if not using_corners and None in [llcrnrx,llcrnry,urcrnrx,urcrnry]:
raise ValueError('must specify lat/lon values of corners in degrees')
if None not in [llcrnrx,llcrnry,urcrnrx,urcrnry]:
p = pyproj.Proj(projparams)
llcrnrx = _dg2rad*llcrnrx; llcrnry = _dg2rad*llcrnry
urcrnrx = _dg2rad*urcrnrx; urcrnry = _dg2rad*urcrnry
llcrnrlon, llcrnrlat = p(llcrnrx,llcrnry,inverse=True)
urcrnrlon, urcrnrlat = p(urcrnrx,urcrnry,inverse=True)
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
else:
raise ValueError(_unsupported_projection % projection)
# initialize proj4
proj = Proj(projparams,self.llcrnrlon,self.llcrnrlat,self.urcrnrlon,self.urcrnrlat)
# make sure axis ticks are suppressed.
self.noticks = suppress_ticks
# map boundary not yet drawn.
self._mapboundarydrawn = False
# make Proj instance a Basemap instance variable.
self.projtran = proj
# copy some Proj attributes.
atts = ['rmajor','rminor','esq','flattening','ellipsoid','projparams']
for att in atts:
self.__dict__[att] = proj.__dict__[att]
# these only exist for geostationary projection.
if hasattr(proj,'_width'):
self.__dict__['_width'] = proj.__dict__['_width']
if hasattr(proj,'_height'):
self.__dict__['_height'] = proj.__dict__['_height']
# spatial reference string (useful for georeferencing output
# images with gdal_translate).
if hasattr(self,'_proj4'):
#self.srs = proj._proj4.srs
self.srs = proj._proj4.pjinitstring
else:
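            # no _proj4 interface is available, so assemble a PROJ.4-style
            # init string by hand from projparams (for the default 'cyl' map
            # this yields something like '+proj=eqc +lon_0=0 ... ' -
            # illustrative only).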
pjargs = []
for key,value in self.projparams.items():
# 'cyl' projection translates to 'eqc' in PROJ.4
if projection == 'cyl' and key == 'proj':
value = 'eqc'
# ignore x_0 and y_0 settings for 'cyl' projection
# (they are not consistent with what PROJ.4 uses)
elif projection == 'cyl' and key in ['x_0','y_0']:
continue
pjargs.append('+'+key+"="+str(value)+' ')
self.srs = ''.join(pjargs)
self.proj4string = self.srs
# set instance variables defining map region.
self.xmin = proj.xmin
self.xmax = proj.xmax
self.ymin = proj.ymin
self.ymax = proj.ymax
if projection == 'cyl':
self.aspect = (self.urcrnrlat-self.llcrnrlat)/(self.urcrnrlon-self.llcrnrlon)
else:
self.aspect = (proj.ymax-proj.ymin)/(proj.xmax-proj.xmin)
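        # for full-disk projections the user-supplied corner offsets
        # (llcrnrx etc.) are interpreted relative to the projection center,
        # so shift them by half the disk width/height into the
        # lower-left-origin coordinate system used internally.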
if projection in ['geos','ortho','nsper'] and \
None not in [llcrnrx,llcrnry,urcrnrx,urcrnry]:
self.llcrnrx = llcrnrx+0.5*proj.xmax
self.llcrnry = llcrnry+0.5*proj.ymax
self.urcrnrx = urcrnrx+0.5*proj.xmax
self.urcrnry = urcrnry+0.5*proj.ymax
self._fulldisk = False
else:
self.llcrnrx = proj.llcrnrx
self.llcrnry = proj.llcrnry
self.urcrnrx = proj.urcrnrx
self.urcrnry = proj.urcrnry
if self.projection == 'rotpole':
lon0,lat0 = self(0.5*(self.llcrnrx + self.urcrnrx),\
0.5*(self.llcrnry + self.urcrnry),\
inverse=True)
self.projparams['lat_0']=lat0
# if ax == None, pyplot.gca may be used.
self.ax = ax
self.lsmask = None
# This will record hashs of Axes instances.
self._initialized_axes = set()
# set defaults for area_thresh.
self.resolution = resolution
# celestial=True implies resolution=None (no coastlines).
if self.celestial:
self.resolution=None
if area_thresh is None and self.resolution is not None:
if resolution == 'c':
area_thresh = 10000.
elif resolution == 'l':
area_thresh = 1000.
elif resolution == 'i':
area_thresh = 100.
elif resolution == 'h':
area_thresh = 10.
elif resolution == 'f':
area_thresh = 1.
else:
raise ValueError("boundary resolution must be one of 'c','l','i','h' or 'f'")
self.area_thresh = area_thresh
# define map boundary polygon (in lat/lon coordinates)
blons, blats, self._boundarypolyll, self._boundarypolyxy = self._getmapboundary()
self.boundarylats = blats
self.boundarylons = blons
# set min/max lats for projection domain.
if self.projection in _cylproj:
self.latmin = self.llcrnrlat
self.latmax = self.urcrnrlat
self.lonmin = self.llcrnrlon
self.lonmax = self.urcrnrlon
elif self.projection in ['ortho','geos','nsper'] + _pseudocyl:
self.latmin = -90.
self.latmax = 90.
self.lonmin = self.llcrnrlon
self.lonmax = self.urcrnrlon
else:
lons, lats = self.makegrid(1001,1001)
lats = ma.masked_where(lats > 1.e20,lats)
lons = ma.masked_where(lons > 1.e20,lons)
self.latmin = lats.min()
self.latmax = lats.max()
self.lonmin = lons.min()
self.lonmax = lons.max()
NPole = _geoslib.Point(self(0.,90.))
SPole = _geoslib.Point(self(0.,-90.))
if lat_0 is None:
lon_0, lat_0 =\
self(0.5*(self.xmin+self.xmax),
0.5*(self.ymin+self.ymax),inverse=True)
Dateline = _geoslib.Point(self(180.,lat_0))
Greenwich = _geoslib.Point(self(0.,lat_0))
hasNP = NPole.within(self._boundarypolyxy)
hasSP = SPole.within(self._boundarypolyxy)
hasPole = hasNP or hasSP
hasDateline = Dateline.within(self._boundarypolyxy)
hasGreenwich = Greenwich.within(self._boundarypolyxy)
# projection crosses dateline (and not Greenwich or pole).
if not hasPole and hasDateline and not hasGreenwich:
if self.lonmin < 0 and self.lonmax > 0.:
lons = np.where(lons < 0, lons+360, lons)
self.lonmin = lons.min()
self.lonmax = lons.max()
# read in coastline polygons, only keeping those that
# intersect map boundary polygon.
if self.resolution is not None:
self.coastsegs, self.coastpolygontypes =\
self._readboundarydata('gshhs',as_polygons=True)
# reformat for use in matplotlib.patches.Polygon.
self.coastpolygons = []
for seg in self.coastsegs:
x, y = list(zip(*seg))
self.coastpolygons.append((x,y))
# replace coastsegs with line segments (instead of polygons)
self.coastsegs, types =\
self._readboundarydata('gshhs',as_polygons=False)
# create geos Polygon structures for land areas.
# currently only used in is_land method.
self.landpolygons=[]
self.lakepolygons=[]
if self.resolution is not None and len(self.coastpolygons) > 0:
#self.islandinlakepolygons=[]
#self.lakeinislandinlakepolygons=[]
x, y = list(zip(*self.coastpolygons))
for x,y,typ in zip(x,y,self.coastpolygontypes):
b = np.asarray([x,y]).T
if typ == 1: self.landpolygons.append(_geoslib.Polygon(b))
if typ == 2: self.lakepolygons.append(_geoslib.Polygon(b))
#if typ == 3: self.islandinlakepolygons.append(_geoslib.Polygon(b))
#if typ == 4: self.lakeinislandinlakepolygons.append(_geoslib.Polygon(b))
# set __init__'s docstring
__init__.__doc__ = _Basemap_init_doc
def __call__(self,x,y,inverse=False):
"""
Calling a Basemap class instance with the arguments lon, lat will
convert lon/lat (in degrees) to x/y map projection
coordinates (in meters). If optional keyword ``inverse`` is
True (default is False), the inverse transformation from x/y
to lon/lat is performed.
For cylindrical equidistant projection (``cyl``), this
does nothing (i.e. x,y == lon,lat).
For non-cylindrical projections, the inverse transformation
always returns longitudes between -180 and 180 degrees. For
cylindrical projections (self.projection == ``cyl``,
``cea``, ``mill``, ``gall`` or ``merc``)
the inverse transformation will return longitudes between
        self.llcrnrlon and self.urcrnrlon.
Input arguments lon, lat can be either scalar floats,
sequences, or numpy arrays.
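
        Example (an illustrative sketch; the projection keywords are
        arbitrary)::

            >>> m = Basemap(projection='merc', llcrnrlat=-60, urcrnrlat=60,
            ...             llcrnrlon=-180, urcrnrlon=180, lat_ts=20)
            >>> x, y = m(-104.2, 41.1)              # lon/lat (deg) -> x/y (m)
            >>> lon, lat = m(x, y, inverse=True)    # x/y (m) -> lon/lat (deg)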
"""
if self.celestial:
# don't assume center of map is at greenwich
# (only relevant for cyl or pseudo-cyl projections)
if self.projection in _pseudocyl or self.projection in _cylproj:
lon_0=self.projparams['lon_0']
else:
lon_0 = 0.
if self.celestial and not inverse:
try:
x = 2.*lon_0-x
except TypeError:
x = [2*lon_0-xx for xx in x]
if self.projection == 'rotpole' and inverse:
try:
x = _dg2rad*x
except TypeError:
x = [_dg2rad*xx for xx in x]
try:
y = _dg2rad*y
except TypeError:
y = [_dg2rad*yy for yy in y]
xout,yout = self.projtran(x,y,inverse=inverse)
if self.celestial and inverse:
try:
xout = -2.*lon_0-xout
except:
xout = [-2.*lon_0-xx for xx in xout]
if self.projection == 'rotpole' and not inverse:
try:
xout = _rad2dg*xout
xout = np.where(xout < 0., xout+360, xout)
except TypeError:
xout = [_rad2dg*xx for xx in xout]
xout = [xx+360. if xx < 0 else xx for xx in xout]
try:
yout = _rad2dg*yout
except TypeError:
yout = [_rad2dg*yy for yy in yout]
return xout,yout
def makegrid(self,nx,ny,returnxy=False):
"""
return arrays of shape (ny,nx) containing lon,lat coordinates of
an equally spaced native projection grid.
If ``returnxy = True``, the x,y values of the grid are returned also.
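
        Example (illustrative; ``m`` is an existing Basemap instance)::

            >>> lons, lats = m.makegrid(3, 3)
            >>> lons, lats, x, y = m.makegrid(3, 3, returnxy=True)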
"""
return self.projtran.makegrid(nx,ny,returnxy=returnxy)
def _readboundarydata(self,name,as_polygons=False):
"""
read boundary data, clip to map projection region.
"""
msg = dedent("""
Unable to open boundary dataset file. Only the 'crude', 'low' and
'intermediate' resolution datasets are installed by default. If you
are requesting a 'high' or 'full' resolution dataset, you need to
install the `basemap-data-hires` package.""")
# only gshhs coastlines can be polygons.
if name != 'gshhs': as_polygons=False
try:
bdatfile = open(os.path.join(basemap_datadir,name+'_'+self.resolution+'.dat'),'rb')
bdatmetafile = open(os.path.join(basemap_datadir,name+'meta_'+self.resolution+'.dat'),'r')
except:
raise IOError(msg)
polygons = []
polygon_types = []
# coastlines are polygons, other boundaries are line segments.
if name == 'gshhs':
Shape = _geoslib.Polygon
else:
Shape = _geoslib.LineString
# see if map projection region polygon contains a pole.
NPole = _geoslib.Point(self(0.,90.))
SPole = _geoslib.Point(self(0.,-90.))
boundarypolyxy = self._boundarypolyxy
boundarypolyll = self._boundarypolyll
hasNP = NPole.within(boundarypolyxy)
hasSP = SPole.within(boundarypolyxy)
containsPole = hasNP or hasSP
# these projections cannot cross pole.
if containsPole and\
self.projection in _cylproj + _pseudocyl + ['geos']:
raise ValueError('%s projection cannot cross pole'%(self.projection))
        # force containsPole=True for some projections.
# we will compute the intersections in stereographic
# coordinates, then transform back. This is
# because these projections are only defined on a hemisphere, and
# some boundary features (like Eurasia) would be undefined otherwise.
tostere =\
['omerc','ortho','gnom','nsper','nplaea','npaeqd','splaea','spaeqd']
if self.projection in tostere and name == 'gshhs':
containsPole = True
lon_0=self.projparams['lon_0']
lat_0=self.projparams['lat_0']
re = self.projparams['R']
# center of stereographic projection restricted to be
# nearest one of 6 points on the sphere (every 90 deg lat/lon).
lon0 = 90.*(np.around(lon_0/90.))
lat0 = 90.*(np.around(lat_0/90.))
if np.abs(int(lat0)) == 90: lon0=0.
maptran = pyproj.Proj(proj='stere',lon_0=lon0,lat_0=lat0,R=re)
# boundary polygon for ortho/gnom/nsper projection
# in stereographic coordinates.
b = self._boundarypolyll.boundary
blons = b[:,0]; blats = b[:,1]
b[:,0], b[:,1] = maptran(blons, blats)
boundarypolyxy = _geoslib.Polygon(b)
for line in bdatmetafile:
linesplit = line.split()
area = float(linesplit[1])
south = float(linesplit[3])
north = float(linesplit[4])
crossdatelineE=False; crossdatelineW=False
if name == 'gshhs':
id = linesplit[7]
if id.endswith('E'):
crossdatelineE = True
elif id.endswith('W'):
crossdatelineW = True
# make sure south/north limits of dateline crossing polygons
# (Eurasia) are the same, since they will be merged into one.
# (this avoids having one filtered out and not the other).
if crossdatelineE:
south_save=south
north_save=north
if crossdatelineW:
south=south_save
north=north_save
if area < 0.: area = 1.e30
useit = self.latmax>=south and self.latmin<=north and area>self.area_thresh
if useit:
typ = int(linesplit[0])
npts = int(linesplit[2])
offsetbytes = int(linesplit[5])
bytecount = int(linesplit[6])
bdatfile.seek(offsetbytes,0)
# read in binary string convert into an npts by 2
# numpy array (first column is lons, second is lats).
polystring = bdatfile.read(bytecount)
# binary data is little endian.
b = np.array(np.frombuffer(polystring,dtype='<f4'),'f8')
b.shape = (npts,2)
b2 = b.copy()
# merge polygons that cross dateline.
poly = Shape(b)
                # hack to try to avoid having the filled Antarctica polygon
                # cover the entire map (if skipAntart = False, this happens
                # for ortho lon_0=-120, lat_0=60, for example).
skipAntart = self.projection in tostere and south < -89 and \
not hasSP
if crossdatelineE and not skipAntart:
if not poly.is_valid(): poly=poly.fix()
polyE = poly
continue
elif crossdatelineW and not skipAntart:
if not poly.is_valid(): poly=poly.fix()
b = poly.boundary
b[:,0] = b[:,0]+360.
poly = Shape(b)
poly = poly.union(polyE)
if not poly.is_valid(): poly=poly.fix()
b = poly.boundary
b2 = b.copy()
                    # fix Antarctica.
if name == 'gshhs' and south < -89:
b = b[4:,:]
b2 = b.copy()
poly = Shape(b)
# if map boundary polygon is a valid one in lat/lon
# coordinates (i.e. it does not contain either pole),
# the intersections of the boundary geometries
# and the map projection region can be computed before
# transforming the boundary geometry to map projection
# coordinates (this saves time, especially for small map
# regions and high-resolution boundary geometries).
if not containsPole:
# close Antarctica.
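                    # (the GSHHS Antarctic coastline is an open arc spanning
                    # all longitudes: replicate it shifted by -360 and +360
                    # degrees and pin both ends to the south pole so that it
                    # becomes a closed polygon that can be intersected with
                    # the map region in lat/lon space.)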
if name == 'gshhs' and south < -89:
lons2 = b[:,0]
lats = b[:,1]
lons1 = lons2 - 360.
lons3 = lons2 + 360.
lons = lons1.tolist()+lons2.tolist()+lons3.tolist()
lats = lats.tolist()+lats.tolist()+lats.tolist()
lonstart,latstart = lons[0], lats[0]
lonend,latend = lons[-1], lats[-1]
lons.insert(0,lonstart)
lats.insert(0,-90.)
lons.append(lonend)
lats.append(-90.)
b = np.empty((len(lons),2),np.float64)
b[:,0] = lons; b[:,1] = lats
poly = Shape(b)
if not poly.is_valid(): poly=poly.fix()
                    # if polygon intersects map projection
# region, process it.
if poly.intersects(boundarypolyll):
if name != 'gshhs' or as_polygons:
geoms = poly.intersection(boundarypolyll)
else:
# convert polygons to line segments
poly = _geoslib.LineString(poly.boundary)
geoms = poly.intersection(boundarypolyll)
# iterate over geometries in intersection.
for psub in geoms:
b = psub.boundary
blons = b[:,0]; blats = b[:,1]
bx, by = self(blons, blats)
polygons.append(list(zip(bx,by)))
polygon_types.append(typ)
else:
# create duplicate polygons shifted by -360 and +360
# (so as to properly treat polygons that cross
# Greenwich meridian).
b2[:,0] = b[:,0]-360
poly1 = Shape(b2)
b2[:,0] = b[:,0]+360
poly2 = Shape(b2)
polys = [poly1,poly,poly2]
for poly in polys:
# try to fix "non-noded intersection" errors.
if not poly.is_valid(): poly=poly.fix()
                        # if polygon intersects map projection
# region, process it.
if poly.intersects(boundarypolyll):
if name != 'gshhs' or as_polygons:
geoms = poly.intersection(boundarypolyll)
else:
# convert polygons to line segments
# note: use fix method here or Eurasia
# line segments sometimes disappear.
poly = _geoslib.LineString(poly.fix().boundary)
geoms = poly.intersection(boundarypolyll)
# iterate over geometries in intersection.
for psub in geoms:
b = psub.boundary
blons = b[:,0]; blats = b[:,1]
# transformation from lat/lon to
# map projection coordinates.
bx, by = self(blons, blats)
if not as_polygons or len(bx) > 4:
polygons.append(list(zip(bx,by)))
polygon_types.append(typ)
# if map boundary polygon is not valid in lat/lon
# coordinates, compute intersection between map
# projection region and boundary geometries in map
# projection coordinates.
else:
# transform coordinates from lat/lon
# to map projection coordinates.
# special case for ortho/gnom/nsper, compute coastline polygon
# vertices in stereographic coords.
if name == 'gshhs' and as_polygons and self.projection in tostere:
b[:,0], b[:,1] = maptran(b[:,0], b[:,1])
else:
b[:,0], b[:,1] = self(b[:,0], b[:,1])
goodmask = np.logical_and(b[:,0]<1.e20,b[:,1]<1.e20)
# if less than two points are valid in
# map proj coords, skip this geometry.
if np.sum(goodmask) <= 1: continue
if name != 'gshhs' or (name == 'gshhs' and not as_polygons):
# if not a polygon,
# just remove parts of geometry that are undefined
# in this map projection.
bx = np.compress(goodmask, b[:,0])
by = np.compress(goodmask, b[:,1])
# split coastline segments that jump across entire plot.
xd = (bx[1:]-bx[0:-1])**2
yd = (by[1:]-by[0:-1])**2
dist = np.sqrt(xd+yd)
split = dist > 0.1*(self.xmax-self.xmin)
if np.sum(split) and self.projection not in _cylproj:
ind = (np.compress(split,np.squeeze(split*np.indices(xd.shape)))+1).tolist()
iprev = 0
ind.append(len(xd))
for i in ind:
# don't add empty lists.
if len(list(range(iprev,i))):
polygons.append(list(zip(bx[iprev:i],by[iprev:i])))
iprev = i
else:
polygons.append(list(zip(bx,by)))
polygon_types.append(typ)
continue
# create a GEOS geometry object.
if name == 'gshhs' and not as_polygons:
# convert polygons to line segments
poly = _geoslib.LineString(poly.boundary)
else:
# this is a workaround to avoid
# GEOS_ERROR: CGAlgorithmsDD::orientationIndex encountered NaN/Inf numbers
b[np.isposinf(b)] = 1e20
b[np.isneginf(b)] = -1e20
poly = Shape(b)
# this is a workaround to avoid
# "GEOS_ERROR: TopologyException:
# found non-noded intersection between ..."
if not poly.is_valid(): poly=poly.fix()
                    # if geometry intersects map projection
# region, and doesn't have any invalid points, process it.
if goodmask.all() and poly.intersects(boundarypolyxy):
# if geometry intersection calculation fails,
# just move on.
try:
geoms = poly.intersection(boundarypolyxy)
except:
continue
# iterate over geometries in intersection.
for psub in geoms:
b = psub.boundary
# if projection in ['ortho','gnom','nsper'],
# transform polygon from stereographic
# to ortho/gnom/nsper coordinates.
if self.projection in tostere:
# if coastline polygon covers more than 99%
# of map region for fulldisk projection,
# it's probably bogus, so skip it.
#areafrac = psub.area()/boundarypolyxy.area()
#if self.projection == ['ortho','nsper']:
# if name == 'gshhs' and\
# self._fulldisk and\
# areafrac > 0.99: continue
# inverse transform from stereographic
# to lat/lon.
b[:,0], b[:,1] = maptran(b[:,0], b[:,1], inverse=True)
# orthographic/gnomonic/nsper.
b[:,0], b[:,1]= self(b[:,0], b[:,1])
if not as_polygons or len(b) > 4:
polygons.append(list(zip(b[:,0],b[:,1])))
polygon_types.append(typ)
bdatfile.close()
bdatmetafile.close()
return polygons, polygon_types
def _getmapboundary(self):
"""
create map boundary polygon (in lat/lon and x/y coordinates)
"""
nx = 100; ny = 100
maptran = self
if self.projection in ['ortho','geos','nsper']:
# circular region.
thetas = np.linspace(0.,2.*np.pi,2*nx*ny)[:-1]
rminor = self._height
rmajor = self._width
x = rmajor*np.cos(thetas) + rmajor
y = rminor*np.sin(thetas) + rminor
b = np.empty((len(x),2),np.float64)
b[:,0]=x; b[:,1]=y
boundaryxy = _geoslib.Polygon(b)
# compute proj instance for full disk, if necessary.
if not self._fulldisk:
projparms = self.projparams.copy()
del projparms['x_0']
del projparms['y_0']
if self.projection == 'ortho':
llcrnrx = -self.rmajor
llcrnry = -self.rmajor
urcrnrx = -llcrnrx
urcrnry = -llcrnry
else:
llcrnrx = -self._width
llcrnry = -self._height
urcrnrx = -llcrnrx
urcrnry = -llcrnry
projparms['x_0']=-llcrnrx
projparms['y_0']=-llcrnry
maptran = pyproj.Proj(projparms)
elif self.projection == 'aeqd' and self._fulldisk:
# circular region.
thetas = np.linspace(0.,2.*np.pi,2*nx*ny)[:-1]
rminor = self._height
rmajor = self._width
x = rmajor*np.cos(thetas) + rmajor
y = rminor*np.sin(thetas) + rminor
b = np.empty((len(x),2),np.float64)
b[:,0]=x; b[:,1]=y
boundaryxy = _geoslib.Polygon(b)
elif self.projection in _pseudocyl:
nx = 10*nx; ny = 10*ny
# quasi-elliptical region.
lon_0 = self.projparams['lon_0']
# left side
lats1 = np.linspace(-89.9999,89.9999,ny).tolist()
lons1 = len(lats1)*[lon_0-179.9]
# top.
lons2 = np.linspace(lon_0-179.9,lon_0+179.9,nx).tolist()
lats2 = len(lons2)*[89.9999]
# right side
lats3 = np.linspace(89.9999,-89.9999,ny).tolist()
lons3 = len(lats3)*[lon_0+179.9]
# bottom.
lons4 = np.linspace(lon_0+179.9,lon_0-179.9,nx).tolist()
lats4 = len(lons4)*[-89.9999]
lons = np.array(lons1+lons2+lons3+lons4,np.float64)
lats = np.array(lats1+lats2+lats3+lats4,np.float64)
x, y = maptran(lons,lats)
b = np.empty((len(x),2),np.float64)
b[:,0]=x; b[:,1]=y
boundaryxy = _geoslib.Polygon(b)
else: # all other projections are rectangular.
nx = 100*nx; ny = 100*ny
# left side (x = xmin, ymin <= y <= ymax)
yy = np.linspace(self.ymin, self.ymax, ny)[:-1]
x = len(yy)*[self.xmin]; y = yy.tolist()
# top (y = ymax, xmin <= x <= xmax)
xx = np.linspace(self.xmin, self.xmax, nx)[:-1]
x = x + xx.tolist()
y = y + len(xx)*[self.ymax]
# right side (x = xmax, ymin <= y <= ymax)
yy = np.linspace(self.ymax, self.ymin, ny)[:-1]
x = x + len(yy)*[self.xmax]; y = y + yy.tolist()
# bottom (y = ymin, xmin <= x <= xmax)
xx = np.linspace(self.xmax, self.xmin, nx)[:-1]
x = x + xx.tolist()
y = y + len(xx)*[self.ymin]
x = np.array(x,np.float64)
y = np.array(y,np.float64)
b = np.empty((4,2),np.float64)
b[:,0]=[self.xmin,self.xmin,self.xmax,self.xmax]
b[:,1]=[self.ymin,self.ymax,self.ymax,self.ymin]
boundaryxy = _geoslib.Polygon(b)
if self.projection in _cylproj:
# make sure map boundary doesn't quite include pole.
if self.urcrnrlat > 89.9999:
urcrnrlat = 89.9999
else:
urcrnrlat = self.urcrnrlat
if self.llcrnrlat < -89.9999:
llcrnrlat = -89.9999
else:
llcrnrlat = self.llcrnrlat
lons = [self.llcrnrlon, self.llcrnrlon, self.urcrnrlon, self.urcrnrlon]
lats = [llcrnrlat, urcrnrlat, urcrnrlat, llcrnrlat]
self.boundarylonmin = min(lons)
self.boundarylonmax = max(lons)
x, y = self(lons, lats)
b = np.empty((len(x),2),np.float64)
b[:,0]=x; b[:,1]=y
boundaryxy = _geoslib.Polygon(b)
else:
if self.projection not in _pseudocyl:
lons, lats = maptran(x,y,inverse=True)
# fix lons so there are no jumps.
n = 1
lonprev = lons[0]
for lon,lat in zip(lons[1:],lats[1:]):
if np.abs(lon-lonprev) > 90.:
if lonprev < 0:
lon = lon - 360.
else:
lon = lon + 360
lons[n] = lon
lonprev = lon
n = n + 1
self.boundarylonmin = lons.min()
self.boundarylonmax = lons.max()
# for circular full disk projections where boundary is
# a latitude circle, set boundarylonmax and boundarylonmin
# to cover entire world (so parallels will be drawn).
if self._fulldisk and \
np.abs(self.boundarylonmax-self.boundarylonmin) < 1.:
self.boundarylonmin = -180.
self.boundarylonmax = 180.
b = np.empty((len(lons),2),np.float64)
b[:,0] = lons; b[:,1] = lats
boundaryll = _geoslib.Polygon(b)
return lons, lats, boundaryll, boundaryxy
def drawmapboundary(self,color='k',linewidth=1.0,fill_color=None,\
zorder=None,ax=None):
"""
draw boundary around map projection region, optionally
filling interior of region.
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
linewidth line width for boundary (default 1.)
color color of boundary line (default black)
fill_color fill the map region background with this
color (default is to fill with axis
background color). If set to the string
'none', no filling is done.
zorder sets the zorder for filling map background
(default 0).
ax axes instance to use
(default None, use default axes instance).
============== ====================================================
        returns the matplotlib.patches.Patch object representing the map boundary (limb).
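
        Example (illustrative)::

            >>> m = Basemap(projection='ortho', lon_0=-105, lat_0=40)
            >>> m.drawmapboundary(fill_color='aqua')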
"""
# get current axes instance (if none specified).
ax = ax or self._check_ax()
# if no fill_color given, use axes background color.
# if fill_color is string 'none', really don't fill.
if fill_color is None:
if _matplotlib_version >= '2.0':
fill_color = ax.get_facecolor()
else:
fill_color = ax.get_axis_bgcolor()
elif fill_color == 'none' or fill_color == 'None':
fill_color = None
limb = None
if self.projection in ['ortho','geos','nsper'] or (self.projection=='aeqd' and\
self._fulldisk):
limb = Ellipse((self._width,self._height),2.*self._width,2.*self._height)
if self.projection in ['ortho','geos','nsper','aeqd'] and self._fulldisk:
# elliptical region.
ax.set_frame_on(False)
elif self.projection in _pseudocyl: # elliptical region.
ax.set_frame_on(False)
nx = 100; ny = 100
if self.projection == 'vandg':
nx = 10*nx; ny = 10*ny
# quasi-elliptical region.
lon_0 = self.projparams['lon_0']
# left side
            lats1 = np.linspace(-89.9999,89.9999,ny).tolist()
lons1 = len(lats1)*[lon_0-179.9]
# top.
lons2 = np.linspace(lon_0-179.9999,lon_0+179.9999,nx).tolist()
lats2 = len(lons2)*[89.9999]
# right side
lats3 = np.linspace(89.9999,-89.9999,ny).tolist()
lons3 = len(lats3)*[lon_0+179.9999]
# bottom.
lons4 = np.linspace(lon_0+179.9999,lon_0-179.9999,nx).tolist()
lats4 = len(lons4)*[-89.9999]
lons = np.array(lons1+lons2+lons3+lons4,np.float64)
lats = np.array(lats1+lats2+lats3+lats4,np.float64)
x, y = self(lons,lats)
xy = list(zip(x,y))
limb = Polygon(xy)
elif self.round:
ax.set_frame_on(False)
limb = Circle((0.5*(self.xmax+self.xmin),0.5*(self.ymax+self.ymin)),
radius=0.5*(self.xmax-self.xmin),fc='none')
else: # all other projections are rectangular.
ax.set_frame_on(True)
for spine in ax.spines.values():
spine.set_linewidth(linewidth)
spine.set_edgecolor(color)
if zorder is not None:
spine.set_zorder(zorder)
if self.projection not in ['geos','ortho','nsper']:
limb = ax.patch
if limb is not None:
if limb is not ax.patch:
ax.add_patch(limb)
self._mapboundarydrawn = limb
if fill_color is None:
limb.set_fill(False)
else:
limb.set_facecolor(fill_color)
limb.set_zorder(0)
limb.set_edgecolor(color)
limb.set_linewidth(linewidth)
if zorder is not None:
limb.set_zorder(zorder)
limb.set_clip_on(True)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
return limb
def fillcontinents(self,color='0.8',lake_color=None,ax=None,zorder=None,alpha=None):
"""
Fill continents.
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
color color to fill continents (default gray).
lake_color color to fill inland lakes (default axes background).
ax axes instance (overrides default axes instance).
zorder sets the zorder for the continent polygons (if not
specified, uses default zorder for a Polygon patch).
Set to zero if you want to paint over the filled
continents).
alpha sets alpha transparency for continent polygons
============== ====================================================
After filling continents, lakes are re-filled with
axis background color.
returns a list of matplotlib.patches.Polygon objects.
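
        Example (illustrative)::

            >>> m = Basemap(projection='robin', lon_0=0)
            >>> m.drawmapboundary(fill_color='aqua')
            >>> polys = m.fillcontinents(color='coral', lake_color='aqua')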
"""
if self.resolution is None:
raise AttributeError('there are no boundary datasets associated with this Basemap instance')
# get current axes instance (if none specified).
ax = ax or self._check_ax()
# get axis background color.
if _matplotlib_version >= '2.0':
axisbgc = ax.get_facecolor()
else:
axisbgc = ax.get_axis_bgcolor()
npoly = 0
polys = []
for x,y in self.coastpolygons:
xa = np.array(x,np.float32)
ya = np.array(y,np.float32)
# check to see if all four corners of domain in polygon (if so,
# don't draw since it will just fill in the whole map).
# ** turn this off for now since it prevents continents that
# fill the whole map from being filled **
#delx = 10; dely = 10
#if self.projection in ['cyl']:
# delx = 0.1
# dely = 0.1
#test1 = np.fabs(xa-self.urcrnrx) < delx
#test2 = np.fabs(xa-self.llcrnrx) < delx
#test3 = np.fabs(ya-self.urcrnry) < dely
#test4 = np.fabs(ya-self.llcrnry) < dely
#hasp1 = np.sum(test1*test3)
#hasp2 = np.sum(test2*test3)
#hasp4 = np.sum(test2*test4)
#hasp3 = np.sum(test1*test4)
#if not hasp1 or not hasp2 or not hasp3 or not hasp4:
if 1:
xy = list(zip(xa.tolist(),ya.tolist()))
if self.coastpolygontypes[npoly] not in [2,4]:
poly = Polygon(xy,facecolor=color,edgecolor=color,linewidth=0)
else: # lakes filled with background color by default
if lake_color is None:
poly = Polygon(xy,facecolor=axisbgc,edgecolor=axisbgc,linewidth=0)
else:
poly = Polygon(xy,facecolor=lake_color,edgecolor=lake_color,linewidth=0)
if zorder is not None:
poly.set_zorder(zorder)
if alpha is not None:
poly.set_alpha(alpha)
ax.add_patch(poly)
polys.append(poly)
npoly = npoly + 1
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip continent polygons to map limbs
polys,c = self._cliplimb(ax,polys)
return polys
def _cliplimb(self,ax,coll):
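        # clip an artist (or a list of artists) to the map limb patch created
        # by drawmapboundary; if the limb has not been drawn yet, return the
        # artists unmodified.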
if not self._mapboundarydrawn:
return coll, None
c = self._mapboundarydrawn
if c not in ax.patches:
p = ax.add_patch(c)
#p.set_clip_on(False)
try:
coll.set_clip_path(c)
except:
for item in coll:
item.set_clip_path(c)
return coll,c
def drawcoastlines(self,linewidth=1.,linestyle='solid',color='k',antialiased=1,ax=None,zorder=None):
"""
Draw coastlines.
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
linewidth coastline width (default 1.)
linestyle coastline linestyle (default solid)
color coastline color (default black)
antialiased antialiasing switch for coastlines (default True).
ax axes instance (overrides default axes instance)
zorder sets the zorder for the coastlines (if not specified,
uses default zorder for
                       matplotlib.collections.LineCollection).
============== ====================================================
        returns a matplotlib.collections.LineCollection object.
"""
if self.resolution is None:
raise AttributeError('there are no boundary datasets associated with this Basemap instance')
# get current axes instance (if none specified).
ax = ax or self._check_ax()
coastlines = LineCollection(self.coastsegs,antialiaseds=(antialiased,))
coastlines.set_color(color)
coastlines.set_linestyle(linestyle)
coastlines.set_linewidth(linewidth)
coastlines.set_label('_nolabel_')
if zorder is not None:
coastlines.set_zorder(zorder)
ax.add_collection(coastlines)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip to map limbs
coastlines,c = self._cliplimb(ax,coastlines)
return coastlines
def drawcountries(self,linewidth=0.5,linestyle='solid',color='k',antialiased=1,ax=None,zorder=None):
"""
Draw country boundaries.
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
linewidth country boundary line width (default 0.5)
linestyle coastline linestyle (default solid)
color country boundary line color (default black)
antialiased antialiasing switch for country boundaries (default
True).
ax axes instance (overrides default axes instance)
zorder sets the zorder for the country boundaries (if not
specified uses default zorder for
                       matplotlib.collections.LineCollection).
============== ====================================================
        returns a matplotlib.collections.LineCollection object.
"""
if self.resolution is None:
raise AttributeError('there are no boundary datasets associated with this Basemap instance')
# read in country line segments, only keeping those that
# intersect map boundary polygon.
if not hasattr(self,'cntrysegs'):
self.cntrysegs, types = self._readboundarydata('countries')
# get current axes instance (if none specified).
ax = ax or self._check_ax()
countries = LineCollection(self.cntrysegs,antialiaseds=(antialiased,))
countries.set_color(color)
countries.set_linestyle(linestyle)
countries.set_linewidth(linewidth)
countries.set_label('_nolabel_')
if zorder is not None:
countries.set_zorder(zorder)
ax.add_collection(countries)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip countries to map limbs
countries,c = self._cliplimb(ax,countries)
return countries
def drawstates(self,linewidth=0.5,linestyle='solid',color='k',antialiased=1,ax=None,zorder=None):
"""
Draw state boundaries in Americas.
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
linewidth state boundary line width (default 0.5)
linestyle coastline linestyle (default solid)
color state boundary line color (default black)
antialiased antialiasing switch for state boundaries
(default True).
ax axes instance (overrides default axes instance)
zorder sets the zorder for the state boundaries (if not
specified, uses default zorder for
                       matplotlib.collections.LineCollection).
============== ====================================================
        returns a matplotlib.collections.LineCollection object.
"""
if self.resolution is None:
raise AttributeError('there are no boundary datasets associated with this Basemap instance')
# read in state line segments, only keeping those that
# intersect map boundary polygon.
if not hasattr(self,'statesegs'):
self.statesegs, types = self._readboundarydata('states')
# get current axes instance (if none specified).
ax = ax or self._check_ax()
states = LineCollection(self.statesegs,antialiaseds=(antialiased,))
states.set_color(color)
states.set_linestyle(linestyle)
states.set_linewidth(linewidth)
states.set_label('_nolabel_')
if zorder is not None:
states.set_zorder(zorder)
ax.add_collection(states)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip states to map limbs
states,c = self._cliplimb(ax,states)
return states
def drawcounties(self,linewidth=0.1,linestyle='solid',color='k',antialiased=1,
facecolor='none',ax=None,zorder=None,drawbounds=False):
"""
Draw county boundaries in US. The county boundary shapefile
originates with the NOAA Coastal Geospatial Data Project
(http://coastalgeospatial.noaa.gov/data_gis.html).
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
linewidth county boundary line width (default 0.1)
linestyle coastline linestyle (default solid)
color county boundary line color (default black)
facecolor fill color of county (default is no fill)
antialiased antialiasing switch for county boundaries
(default True).
ax axes instance (overrides default axes instance)
zorder sets the zorder for the county boundaries (if not
specified, uses default zorder for
                       matplotlib.collections.PolyCollection).
============== ====================================================
        returns a matplotlib.collections.PolyCollection object.
"""
ax = ax or self._check_ax()
gis_file = os.path.join(basemap_datadir,'UScounties')
county_info = self.readshapefile(gis_file,'counties',\
default_encoding='latin-1',drawbounds=drawbounds)
counties = [coords for coords in self.counties]
counties = PolyCollection(counties)
counties.set_linestyle(linestyle)
counties.set_linewidth(linewidth)
counties.set_edgecolor(color)
counties.set_facecolor(facecolor)
counties.set_label('counties')
if zorder:
counties.set_zorder(zorder)
ax.add_collection(counties)
return counties
def drawrivers(self,linewidth=0.5,linestyle='solid',color='k',antialiased=1,ax=None,zorder=None):
"""
Draw major rivers.
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
linewidth river boundary line width (default 0.5)
linestyle coastline linestyle (default solid)
color river boundary line color (default black)
antialiased antialiasing switch for river boundaries (default
True).
ax axes instance (overrides default axes instance)
zorder sets the zorder for the rivers (if not
specified uses default zorder for
                       matplotlib.collections.LineCollection).
============== ====================================================
        returns a matplotlib.collections.LineCollection object.
"""
if self.resolution is None:
raise AttributeError('there are no boundary datasets associated with this Basemap instance')
# read in river line segments, only keeping those that
# intersect map boundary polygon.
if not hasattr(self,'riversegs'):
self.riversegs, types = self._readboundarydata('rivers')
# get current axes instance (if none specified).
ax = ax or self._check_ax()
rivers = LineCollection(self.riversegs,antialiaseds=(antialiased,))
rivers.set_color(color)
rivers.set_linestyle(linestyle)
rivers.set_linewidth(linewidth)
rivers.set_label('_nolabel_')
if zorder is not None:
rivers.set_zorder(zorder)
ax.add_collection(rivers)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip rivers to map limbs
rivers,c = self._cliplimb(ax,rivers)
return rivers
def is_land(self,xpt,ypt):
"""
Returns True if the given x,y point (in projection coordinates) is
over land, False otherwise. The definition of land is based upon
the GSHHS coastline polygons associated with the class instance.
Points over lakes inside land regions are not counted as land points.
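
        Example (illustrative; the result depends on the coastline
        resolution the map was created with)::

            >>> m = Basemap(projection='cyl', resolution='c')
            >>> x, y = m(10., 50.)     # for 'cyl', x/y are just lon/lat
            >>> m.is_land(x, y)        # expected True over central Europe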
"""
if self.resolution is None: return None
landpt = False
for poly in self.landpolygons:
landpt = _geoslib.Point((xpt,ypt)).within(poly)
if landpt: break
lakept = False
for poly in self.lakepolygons:
lakept = _geoslib.Point((xpt,ypt)).within(poly)
if lakept: break
return landpt and not lakept
def readshapefile(self,shapefile,name,drawbounds=True,zorder=None,
linewidth=0.5,color='k',antialiased=1,ax=None,
default_encoding='utf-8'):
"""
Read in shape file, optionally draw boundaries on map.
.. note::
- Assumes shapes are 2D
- only works for Point, MultiPoint, Polyline and Polygon shapes.
- vertices/points must be in geographic (lat/lon) coordinates.
Mandatory Arguments:
.. tabularcolumns:: |l|L|
============== ====================================================
Argument Description
============== ====================================================
shapefile path to shapefile components. Example:
shapefile='/home/jeff/esri/world_borders' assumes
that world_borders.shp, world_borders.shx and
world_borders.dbf live in /home/jeff/esri.
name name for Basemap attribute to hold the shapefile
vertices or points in map projection
coordinates. Class attribute name+'_info' is a list
of dictionaries, one for each shape, containing
attributes of each shape from dbf file, For
example, if name='counties', self.counties
will be a list of x,y vertices for each shape in
map projection coordinates and self.counties_info
will be a list of dictionaries with shape
attributes. Rings in individual Polygon
shapes are split out into separate polygons, and
additional keys 'RINGNUM' and 'SHAPENUM' are added
to the shape attribute dictionary.
============== ====================================================
The following optional keyword arguments are only relevant for Polyline
and Polygon shape types, for Point and MultiPoint shapes they are
ignored.
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
drawbounds draw boundaries of shapes (default True).
zorder shape boundary zorder (if not specified,
                       default for matplotlib.collections.LineCollection
is used).
linewidth shape boundary line width (default 0.5)
color shape boundary line color (default black)
antialiased antialiasing switch for shape boundaries
(default True).
ax axes instance (overrides default axes instance)
============== ====================================================
A tuple (num_shapes, type, min, max) containing shape file info
is returned.
num_shapes is the number of shapes, type is the type code (one of
the SHPT* constants defined in the shapelib module, see
http://shapelib.maptools.org/shp_api.html) and min and
max are 4-element lists with the minimum and maximum values of the
vertices. If ``drawbounds=True`` a
        matplotlib.collections.LineCollection object is appended to the tuple.
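
        Example (illustrative; the shapefile path and attribute name are
        hypothetical)::

            >>> m = Basemap(projection='cyl')
            >>> info = m.readshapefile('/data/esri/world_borders', 'borders',
            ...                        drawbounds=True)
            >>> len(m.borders), len(m.borders_info)   # vertices, dbf records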
"""
import shapefile as shp
from shapefile import Reader
shp.default_encoding = default_encoding
if not os.path.exists('%s.shp'%shapefile):
raise IOError('cannot locate %s.shp'%shapefile)
if not os.path.exists('%s.shx'%shapefile):
raise IOError('cannot locate %s.shx'%shapefile)
if not os.path.exists('%s.dbf'%shapefile):
raise IOError('cannot locate %s.dbf'%shapefile)
# open shapefile, read vertices for each object, convert
# to map projection coordinates (only works for 2D shape types).
try:
shf = Reader(shapefile, encoding=default_encoding)
except:
raise IOError('error reading shapefile %s.shp' % shapefile)
fields = shf.fields
coords = []; attributes = []
msg=dedent("""
shapefile must have lat/lon vertices - it looks like this one has vertices
in map projection coordinates. You can convert the shapefile to geographic
coordinates using the shpproj utility from the shapelib tools
(http://shapelib.maptools.org/shapelib-tools.html)""")
shptype = shf.shapes()[0].shapeType
bbox = shf.bbox.tolist()
info = (shf.numRecords,shptype,bbox[0:2]+[0.,0.],bbox[2:]+[0.,0.])
npoly = 0
for shprec in shf.shapeRecords():
shp = shprec.shape; rec = shprec.record
npoly = npoly + 1
if shptype != shp.shapeType:
raise ValueError('readshapefile can only handle a single shape type per file')
if shptype not in [1,3,5,8]:
raise ValueError('readshapefile can only handle 2D shape types')
verts = shp.points
if shptype in [1,8]: # a Point or MultiPoint shape.
lons, lats = list(zip(*verts))
if max(lons) > 721. or min(lons) < -721. or max(lats) > 90.01 or min(lats) < -90.01:
raise ValueError(msg)
# if latitude is slightly greater than 90, truncate to 90
lats = [max(min(lat, 90.0), -90.0) for lat in lats]
if len(verts) > 1: # MultiPoint
x,y = self(lons, lats)
coords.append(list(zip(x,y)))
else: # single Point
x,y = self(lons[0], lats[0])
coords.append((x,y))
attdict={}
for r,key in zip(rec,fields[1:]):
attdict[key[0]]=r
attributes.append(attdict)
else: # a Polyline or Polygon shape.
parts = shp.parts.tolist()
ringnum = 0
for indx1,indx2 in zip(parts,parts[1:]+[len(verts)]):
ringnum = ringnum + 1
lons, lats = list(zip(*verts[indx1:indx2]))
if max(lons) > 721. or min(lons) < -721. or max(lats) > 90.01 or min(lats) < -90.01:
raise ValueError(msg)
# if latitude is slightly greater than 90, truncate to 90
lats = [max(min(lat, 90.0), -90.0) for lat in lats]
x, y = self(lons, lats)
coords.append(list(zip(x,y)))
attdict={}
for r,key in zip(rec,fields[1:]):
attdict[key[0]]=r
# add information about ring number to dictionary.
attdict['RINGNUM'] = ringnum
attdict['SHAPENUM'] = npoly
attributes.append(attdict)
# draw shape boundaries for polylines, polygons using LineCollection.
if shptype not in [1,8] and drawbounds:
# get current axes instance (if none specified).
ax = ax or self._check_ax()
# make LineCollections for each polygon.
lines = LineCollection(coords,antialiaseds=(1,))
lines.set_color(color)
lines.set_linewidth(linewidth)
lines.set_label('_nolabel_')
if zorder is not None:
lines.set_zorder(zorder)
ax.add_collection(lines)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip boundaries to map limbs
lines,c = self._cliplimb(ax,lines)
info = info + (lines,)
self.__dict__[name]=coords
self.__dict__[name+'_info']=attributes
return info
def drawparallels(self,circles,color='k',textcolor='k',linewidth=1.,zorder=None, \
dashes=[1,1],labels=[0,0,0,0],labelstyle=None, \
fmt='%g',xoffset=None,yoffset=None,ax=None,latmax=None,
**text_kwargs):
"""
Draw and label parallels (latitude lines) for values (in degrees)
given in the sequence ``circles``.
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
color color to draw parallels (default black).
textcolor color to draw labels (default black).
linewidth line width for parallels (default 1.)
zorder sets the zorder for parallels (if not specified,
uses default zorder for matplotlib.lines.Line2D
objects).
dashes dash pattern for parallels (default [1,1], i.e.
1 pixel on, 1 pixel off).
labels list of 4 values (default [0,0,0,0]) that control
whether parallels are labelled where they intersect
the left, right, top or bottom of the plot. For
example labels=[1,0,0,1] will cause parallels
                       to be labelled where they intersect the left
and bottom of the plot, but not the right and top.
labelstyle if set to "+/-", north and south latitudes are
labelled with "+" and "-", otherwise they are
labelled with "N" and "S".
fmt a format string to format the parallel labels
(default '%g') **or** a function that takes a
                       latitude value in degrees as its only argument
and returns a formatted string.
xoffset label offset from edge of map in x-direction
(default is 0.01 times width of map in map
projection coordinates).
yoffset label offset from edge of map in y-direction
(default is 0.01 times height of map in map
projection coordinates).
ax axes instance (overrides default axes instance)
        latmax         absolute value of latitude to which meridians are
                       drawn; a parallel is always drawn at +/- latmax
                       (default is 80).
\**text_kwargs additional keyword arguments controlling text
for labels that are passed on to
the text method of the axes instance (see
matplotlib.pyplot.text documentation).
============== ====================================================
returns a dictionary whose keys are the parallel values, and
whose values are tuples containing lists of the
matplotlib.lines.Line2D and matplotlib.text.Text instances
associated with each parallel. Deleting an item from the
dictionary removes the corresponding parallel from the plot.
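
        Example (illustrative)::

            >>> import numpy as np
            >>> m = Basemap(projection='mill', lon_0=0)
            >>> pars = m.drawparallels(np.arange(-90., 91., 30.),
            ...                        labels=[1,0,0,0])  # label left edge only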
"""
text_kwargs['color']=textcolor # pass textcolor kwarg on to ax.text
# if celestial=True, don't use "N" and "S" labels.
if labelstyle is None and self.celestial:
labelstyle="+/-"
# get current axes instance (if none specified).
ax = ax or self._check_ax()
# don't draw meridians past latmax, always draw parallel at latmax.
if latmax is None: latmax = 80.
# offset for labels.
if yoffset is None:
yoffset = (self.urcrnry-self.llcrnry)/100.
if self.aspect > 1:
yoffset = self.aspect*yoffset
else:
yoffset = yoffset/self.aspect
if xoffset is None:
xoffset = (self.urcrnrx-self.llcrnrx)/100.
if self.projection in _cylproj + _pseudocyl:
lons = np.linspace(self.llcrnrlon, self.urcrnrlon, 10001)
elif self.projection in ['tmerc']:
lon_0 = self.projparams['lon_0']
# tmerc only defined within +/- 90 degrees of lon_0
lons = np.linspace(lon_0-90,lon_0+90,100001)
else:
lonmin = self.boundarylonmin; lonmax = self.boundarylonmax
lons = np.linspace(lonmin, lonmax, 10001)
# make sure latmax degree parallel is drawn if projection not merc or cyl or miller
try:
circlesl = list(circles)
except:
circlesl = circles
if self.projection not in _cylproj + _pseudocyl:
if max(circlesl) > 0 and latmax not in circlesl:
circlesl.append(latmax)
if min(circlesl) < 0 and -latmax not in circlesl:
circlesl.append(-latmax)
xdelta = 0.01*(self.xmax-self.xmin)
ydelta = 0.01*(self.ymax-self.ymin)
linecolls = {}
for circ in circlesl:
lats = circ*np.ones(len(lons),np.float32)
x,y = self(lons,lats)
# remove points outside domain.
# leave a little slop around edges (3*xdelta)
            # don't really know why, but this appears to be needed or
            # lines sometimes don't reach the edge of the plot.
testx = np.logical_and(x>=self.xmin-3*xdelta,x<=self.xmax+3*xdelta)
x = np.compress(testx, x)
y = np.compress(testx, y)
testy = np.logical_and(y>=self.ymin-3*ydelta,y<=self.ymax+3*ydelta)
x = np.compress(testy, x)
y = np.compress(testy, y)
lines = []
if len(x) > 1 and len(y) > 1:
# split into separate line segments if necessary.
                # (not necessary for cylindrical or pseudocylindrical projections)
xd = (x[1:]-x[0:-1])**2
yd = (y[1:]-y[0:-1])**2
dist = np.sqrt(xd+yd)
if self.projection not in ['cyl','rotpole']:
split = dist > self.rmajor/10.
else:
split = dist > 1.
if np.sum(split) and self.projection not in _cylproj:
ind = (np.compress(split,np.squeeze(split*np.indices(xd.shape)))+1).tolist()
xl = []
yl = []
iprev = 0
ind.append(len(xd))
for i in ind:
xl.append(x[iprev:i])
yl.append(y[iprev:i])
iprev = i
else:
xl = [x]
yl = [y]
# draw each line segment.
for x,y in zip(xl,yl):
# skip if only a point.
if len(x) > 1 and len(y) > 1:
l = Line2D(x,y,linewidth=linewidth)
l.set_color(color)
l.set_dashes(dashes)
l.set_label('_nolabel_')
if zorder is not None:
l.set_zorder(zorder)
ax.add_line(l)
lines.append(l)
linecolls[circ] = (lines,[])
# draw labels for parallels
# parallels not labelled for fulldisk orthographic or geostationary
if self.projection in ['ortho','geos','nsper','vandg','aeqd'] and max(labels):
if self.projection == 'vandg' or self._fulldisk:
sys.stdout.write('Warning: Cannot label parallels on %s basemap' % _projnames[self.projection])
labels = [0,0,0,0]
# search along edges of map to see if parallels intersect.
# if so, find x,y location of intersection and draw a label there.
dx = (self.xmax-self.xmin)/1000.
dy = (self.ymax-self.ymin)/1000.
if self.projection in _pseudocyl:
lon_0 = self.projparams['lon_0']
for dolab,side in zip(labels,['l','r','t','b']):
if not dolab: continue
# for cylindrical projections, don't draw parallels on top or bottom.
if self.projection in _cylproj + _pseudocyl and side in ['t','b']: continue
if side in ['l','r']:
nmax = int((self.ymax-self.ymin)/dy+1)
yy = np.linspace(self.llcrnry,self.urcrnry,nmax)
if side == 'l':
if self.projection in _pseudocyl:
                        lats = np.linspace(-89.99,89.99,nmax)
if self.celestial:
lons = (self.projparams['lon_0']+180.)*np.ones(len(lats),lats.dtype)
else:
lons = (self.projparams['lon_0']-180.)*np.ones(len(lats),lats.dtype)
xx, yy = self(lons, lats)
else:
xx = self.llcrnrx*np.ones(yy.shape,yy.dtype)
lons,lats = self(xx,yy,inverse=True)
lons = lons.tolist(); lats = lats.tolist()
else:
if self.projection in _pseudocyl:
                        lats = np.linspace(-89.99,89.99,nmax)
if self.celestial:
lons = (self.projparams['lon_0']-180.)*np.ones(len(lats),lats.dtype)
else:
lons = (self.projparams['lon_0']+180.)*np.ones(len(lats),lats.dtype)
xx, yy = self(lons, lats)
else:
xx = self.urcrnrx*np.ones(yy.shape,yy.dtype)
lons,lats = self(xx,yy,inverse=True)
lons = lons.tolist(); lats = lats.tolist()
if max(lons) > 1.e20 or max(lats) > 1.e20:
raise ValueError('inverse transformation undefined - please adjust the map projection region')
# adjust so 0 <= lons < 360
lons = [(lon+360) % 360 for lon in lons]
else:
nmax = int((self.xmax-self.xmin)/dx+1)
xx = np.linspace(self.llcrnrx,self.urcrnrx,nmax)
if side == 'b':
lons,lats = self(xx,self.llcrnry*np.ones(xx.shape,np.float32),inverse=True)
lons = lons.tolist(); lats = lats.tolist()
else:
lons,lats = self(xx,self.urcrnry*np.ones(xx.shape,np.float32),inverse=True)
lons = lons.tolist(); lats = lats.tolist()
if max(lons) > 1.e20 or max(lats) > 1.e20:
raise ValueError('inverse transformation undefined - please adjust the map projection region')
# adjust so 0 <= lons < 360
lons = [(lon+360) % 360 for lon in lons]
for lat in circles:
# don't label parallels for round polar plots
if self.round: continue
# find index of parallel (there may be two, so
# search from left and right).
nl = _searchlist(lats,lat)
nr = _searchlist(lats[::-1],lat)
if nr != -1: nr = len(lons)-nr-1
latlab = _setlatlab(fmt,lat,labelstyle)
# parallels can intersect each map edge twice.
for i,n in enumerate([nl,nr]):
# don't bother if close to the first label.
if i and abs(nr-nl) < 100: continue
if n >= 0:
t = None
if side == 'l':
if self.projection in _pseudocyl:
if self.celestial:
xlab,ylab = self(lon_0+179.9,lat)
else:
xlab,ylab = self(lon_0-179.9,lat)
else:
xlab = self.llcrnrx
xlab = xlab-xoffset
if self.projection in _pseudocyl:
if lat>0:
t=ax.text(xlab,yy[n],latlab,horizontalalignment='right',verticalalignment='bottom',**text_kwargs)
elif lat<0:
t=ax.text(xlab,yy[n],latlab,horizontalalignment='right',verticalalignment='top',**text_kwargs)
else:
t=ax.text(xlab,yy[n],latlab,horizontalalignment='right',verticalalignment='center',**text_kwargs)
else:
t=ax.text(xlab,yy[n],latlab,horizontalalignment='right',verticalalignment='center',**text_kwargs)
elif side == 'r':
if self.projection in _pseudocyl:
if self.celestial:
xlab,ylab = self(lon_0-179.9,lat)
else:
xlab,ylab = self(lon_0+179.9,lat)
else:
xlab = self.urcrnrx
xlab = xlab+xoffset
if self.projection in _pseudocyl:
if lat>0:
t=ax.text(xlab,yy[n],latlab,horizontalalignment='left',verticalalignment='bottom',**text_kwargs)
elif lat<0:
t=ax.text(xlab,yy[n],latlab,horizontalalignment='left',verticalalignment='top',**text_kwargs)
else:
t=ax.text(xlab,yy[n],latlab,horizontalalignment='left',verticalalignment='center',**text_kwargs)
else:
t=ax.text(xlab,yy[n],latlab,horizontalalignment='left',verticalalignment='center',**text_kwargs)
elif side == 'b':
t = ax.text(xx[n],self.llcrnry-yoffset,latlab,horizontalalignment='center',verticalalignment='top',**text_kwargs)
else:
t = ax.text(xx[n],self.urcrnry+yoffset,latlab,horizontalalignment='center',verticalalignment='bottom',**text_kwargs)
if t is not None: linecolls[lat][1].append(t)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
keys = list(linecolls.keys()); vals = list(linecolls.values())
for k,v in zip(keys,vals):
if v == ([], []):
del linecolls[k]
# add a remove method to each tuple.
else:
linecolls[k] = _tup(linecolls[k])
# override __delitem__ in dict to call remove() on values.
pardict = _dict(linecolls)
# clip parallels for round polar plots (and delete labels).
for lines, _ in pardict.values():
self._cliplimb(ax, lines)
return pardict
def drawmeridians(self,meridians,color='k',textcolor='k',linewidth=1., zorder=None,\
dashes=[1,1],labels=[0,0,0,0],labelstyle=None,\
fmt='%g',xoffset=None,yoffset=None,ax=None,latmax=None,
**text_kwargs):
"""
Draw and label meridians (longitude lines) for values (in degrees)
given in the sequence ``meridians``.
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
color color to draw meridians (default black).
textcolor color to draw labels (default black).
linewidth line width for meridians (default 1.)
zorder sets the zorder for meridians (if not specified,
uses default zorder for matplotlib.lines.Line2D
objects).
dashes dash pattern for meridians (default [1,1], i.e.
1 pixel on, 1 pixel off).
labels list of 4 values (default [0,0,0,0]) that control
whether meridians are labelled where they intersect
the left, right, top or bottom of the plot. For
example labels=[1,0,0,1] will cause meridians
to be labelled where they intersect the left and
                       bottom of the plot, but not the right and top.
labelstyle if set to "+/-", east and west longitudes are
labelled with "+" and "-", otherwise they are
labelled with "E" and "W".
fmt a format string to format the meridian labels
(default '%g') **or** a function that takes a
                       longitude value in degrees as its only argument
and returns a formatted string.
xoffset label offset from edge of map in x-direction
(default is 0.01 times width of map in map
projection coordinates).
yoffset label offset from edge of map in y-direction
(default is 0.01 times height of map in map
projection coordinates).
ax axes instance (overrides default axes instance)
latmax absolute value of latitude to which meridians are drawn
(default is 80).
\**text_kwargs additional keyword arguments controlling text
for labels that are passed on to
the text method of the axes instance (see
matplotlib.pyplot.text documentation).
============== ====================================================
returns a dictionary whose keys are the meridian values, and
whose values are tuples containing lists of the
matplotlib.lines.Line2D and matplotlib.text.Text instances
associated with each meridian. Deleting an item from the
        dictionary removes the corresponding meridian from the plot.
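        Illustrative usage (a sketch only; the projection and spacing below
        are arbitrary, any Basemap instance works the same way)::

            from mpl_toolkits.basemap import Basemap
            import numpy as np
            m = Basemap(projection='robin', lon_0=0)
            m.drawcoastlines()
            # label meridians every 60 degrees along the bottom edge
            m.drawmeridians(np.arange(-180, 181, 60), labels=[0, 0, 0, 1])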
"""
text_kwargs['color']=textcolor # pass textcolor kwarg on to ax.text
# for cylindrical projections, try to handle wraparound (i.e. if
# projection is defined in -180 to 0 and user asks for meridians from
# 180 to 360 to be drawn, it should work)
if self.projection in _cylproj or self.projection in _pseudocyl:
def addlon(meridians,madd):
minside = (madd >= self.llcrnrlon and madd <= self.urcrnrlon)
if minside and madd not in meridians: meridians.append(madd)
return meridians
merids = list(meridians)
meridians = []
for m in merids:
meridians = addlon(meridians,m)
meridians = addlon(meridians,m+360)
meridians = addlon(meridians,m-360)
meridians.sort()
# if celestial=True, don't use "E" and "W" labels.
if labelstyle is None and self.celestial:
labelstyle="+/-"
# get current axes instance (if none specified).
ax = ax or self._check_ax()
# don't draw meridians past latmax, always draw parallel at latmax.
if latmax is None: latmax = 80. # unused w/ cyl, merc or miller proj.
# offset for labels.
if yoffset is None:
yoffset = (self.urcrnry-self.llcrnry)/100.
if self.aspect > 1:
yoffset = self.aspect*yoffset
else:
yoffset = yoffset/self.aspect
if xoffset is None:
xoffset = (self.urcrnrx-self.llcrnrx)/100.
lats = np.linspace(self.latmin,self.latmax,10001)
if self.projection not in _cylproj + _pseudocyl:
testlat = np.logical_and(lats>-latmax,lats<latmax)
lats = np.compress(testlat,lats)
xdelta = 0.01*(self.xmax-self.xmin)
ydelta = 0.01*(self.ymax-self.ymin)
linecolls = {}
for merid in meridians:
lons = merid*np.ones(len(lats),np.float32)
x,y = self(lons,lats)
# remove points outside domain.
# leave a little slop around edges (3*xdelta)
            # don't really know why, but this appears to be needed,
            # or lines sometimes don't reach the edge of the plot.
testx = np.logical_and(x>=self.xmin-3*xdelta,x<=self.xmax+3*xdelta)
x = np.compress(testx, x)
y = np.compress(testx, y)
testy = np.logical_and(y>=self.ymin-3*ydelta,y<=self.ymax+3*ydelta)
x = np.compress(testy, x)
y = np.compress(testy, y)
lines = []
if len(x) > 1 and len(y) > 1:
# split into separate line segments if necessary.
# (not necessary for mercator or cylindrical or miller).
xd = (x[1:]-x[0:-1])**2
yd = (y[1:]-y[0:-1])**2
dist = np.sqrt(xd+yd)
if self.projection not in ['cyl','rotpole']:
split = dist > self.rmajor/10.
else:
split = dist > 1.
if np.sum(split) and self.projection not in _cylproj:
ind = (np.compress(split,np.squeeze(split*np.indices(xd.shape)))+1).tolist()
xl = []
yl = []
iprev = 0
ind.append(len(xd))
for i in ind:
xl.append(x[iprev:i])
yl.append(y[iprev:i])
iprev = i
else:
xl = [x]
yl = [y]
# draw each line segment.
for x,y in zip(xl,yl):
# skip if only a point.
if len(x) > 1 and len(y) > 1:
l = Line2D(x,y,linewidth=linewidth)
l.set_color(color)
l.set_dashes(dashes)
l.set_label('_nolabel_')
if zorder is not None:
l.set_zorder(zorder)
ax.add_line(l)
lines.append(l)
linecolls[merid] = (lines,[])
# draw labels for meridians.
# meridians not labelled for sinusoidal, hammer, mollweide,
# VanDerGrinten or full-disk orthographic/geostationary.
if self.projection in ['sinu','moll','hammer','vandg'] and max(labels):
sys.stdout.write('Warning: Cannot label meridians on %s basemap' % _projnames[self.projection])
labels = [0,0,0,0]
if self.projection in ['ortho','geos','nsper','aeqd'] and max(labels):
if self._fulldisk and self.boundinglat is None:
sys.stdout.write(dedent(
"""'Warning: Cannot label meridians on full-disk
Geostationary, Orthographic or Azimuthal equidistant basemap
"""))
labels = [0,0,0,0]
        # search along edges of map to see if meridians intersect.
# if so, find x,y location of intersection and draw a label there.
dx = (self.xmax-self.xmin)/1000.
dy = (self.ymax-self.ymin)/1000.
if self.projection in _pseudocyl:
lon_0 = self.projparams['lon_0']
xmin,ymin = self(lon_0-179.9,-90)
xmax,ymax = self(lon_0+179.9,90)
for dolab,side in zip(labels,['l','r','t','b']):
if not dolab or self.round: continue
# for cylindrical projections, don't draw meridians on left or right.
if self.projection in _cylproj + _pseudocyl and side in ['l','r']: continue
if side in ['l','r']:
nmax = int((self.ymax-self.ymin)/dy+1)
yy = np.linspace(self.llcrnry,self.urcrnry,nmax)
if side == 'l':
lons,lats = self(self.llcrnrx*np.ones(yy.shape,np.float32),yy,inverse=True)
lons = lons.tolist(); lats = lats.tolist()
else:
lons,lats = self(self.urcrnrx*np.ones(yy.shape,np.float32),yy,inverse=True)
lons = lons.tolist(); lats = lats.tolist()
if max(lons) > 1.e20 or max(lats) > 1.e20:
raise ValueError('inverse transformation undefined - please adjust the map projection region')
# adjust so 0 <= lons < 360
lons = [(lon+360) % 360 for lon in lons]
else:
nmax = int((self.xmax-self.xmin)/dx+1)
if self.projection in _pseudocyl:
xx = np.linspace(xmin,xmax,nmax)
else:
xx = np.linspace(self.llcrnrx,self.urcrnrx,nmax)
if side == 'b':
lons,lats = self(xx,self.llcrnry*np.ones(xx.shape,np.float32),inverse=True)
lons = lons.tolist(); lats = lats.tolist()
else:
lons,lats = self(xx,self.urcrnry*np.ones(xx.shape,np.float32),inverse=True)
lons = lons.tolist(); lats = lats.tolist()
if max(lons) > 1.e20 or max(lats) > 1.e20:
raise ValueError('inverse transformation undefined - please adjust the map projection region')
# adjust so 0 <= lons < 360
lons = [(lon+360) % 360 for lon in lons]
for lon in meridians:
# adjust so 0 <= lon < 360
lon2 = (lon+360) % 360
# find index of meridian (there may be two, so
# search from left and right).
nl = _searchlist(lons,lon2)
nr = _searchlist(lons[::-1],lon2)
if nr != -1: nr = len(lons)-nr-1
lonlab = _setlonlab(fmt,lon2,labelstyle)
# meridians can intersect each map edge twice.
for i,n in enumerate([nl,nr]):
lat = lats[n]/100.
# no meridians > latmax for projections other than merc,cyl,miller.
if self.projection not in _cylproj and lat > latmax: continue
# don't bother if close to the first label.
if i and abs(nr-nl) < 100: continue
if n >= 0:
t = None
if side == 'l':
t = ax.text(self.llcrnrx-xoffset,yy[n],lonlab,horizontalalignment='right',verticalalignment='center',**text_kwargs)
elif side == 'r':
t = ax.text(self.urcrnrx+xoffset,yy[n],lonlab,horizontalalignment='left',verticalalignment='center',**text_kwargs)
elif side == 'b':
t = ax.text(xx[n],self.llcrnry-yoffset,lonlab,horizontalalignment='center',verticalalignment='top',**text_kwargs)
else:
t = ax.text(xx[n],self.urcrnry+yoffset,lonlab,horizontalalignment='center',verticalalignment='bottom',**text_kwargs)
if t is not None: linecolls[lon][1].append(t)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# remove empty values from linecolls dictionary
keys = list(linecolls.keys()); vals = list(linecolls.values())
for k,v in zip(keys,vals):
if v == ([], []):
del linecolls[k]
else:
# add a remove method to each tuple.
linecolls[k] = _tup(linecolls[k])
# override __delitem__ in dict to call remove() on values.
meridict = _dict(linecolls)
# for round polar plots, clip meridian lines and label them.
if self.round:
# label desired?
label = False
for lab in labels:
if lab: label = True
for merid in meridict:
if not label: continue
# label
lonlab = _setlonlab(fmt,merid,labelstyle)
x,y = self(merid,self.boundinglat)
r = np.sqrt((x-0.5*(self.xmin+self.xmax))**2+
(y-0.5*(self.ymin+self.ymax))**2)
r = r + np.sqrt(xoffset**2+yoffset**2)
if self.projection.startswith('np'):
pole = 1
elif self.projection.startswith('sp'):
pole = -1
elif self.projection == 'ortho' and self.round:
pole = 1
if pole == 1:
theta = (np.pi/180.)*(merid-self.projparams['lon_0']-90)
if self.projection == 'ortho' and\
self.projparams['lat_0'] == -90:
theta = (np.pi/180.)*(-merid+self.projparams['lon_0']+90)
x = r*np.cos(theta)+0.5*(self.xmin+self.xmax)
y = r*np.sin(theta)+0.5*(self.ymin+self.ymax)
if x > 0.5*(self.xmin+self.xmax)+xoffset:
horizalign = 'left'
elif x < 0.5*(self.xmin+self.xmax)-xoffset:
horizalign = 'right'
else:
horizalign = 'center'
if y > 0.5*(self.ymin+self.ymax)+yoffset:
vertalign = 'bottom'
elif y < 0.5*(self.ymin+self.ymax)-yoffset:
vertalign = 'top'
else:
vertalign = 'center'
# labels [l,r,t,b]
if labels[0] and not labels[1] and x >= 0.5*(self.xmin+self.xmax)+xoffset: continue
if labels[1] and not labels[0] and x <= 0.5*(self.xmin+self.xmax)-xoffset: continue
if labels[2] and not labels[3] and y <= 0.5*(self.ymin+self.ymax)-yoffset: continue
                    if labels[3] and not labels[2] and y >= 0.5*(self.ymin+self.ymax)+yoffset: continue
elif pole == -1:
theta = (np.pi/180.)*(-merid+self.projparams['lon_0']+90)
x = r*np.cos(theta)+0.5*(self.xmin+self.xmax)
y = r*np.sin(theta)+0.5*(self.ymin+self.ymax)
if x > 0.5*(self.xmin+self.xmax)-xoffset:
horizalign = 'right'
elif x < 0.5*(self.xmin+self.xmax)+xoffset:
horizalign = 'left'
else:
horizalign = 'center'
if y > 0.5*(self.ymin+self.ymax)-yoffset:
vertalign = 'top'
elif y < 0.5*(self.ymin+self.ymax)+yoffset:
vertalign = 'bottom'
else:
vertalign = 'center'
# labels [l,r,t,b]
if labels[0] and not labels[1] and x <= 0.5*(self.xmin+self.xmax)+xoffset: continue
if labels[1] and not labels[0] and x >= 0.5*(self.xmin+self.xmax)-xoffset: continue
if labels[2] and not labels[3] and y >= 0.5*(self.ymin+self.ymax)-yoffset: continue
if labels[3] and not labels[2] and y <= 0.5*(self.ymin+self.ymax)+yoffset: continue
t=ax.text(x,y,lonlab,horizontalalignment=horizalign,verticalalignment=vertalign,**text_kwargs)
meridict[merid][1].append(t)
for lines, _ in meridict.values():
self._cliplimb(ax, lines)
return meridict
def tissot(self,lon_0,lat_0,radius_deg,npts,ax=None,**kwargs):
"""
Draw a polygon centered at ``lon_0,lat_0``. The polygon
approximates a circle on the surface of the earth with radius
``radius_deg`` degrees latitude along longitude ``lon_0``,
made up of ``npts`` vertices.
The polygon represents a Tissot's indicatrix
(http://en.wikipedia.org/wiki/Tissot's_Indicatrix),
which when drawn on a map shows the distortion
inherent in the map projection.
.. note::
Cannot handle situations in which the polygon intersects
the edge of the map projection domain, and then re-enters the domain.
Extra keyword ``ax`` can be used to override the default axis instance.
Other \**kwargs passed on to matplotlib.patches.Polygon.
returns a matplotlib.patches.Polygon object."""
ax = kwargs.pop('ax', None) or self._check_ax()
g = pyproj.Geod(a=self.rmajor,b=self.rminor)
az12,az21,dist = g.inv(lon_0,lat_0,lon_0,lat_0+radius_deg)
seg = [self(lon_0,lat_0+radius_deg)]
delaz = 360./npts
az = az12
for n in range(npts):
az = az+delaz
lon, lat, az21 = g.fwd(lon_0, lat_0, az, dist)
x,y = self(lon,lat)
# add segment if it is in the map projection region.
if x < 1.e20 and y < 1.e20:
seg.append((x,y))
poly = Polygon(seg,**kwargs)
ax.add_patch(poly)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip polygons to map limbs
poly,c = self._cliplimb(ax,poly)
return poly
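    # Illustrative use of tissot() (a sketch; the projection and values are
    # arbitrary): drawing indicatrices on a regular grid visualizes the
    # distortion of the chosen projection, e.g.
    #   m = Basemap(projection='merc', llcrnrlat=-70, urcrnrlat=70,
    #               llcrnrlon=-180, urcrnrlon=180, lat_ts=20)
    #   for lon in range(-150, 180, 60):
    #       for lat in range(-60, 61, 30):
    #           m.tissot(lon, lat, 4., 100, facecolor='green', alpha=0.4)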
def gcpoints(self,lon1,lat1,lon2,lat2,npoints):
"""
compute ``points`` points along a great circle with endpoints
``(lon1,lat1)`` and ``(lon2,lat2)``.
Returns arrays x,y with map projection coordinates.
"""
gc = pyproj.Geod(a=self.rmajor,b=self.rminor)
lonlats = gc.npts(lon1,lat1,lon2,lat2,npoints-2)
lons=[lon1];lats=[lat1]
for lon,lat in lonlats:
lons.append(lon); lats.append(lat)
lons.append(lon2); lats.append(lat2)
x, y = self(lons, lats)
return x,y
def drawgreatcircle(self,lon1,lat1,lon2,lat2,del_s=100.,**kwargs):
"""
Draw a great circle on the map from the longitude-latitude
pair ``lon1,lat1`` to ``lon2,lat2``
.. tabularcolumns:: |l|L|
============== =======================================================
Keyword Description
============== =======================================================
del_s points on great circle computed every del_s kilometers
(default 100).
\**kwargs other keyword arguments are passed on to :meth:`plot`
method of Basemap instance.
============== =======================================================
Returns a list with a single ``matplotlib.lines.Line2D`` object like a
call to ``pyplot.plot()``.
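        Illustrative usage (a sketch; the region and endpoints are
        arbitrary)::

            from mpl_toolkits.basemap import Basemap
            m = Basemap(projection='merc', llcrnrlat=20, urcrnrlat=65,
                        llcrnrlon=-100, urcrnrlon=20, lat_ts=20)
            m.drawcoastlines()
            # roughly New York to London, one point every 50 km
            m.drawgreatcircle(-73.9, 40.7, -0.1, 51.5, del_s=50., color='b')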
"""
# use great circle formula for a perfect sphere.
gc = pyproj.Geod(a=self.rmajor,b=self.rminor)
az12,az21,dist = gc.inv(lon1,lat1,lon2,lat2)
npoints = int((dist+0.5*1000.*del_s)/(1000.*del_s))
lonlats = gc.npts(lon1,lat1,lon2,lat2,npoints)
lons = [lon1]; lats = [lat1]
for lon, lat in lonlats:
lons.append(lon)
lats.append(lat)
lons.append(lon2); lats.append(lat2)
x, y = self(lons, lats)
# Correct wrap around effect of great circles
# get points
_p = self.plot(x,y,**kwargs)
p = _p[0].get_path()
# since we know the difference between any two points, we can use this to find wrap arounds on the plot
max_dist = 1000*del_s*2
# calculate distances and compare with max allowable distance
dists = np.abs(np.diff(p.vertices[:,0]))
cuts = np.where( dists > max_dist )[0]
# if there are any cut points, cut them and begin again at the next point
for i,k in enumerate(cuts):
# vertex to cut at
cut_point = cuts[i]
            # create new vertices with a nan in between and set those as the path's vertices
verts = np.concatenate(
[p.vertices[:cut_point, :],
[[np.nan, np.nan]],
p.vertices[cut_point+1:, :]]
)
p.codes = None
p.vertices = verts
return _p
def transform_scalar(self,datin,lons,lats,nx,ny,returnxy=False,checkbounds=False,order=1,masked=False):
"""
Interpolate a scalar field (``datin``) from a lat/lon grid with
longitudes = ``lons`` and latitudes = ``lats`` to a ``ny`` by ``nx``
map projection grid. Typically used to transform data to
map projection coordinates for plotting on a map with
the :meth:`imshow`.
.. tabularcolumns:: |l|L|
============== ====================================================
Argument Description
============== ====================================================
datin input data on a lat/lon grid.
lons, lats rank-1 arrays containing longitudes and latitudes
(in degrees) of input data in increasing order.
For non-cylindrical projections (those other than
``cyl``, ``merc``, ``cea``, ``gall`` and ``mill``) lons
must fit within range -180 to 180.
nx, ny The size of the output regular grid in map
projection coordinates
============== ====================================================
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
returnxy If True, the x and y values of the map
projection grid are also returned (Default False).
checkbounds If True, values of lons and lats are checked to see
that they lie within the map projection region.
Default is False, and data outside map projection
region is clipped to values on boundary.
masked If True, interpolated data is returned as a masked
array with values outside map projection region
masked (Default False).
order 0 for nearest-neighbor interpolation, 1 for
bilinear, 3 for cubic spline (Default 1).
Cubic spline interpolation requires scipy.ndimage.
============== ====================================================
Returns ``datout`` (data on map projection grid).
If returnxy=True, returns ``data,x,y``.
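        Illustrative usage (a sketch; assumes an existing Basemap instance
        ``m``, the 2.5-degree grid below is arbitrary)::

            import numpy as np
            lons = np.arange(-180, 180, 2.5)
            lats = np.arange(-90, 90.01, 2.5)
            datin = np.cos(np.radians(lats))[:, np.newaxis]*np.ones(lons.size)
            dat, x, y = m.transform_scalar(datin, lons, lats, 300, 300,
                                           returnxy=True)
            m.imshow(dat)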
"""
# check that lons, lats increasing
delon = lons[1:]-lons[0:-1]
delat = lats[1:]-lats[0:-1]
if min(delon) < 0. or min(delat) < 0.:
raise ValueError('lons and lats must be increasing!')
# check that lons in -180,180 for non-cylindrical projections.
if self.projection not in _cylproj:
lonsa = np.array(lons)
count = np.sum(lonsa < -180.00001) + np.sum(lonsa > 180.00001)
if count > 1:
raise ValueError('grid must be shifted so that lons are monotonically increasing and fit in range -180,+180 (see shiftgrid function)')
# allow for wraparound point to be outside.
elif count == 1 and math.fabs(lons[-1]-lons[0]-360.) > 1.e-4:
raise ValueError('grid must be shifted so that lons are monotonically increasing and fit in range -180,+180 (see shiftgrid function)')
if returnxy:
lonsout, latsout, x, y = self.makegrid(nx,ny,returnxy=True)
else:
lonsout, latsout = self.makegrid(nx,ny)
datout = interp(datin,lons,lats,lonsout,latsout,checkbounds=checkbounds,order=order,masked=masked)
if returnxy:
return datout, x, y
else:
return datout
def transform_vector(self,uin,vin,lons,lats,nx,ny,returnxy=False,checkbounds=False,order=1,masked=False):
"""
Rotate and interpolate a vector field (``uin,vin``) from a
lat/lon grid with longitudes = ``lons`` and latitudes = ``lats``
to a ``ny`` by ``nx`` map projection grid.
The input vector field is defined in spherical coordinates (it
has eastward and northward components) while the output
vector field is rotated to map projection coordinates (relative
to x and y). The magnitude of the vector is preserved.
.. tabularcolumns:: |l|L|
============== ====================================================
Arguments Description
============== ====================================================
uin, vin input vector field on a lat/lon grid.
lons, lats rank-1 arrays containing longitudes and latitudes
(in degrees) of input data in increasing order.
For non-cylindrical projections (those other than
``cyl``, ``merc``, ``cea``, ``gall`` and ``mill``) lons
must fit within range -180 to 180.
nx, ny The size of the output regular grid in map
projection coordinates
============== ====================================================
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
returnxy If True, the x and y values of the map
projection grid are also returned (Default False).
checkbounds If True, values of lons and lats are checked to see
that they lie within the map projection region.
Default is False, and data outside map projection
region is clipped to values on boundary.
masked If True, interpolated data is returned as a masked
array with values outside map projection region
masked (Default False).
order 0 for nearest-neighbor interpolation, 1 for
bilinear, 3 for cubic spline (Default 1).
Cubic spline interpolation requires scipy.ndimage.
============== ====================================================
Returns ``uout, vout`` (vector field on map projection grid).
If returnxy=True, returns ``uout,vout,x,y``.
"""
# check that lons, lats increasing
delon = lons[1:]-lons[0:-1]
delat = lats[1:]-lats[0:-1]
if min(delon) < 0. or min(delat) < 0.:
raise ValueError('lons and lats must be increasing!')
# check that lons in -180,180 for non-cylindrical projections.
if self.projection not in _cylproj:
lonsa = np.array(lons)
count = np.sum(lonsa < -180.00001) + np.sum(lonsa > 180.00001)
if count > 1:
raise ValueError('grid must be shifted so that lons are monotonically increasing and fit in range -180,+180 (see shiftgrid function)')
# allow for wraparound point to be outside.
elif count == 1 and math.fabs(lons[-1]-lons[0]-360.) > 1.e-4:
raise ValueError('grid must be shifted so that lons are monotonically increasing and fit in range -180,+180 (see shiftgrid function)')
lonsout, latsout, x, y = self.makegrid(nx,ny,returnxy=True)
# interpolate to map projection coordinates.
uin = interp(uin,lons,lats,lonsout,latsout,checkbounds=checkbounds,order=order,masked=masked)
vin = interp(vin,lons,lats,lonsout,latsout,checkbounds=checkbounds,order=order,masked=masked)
# rotate from geographic to map coordinates.
return self.rotate_vector(uin,vin,lonsout,latsout,returnxy=returnxy)
def rotate_vector(self,uin,vin,lons,lats,returnxy=False):
"""
Rotate a vector field (``uin,vin``) on a rectilinear grid
with longitudes = ``lons`` and latitudes = ``lats`` from
geographical (lat/lon) into map projection (x/y) coordinates.
Differs from transform_vector in that no interpolation is done.
The vector is returned on the same grid, but rotated into
x,y coordinates.
The input vector field is defined in spherical coordinates (it
has eastward and northward components) while the output
vector field is rotated to map projection coordinates (relative
to x and y). The magnitude of the vector is preserved.
.. tabularcolumns:: |l|L|
============== ====================================================
Arguments Description
============== ====================================================
uin, vin input vector field on a lat/lon grid.
lons, lats Arrays containing longitudes and latitudes
(in degrees) of input data in increasing order.
For non-cylindrical projections (those other than
                       ``cyl``, ``merc``, ``cea``, ``gall`` and ``mill``) lons
must fit within range -180 to 180.
============== ====================================================
Returns ``uout, vout`` (rotated vector field).
If the optional keyword argument
``returnxy`` is True (default is False),
returns ``uout,vout,x,y`` (where ``x,y`` are the map projection
coordinates of the grid defined by ``lons,lats``).
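        Illustrative usage (a sketch; assumes an existing Basemap instance
        ``m``, the uniform eastward wind below is arbitrary)::

            import numpy as np
            lons = np.arange(-180, 180, 10.)
            lats = np.arange(-80, 81, 10.)
            u = 10.*np.ones((lats.size, lons.size))
            v = np.zeros((lats.size, lons.size))
            uproj, vproj, x, y = m.rotate_vector(u, v, lons, lats,
                                                 returnxy=True)
            m.quiver(x, y, uproj, vproj)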
"""
# if lons,lats are 1d and uin,vin are 2d, and
# lats describes 1st dim of uin,vin, and
# lons describes 2nd dim of uin,vin, make lons,lats 2d
# with meshgrid.
if lons.ndim == lats.ndim == 1 and uin.ndim == vin.ndim == 2 and\
uin.shape[1] == vin.shape[1] == lons.shape[0] and\
uin.shape[0] == vin.shape[0] == lats.shape[0]:
lons, lats = np.meshgrid(lons, lats)
else:
if not lons.shape == lats.shape == uin.shape == vin.shape:
raise TypeError("shapes of lons,lats and uin,vin don't match")
x, y = self(lons, lats)
# rotate from geographic to map coordinates.
if ma.isMaskedArray(uin):
mask = ma.getmaskarray(uin)
masked = True
uin = uin.filled(1)
vin = vin.filled(1)
else:
masked = False
# Map the (lon, lat) vector in the complex plane.
uvc = uin + 1j*vin
uvmag = np.abs(uvc)
theta = np.angle(uvc)
# Define a displacement (dlon, dlat) that moves all
# positions (lons, lats) a small distance in the
# direction of the original vector.
dc = 1E-5 * np.exp(theta*1j)
dlat = dc.imag * np.cos(np.radians(lats))
dlon = dc.real
# Deal with displacements that overshoot the North or South Pole.
farnorth = np.abs(lats+dlat) >= 90.0
somenorth = farnorth.any()
if somenorth:
dlon[farnorth] *= -1.0
dlat[farnorth] *= -1.0
# Add displacement to original location and find the native coordinates.
lon1 = lons + dlon
lat1 = lats + dlat
xn, yn = self(lon1, lat1)
# Determine the angle of the displacement in the native coordinates.
vecangle = np.arctan2(yn-y, xn-x)
if somenorth:
vecangle[farnorth] += np.pi
# Compute the x-y components of the original vector.
uvcout = uvmag * np.exp(1j*vecangle)
uout = uvcout.real
vout = uvcout.imag
if masked:
uout = ma.array(uout, mask=mask)
vout = ma.array(vout, mask=mask)
if returnxy:
return uout,vout,x,y
else:
return uout,vout
def set_axes_limits(self,ax=None):
"""
Final step in Basemap method wrappers of Axes plotting methods:
Set axis limits, fix aspect ratio for map domain using current
or specified axes instance. This is done only once per axes
instance.
In interactive mode, this method always calls draw_if_interactive
before returning.
"""
# get current axes instance (if none specified).
ax = ax or self._check_ax()
# If we have already set the axes limits, and if the user
# has not defeated this by turning autoscaling back on,
# then all we need to do is plot if interactive.
if (hash(ax) in self._initialized_axes
and not ax.get_autoscalex_on()
and not ax.get_autoscaley_on()):
if is_interactive():
import matplotlib.pyplot as plt
plt.draw_if_interactive()
return
self._initialized_axes.add(hash(ax))
# Take control of axis scaling:
ax.set_autoscale_on(False)
# update data limits for map domain.
corners = ((self.llcrnrx, self.llcrnry), (self.urcrnrx, self.urcrnry))
ax.update_datalim(corners)
ax.set_xlim((self.llcrnrx, self.urcrnrx))
ax.set_ylim((self.llcrnry, self.urcrnry))
# if map boundary not yet drawn for elliptical maps, draw it with default values.
if not self._mapboundarydrawn or self._mapboundarydrawn not in ax.patches:
# elliptical map, draw boundary manually.
if ((self.projection in ['ortho', 'geos', 'nsper', 'aeqd'] and
self._fulldisk) or self.round or
self.projection in _pseudocyl):
# first draw boundary, no fill
limb1 = self.drawmapboundary(fill_color='none', ax=ax)
# draw another filled patch, with no boundary.
limb2 = self.drawmapboundary(linewidth=0, ax=ax)
self._mapboundarydrawn = limb2
# for elliptical map, always turn off axis_frame.
if ((self.projection in ['ortho', 'geos', 'nsper', 'aeqd'] and
self._fulldisk) or self.round or
self.projection in _pseudocyl):
# turn off axes frame.
ax.set_frame_on(False)
# make sure aspect ratio of map preserved.
# plot is re-centered in bounding rectangle.
# (anchor instance var determines where plot is placed)
if self.fix_aspect:
ax.set_aspect('equal',anchor=self.anchor)
else:
ax.set_aspect('auto',anchor=self.anchor)
# make sure axis ticks are turned off.
if self.noticks:
ax.set_xticks([])
ax.set_yticks([])
# force draw if in interactive mode.
if is_interactive():
import matplotlib.pyplot as plt
plt.draw_if_interactive()
def _save_use_hold(self, ax, kwargs):
h = kwargs.pop('hold', None)
if hasattr(ax, '_hold'):
self._tmp_hold = ax._hold
if h is not None:
ax._hold = h
def _restore_hold(self, ax):
if hasattr(ax, '_hold'):
ax._hold = self._tmp_hold
@_transform1d
def scatter(self, *args, **kwargs):
"""
Plot points with markers on the map
(see matplotlib.pyplot.scatter documentation).
        If ``latlon`` keyword is set to True, x,y are interpreted as
longitude and latitude in degrees. Data and longitudes are
automatically shifted to match map projection region for cylindrical
and pseudocylindrical projections, and x,y are transformed to map
projection coordinates. If ``latlon`` is False (default), x and y
are assumed to be map projection coordinates.
Extra keyword ``ax`` can be used to override the default axes instance.
Other \**kwargs passed on to matplotlib.pyplot.scatter.
"""
ax, plt = self._ax_plt_from_kw(kwargs)
self._save_use_hold(ax, kwargs)
try:
ret = ax.scatter(*args, **kwargs)
finally:
self._restore_hold(ax)
# reset current active image (only if pyplot is imported).
if plt:
plt.sci(ret)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip to map limbs
ret,c = self._cliplimb(ax,ret)
return ret
@_transform1d
def plot(self, *args, **kwargs):
"""
Draw lines and/or markers on the map
(see matplotlib.pyplot.plot documentation).
        If ``latlon`` keyword is set to True, x,y are interpreted as
longitude and latitude in degrees. Data and longitudes are
automatically shifted to match map projection region for cylindrical
and pseudocylindrical projections, and x,y are transformed to map
projection coordinates. If ``latlon`` is False (default), x and y
are assumed to be map projection coordinates.
Extra keyword ``ax`` can be used to override the default axis instance.
Other \**kwargs passed on to matplotlib.pyplot.plot.
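        Illustrative usage (a sketch; assumes an existing Basemap instance
        ``m``, the coordinates are arbitrary)::

            lon, lat = 151.21, -33.87
            xpt, ypt = m(lon, lat)
            m.plot([xpt], [ypt], 'ro')               # map projection coords
            m.plot([lon], [lat], 'bo', latlon=True)  # or lon/lat directly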
"""
ax = kwargs.pop('ax', None) or self._check_ax()
self._save_use_hold(ax, kwargs)
try:
ret = ax.plot(*args, **kwargs)
finally:
self._restore_hold(ax)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip to map limbs
ret,c = self._cliplimb(ax,ret)
return ret
def imshow(self, *args, **kwargs):
"""
Display an image over the map
(see matplotlib.pyplot.imshow documentation).
``extent`` and ``origin`` keywords set automatically so image
will be drawn over map region.
Extra keyword ``ax`` can be used to override the default axis instance.
Other \**kwargs passed on to matplotlib.pyplot.plot.
returns an matplotlib.image.AxesImage instance.
"""
ax, plt = self._ax_plt_from_kw(kwargs)
kwargs['extent']=(self.llcrnrx,self.urcrnrx,self.llcrnry,self.urcrnry)
# use origin='lower', unless overridden.
if 'origin' not in kwargs:
kwargs['origin']='lower'
self._save_use_hold(ax, kwargs)
try:
ret = ax.imshow(*args, **kwargs)
finally:
self._restore_hold(ax)
# reset current active image (only if pyplot is imported).
if plt:
plt.sci(ret)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip image to map limbs
ret,c = self._cliplimb(ax,ret)
return ret
@_transform
def pcolor(self,x,y,data,**kwargs):
"""
Make a pseudo-color plot over the map
(see matplotlib.pyplot.pcolor documentation).
        If ``latlon`` keyword is set to True, x,y are interpreted as
longitude and latitude in degrees. Data and longitudes are
automatically shifted to match map projection region for cylindrical
and pseudocylindrical projections, and x,y are transformed to map
projection coordinates. If ``latlon`` is False (default), x and y
are assumed to be map projection coordinates.
If x or y are outside projection limb (i.e. they have values > 1.e20)
        they will be converted to masked arrays with those values masked.
As a result, those values will not be plotted.
If ``tri`` is set to ``True``, an unstructured grid is assumed
(x,y,data must be 1-d) and matplotlib.pyplot.tripcolor is used.
Extra keyword ``ax`` can be used to override the default axis instance.
Other \**kwargs passed on to matplotlib.pyplot.pcolor (or tripcolor if
``tri=True``).
Note: (taken from matplotlib.pyplot.pcolor documentation)
Ideally the dimensions of x and y should be one greater than those of data;
if the dimensions are the same, then the last row and column of data will be ignored.
"""
ax, plt = self._ax_plt_from_kw(kwargs)
self._save_use_hold(ax, kwargs)
try:
if kwargs.pop('tri', False):
try:
import matplotlib.tri as tri
except:
msg='need matplotlib > 0.99.1 to plot on unstructured grids'
raise ImportError(msg)
# for unstructured grids, toss out points outside
# projection limb (don't use those points in triangulation).
if ma.isMA(data):
data = data.filled(fill_value=1.e30)
masked=True
else:
masked=False
mask = np.logical_or(x<1.e20,y<1.e20)
x = np.compress(mask,x)
y = np.compress(mask,y)
data = np.compress(mask,data)
if masked:
triang = tri.Triangulation(x, y)
z = data[triang.triangles]
mask = (z > 1.e20).sum(axis=-1)
triang.set_mask(mask)
ret = ax.tripcolor(triang,data,**kwargs)
else:
ret = ax.tripcolor(x,y,data,**kwargs)
else:
# make x,y masked arrays
# (masked where data is outside of projection limb)
x = ma.masked_values(np.where(x > 1.e20,1.e20,x), 1.e20)
y = ma.masked_values(np.where(y > 1.e20,1.e20,y), 1.e20)
ret = ax.pcolor(x,y,data,**kwargs)
finally:
self._restore_hold(ax)
# reset current active image (only if pyplot is imported).
if plt:
plt.sci(ret)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip to map limbs
ret,c = self._cliplimb(ax,ret)
if self.round:
# for some reason, frame gets turned on.
ax.set_frame_on(False)
return ret
@_transform
def pcolormesh(self,x,y,data,**kwargs):
"""
Make a pseudo-color plot over the map
(see matplotlib.pyplot.pcolormesh documentation).
        If ``latlon`` keyword is set to True, x,y are interpreted as
longitude and latitude in degrees. Data and longitudes are
automatically shifted to match map projection region for cylindrical
and pseudocylindrical projections, and x,y are transformed to map
projection coordinates. If ``latlon`` is False (default), x and y
are assumed to be map projection coordinates.
Extra keyword ``ax`` can be used to override the default axis instance.
Other \**kwargs passed on to matplotlib.pyplot.pcolormesh.
Note: (taken from matplotlib.pyplot.pcolor documentation)
Ideally the dimensions of x and y should be one greater than those of data;
if the dimensions are the same, then the last row and column of data will be ignored.
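        Illustrative usage (a sketch; assumes an existing Basemap instance
        ``m`` and 2-D arrays ``lons``, ``lats``, ``data`` on a lat/lon grid)::

            im = m.pcolormesh(lons, lats, data, latlon=True, cmap='viridis')
            m.colorbar(im)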
"""
ax, plt = self._ax_plt_from_kw(kwargs)
# fix for invalid grid points
if ((np.any(x > 1e20) or np.any(y > 1e20)) and
x.ndim == 2 and y.ndim == 2):
if x.shape != y.shape:
raise ValueError('pcolormesh: x and y need same dimension')
nx,ny = x.shape
if nx < data.shape[0] or ny < data.shape[1]:
raise ValueError('pcolormesh: data dimension needs to be at least that of x and y.')
mask = (
(x[:-1,:-1] > 1e20) |
(x[1:,:-1] > 1e20) |
(x[:-1,1:] > 1e20) |
(x[1:,1:] > 1e20) |
(y[:-1,:-1] > 1e20) |
(y[1:,:-1] > 1e20) |
(y[:-1,1:] > 1e20) |
(y[1:,1:] > 1e20)
)
# we do not want to overwrite original array
data = data[:nx-1,:ny-1].copy()
data[mask] = np.nan
self._save_use_hold(ax, kwargs)
try:
ret = ax.pcolormesh(x,y,data,**kwargs)
finally:
self._restore_hold(ax)
# reset current active image (only if pyplot is imported).
if plt:
plt.sci(ret)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip to map limbs
ret,c = self._cliplimb(ax,ret)
if self.round:
# for some reason, frame gets turned on.
ax.set_frame_on(False)
return ret
def hexbin(self,x,y,**kwargs):
"""
Make a hexagonal binning plot of x versus y, where x, y are 1-D
sequences of the same length, N. If C is None (the default), this is a
        histogram of the number of occurrences of the observations at
(x[i],y[i]).
If C is specified, it specifies values at the coordinate (x[i],y[i]).
These values are accumulated for each hexagonal bin and then reduced
according to reduce_C_function, which defaults to the numpy mean function
(np.mean). (If C is specified, it must also be a 1-D sequence of the
same length as x and y.)
x, y and/or C may be masked arrays, in which case only unmasked points
will be plotted.
(see matplotlib.pyplot.hexbin documentation).
Extra keyword ``ax`` can be used to override the default axis instance.
Other \**kwargs passed on to matplotlib.pyplot.hexbin
"""
ax, plt = self._ax_plt_from_kw(kwargs)
self._save_use_hold(ax, kwargs)
try:
# make x,y masked arrays
# (masked where data is outside of projection limb)
x = ma.masked_values(np.where(x > 1.e20,1.e20,x), 1.e20)
y = ma.masked_values(np.where(y > 1.e20,1.e20,y), 1.e20)
ret = ax.hexbin(x,y,**kwargs)
finally:
self._restore_hold(ax)
# reset current active image (only if pyplot is imported).
if plt:
plt.sci(ret)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip to map limbs
ret,c = self._cliplimb(ax,ret)
return ret
@_transform
def contour(self,x,y,data,*args,**kwargs):
"""
Make a contour plot over the map
(see matplotlib.pyplot.contour documentation).
        If ``latlon`` keyword is set to True, x,y are interpreted as
longitude and latitude in degrees. Data and longitudes are
automatically shifted to match map projection region for cylindrical
and pseudocylindrical projections, and x,y are transformed to map
projection coordinates. If ``latlon`` is False (default), x and y
are assumed to be map projection coordinates.
Extra keyword ``ax`` can be used to override the default axis instance.
If ``tri`` is set to ``True``, an unstructured grid is assumed
(x,y,data must be 1-d) and matplotlib.pyplot.tricontour is used.
Other \*args and \**kwargs passed on to matplotlib.pyplot.contour
(or tricontour if ``tri=True``).
"""
ax, plt = self._ax_plt_from_kw(kwargs)
self._save_use_hold(ax, kwargs)
try:
if kwargs.pop('tri', False):
try:
import matplotlib.tri as tri
except:
msg='need matplotlib > 0.99.1 to plot on unstructured grids'
raise ImportError(msg)
# for unstructured grids, toss out points outside
# projection limb (don't use those points in triangulation).
if ma.isMA(data):
data = data.filled(fill_value=1.e30)
masked=True
else:
masked=False
mask = np.logical_or(x<self.xmin,y<self.xmin) +\
np.logical_or(x>self.xmax,y>self.xmax)
x = np.compress(mask,x)
y = np.compress(mask,y)
data = np.compress(mask,data)
if masked:
triang = tri.Triangulation(x, y)
z = data[triang.triangles]
mask = (z > 1.e20).sum(axis=-1)
triang.set_mask(mask)
CS = ax.tricontour(triang,data,*args,**kwargs)
else:
CS = ax.tricontour(x,y,data,*args,**kwargs)
else:
# make sure x is monotonically increasing - if not,
# print warning suggesting that the data be shifted in longitude
# with the shiftgrid function.
# only do this check for global projections.
if self.projection in _cylproj + _pseudocyl:
xx = x[x.shape[0]//2,:]
condition = (xx >= self.xmin) & (xx <= self.xmax)
xl = xx.compress(condition).tolist()
xs = xl[:]
xs.sort()
if xl != xs:
sys.stdout.write(dedent("""
                        WARNING: x coordinate not monotonically increasing - contour plot
                        may not be what you expect. If it looks odd, you can either
adjust the map projection region to be consistent with your data, or
(if your data is on a global lat/lon grid) use the shiftdata
method to adjust the data to be consistent with the map projection
region (see examples/shiftdata.py)."""))
# mask for points more than one grid length outside projection limb.
xx = ma.masked_where(x > 1.e20, x)
yy = ma.masked_where(y > 1.e20, y)
epsx = np.abs(xx[:,1:]-xx[:,0:-1]).max()
epsy = np.abs(yy[1:,:]-yy[0:-1,:]).max()
xymask = \
np.logical_or(np.greater(x,self.xmax+epsx),np.greater(y,self.ymax+epsy))
xymask = xymask + \
np.logical_or(np.less(x,self.xmin-epsx),np.less(y,self.ymin-epsy))
data = ma.asarray(data)
# combine with data mask.
mask = np.logical_or(ma.getmaskarray(data),xymask)
data = ma.masked_array(data,mask=mask)
CS = ax.contour(x,y,data,*args,**kwargs)
finally:
self._restore_hold(ax)
# reset current active image (only if pyplot is imported).
if plt and CS.get_array() is not None:
plt.sci(CS)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip to map limbs
CS.collections,c = self._cliplimb(ax,CS.collections)
return CS
@_transform
def contourf(self,x,y,data,*args,**kwargs):
"""
Make a filled contour plot over the map
(see matplotlib.pyplot.contourf documentation).
        If ``latlon`` keyword is set to True, x,y are interpreted as
longitude and latitude in degrees. Data and longitudes are
automatically shifted to match map projection region for cylindrical
and pseudocylindrical projections, and x,y are transformed to map
projection coordinates. If ``latlon`` is False (default), x and y
are assumed to be map projection coordinates.
If x or y are outside projection limb (i.e. they have values > 1.e20),
        the corresponding data elements will be masked.
Extra keyword 'ax' can be used to override the default axis instance.
If ``tri`` is set to ``True``, an unstructured grid is assumed
(x,y,data must be 1-d) and matplotlib.pyplot.tricontourf is used.
Other \*args and \**kwargs passed on to matplotlib.pyplot.contourf
(or tricontourf if ``tri=True``).
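        Illustrative usage (a sketch; assumes an existing Basemap instance
        ``m`` and 2-D arrays ``lons``, ``lats``, ``data`` on a lat/lon grid)::

            x, y = m(lons, lats)
            cs = m.contourf(x, y, data, 15, cmap='RdBu_r')
            # or let Basemap transform the coordinates itself:
            cs = m.contourf(lons, lats, data, 15, cmap='RdBu_r', latlon=True)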
"""
ax, plt = self._ax_plt_from_kw(kwargs)
self._save_use_hold(ax, kwargs)
try:
if kwargs.get('tri', False):
try:
import matplotlib.tri as tri
except:
msg='need matplotlib > 0.99.1 to plot on unstructured grids'
raise ImportError(msg)
# for unstructured grids, toss out points outside
# projection limb (don't use those points in triangulation).
if ma.isMA(data):
data = data.filled(fill_value=1.e30)
masked=True
else:
masked=False
mask = np.logical_or(x<1.e20,y<1.e20)
x = np.compress(mask,x)
y = np.compress(mask,y)
data = np.compress(mask,data)
if masked:
triang = tri.Triangulation(x, y)
z = data[triang.triangles]
mask = (z > 1.e20).sum(axis=-1)
triang.set_mask(mask)
CS = ax.tricontourf(triang,data,*args,**kwargs)
else:
CS = ax.tricontourf(x,y,data,*args,**kwargs)
else:
# make sure x is monotonically increasing - if not,
# print warning suggesting that the data be shifted in longitude
# with the shiftgrid function.
# only do this check for global projections.
if self.projection in _cylproj + _pseudocyl:
xx = x[x.shape[0]//2,:]
condition = (xx >= self.xmin) & (xx <= self.xmax)
xl = xx.compress(condition).tolist()
xs = xl[:]
xs.sort()
if xl != xs:
sys.stdout.write(dedent("""
                        WARNING: x coordinate not monotonically increasing - contour plot
                        may not be what you expect. If it looks odd, you can either
adjust the map projection region to be consistent with your data, or
(if your data is on a global lat/lon grid) use the shiftgrid
function to adjust the data to be consistent with the map projection
region (see examples/contour_demo.py)."""))
# mask for points more than one grid length outside projection limb.
xx = ma.masked_where(x > 1.e20, x)
yy = ma.masked_where(y > 1.e20, y)
if self.projection != 'omerc':
epsx = np.abs(xx[:,1:]-xx[:,0:-1]).max()
epsy = np.abs(yy[1:,:]-yy[0:-1,:]).max()
else: # doesn't work for omerc (FIXME)
epsx = 0.; epsy = 0
xymask = \
np.logical_or(np.greater(x,self.xmax+epsx),np.greater(y,self.ymax+epsy))
xymask = xymask + \
np.logical_or(np.less(x,self.xmin-epsx),np.less(y,self.ymin-epsy))
data = ma.asarray(data)
# combine with data mask.
mask = np.logical_or(ma.getmaskarray(data),xymask)
data = ma.masked_array(data,mask=mask)
CS = ax.contourf(x,y,data,*args,**kwargs)
finally:
self._restore_hold(ax)
# reset current active image (only if pyplot is imported).
if plt and CS.get_array() is not None:
plt.sci(CS)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip to map limbs
CS.collections,c = self._cliplimb(ax,CS.collections)
return CS
@_transformuv
def quiver(self, x, y, u, v, *args, **kwargs):
"""
Make a vector plot (u, v) with arrows on the map.
Arguments may be 1-D or 2-D arrays or sequences
(see matplotlib.pyplot.quiver documentation for details).
        If ``latlon`` keyword is set to True, x,y are interpreted as
longitude and latitude in degrees. Data and longitudes are
automatically shifted to match map projection region for cylindrical
and pseudocylindrical projections, and x,y are transformed to map
projection coordinates. If ``latlon`` is False (default), x and y
are assumed to be map projection coordinates.
Extra keyword ``ax`` can be used to override the default axis instance.
Other \*args and \**kwargs passed on to matplotlib.pyplot.quiver.
"""
ax, plt = self._ax_plt_from_kw(kwargs)
self._save_use_hold(ax, kwargs)
try:
ret = ax.quiver(x,y,u,v,*args,**kwargs)
finally:
self._restore_hold(ax)
if plt is not None and ret.get_array() is not None:
plt.sci(ret)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip to map limbs
ret,c = self._cliplimb(ax,ret)
return ret
@_transformuv
def streamplot(self, x, y, u, v, *args, **kwargs):
"""
Draws streamlines of a vector flow.
(see matplotlib.pyplot.streamplot documentation).
        If ``latlon`` keyword is set to True, x,y are interpreted as
longitude and latitude in degrees. Data and longitudes are
automatically shifted to match map projection region for cylindrical
and pseudocylindrical projections, and x,y are transformed to map
projection coordinates. If ``latlon`` is False (default), x and y
are assumed to be map projection coordinates.
Extra keyword ``ax`` can be used to override the default axis instance.
Other \*args and \**kwargs passed on to matplotlib.pyplot.streamplot.
"""
if _matplotlib_version < '1.2':
msg = dedent("""
streamplot method requires matplotlib 1.2 or higher,
you have %s""" % _matplotlib_version)
raise NotImplementedError(msg)
ax, plt = self._ax_plt_from_kw(kwargs)
self._save_use_hold(ax, kwargs)
try:
ret = ax.streamplot(x,y,u,v,*args,**kwargs)
finally:
self._restore_hold(ax)
if plt is not None and ret.lines.get_array() is not None:
plt.sci(ret.lines)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip to map limbs
ret.lines,c = self._cliplimb(ax,ret.lines)
ret.arrows,c = self._cliplimb(ax,ret.arrows)
# streamplot arrows not returned in matplotlib 1.1.1, so clip all
# FancyArrow patches attached to axes instance.
if c is not None:
for p in ax.patches:
if isinstance(p,FancyArrowPatch): p.set_clip_path(c)
return ret
@_transformuv
def barbs(self, x, y, u, v, *args, **kwargs):
"""
        Make a wind barb plot (u, v) on the map.
(see matplotlib.pyplot.barbs documentation).
        If ``latlon`` keyword is set to True, x,y are interpreted as
longitude and latitude in degrees. Data and longitudes are
automatically shifted to match map projection region for cylindrical
and pseudocylindrical projections, and x,y are transformed to map
projection coordinates. If ``latlon`` is False (default), x and y
are assumed to be map projection coordinates.
Extra keyword ``ax`` can be used to override the default axis instance.
Other \*args and \**kwargs passed on to matplotlib.pyplot.barbs
Returns two matplotlib.axes.Barbs instances, one for the Northern
Hemisphere and one for the Southern Hemisphere.
"""
if _matplotlib_version < '0.98.3':
msg = dedent("""
barb method requires matplotlib 0.98.3 or higher,
you have %s""" % _matplotlib_version)
raise NotImplementedError(msg)
ax, plt = self._ax_plt_from_kw(kwargs)
lons, lats = self(x, y, inverse=True)
unh = ma.masked_where(lats <= 0, u)
vnh = ma.masked_where(lats <= 0, v)
ush = ma.masked_where(lats > 0, u)
vsh = ma.masked_where(lats > 0, v)
self._save_use_hold(ax, kwargs)
try:
retnh = ax.barbs(x,y,unh,vnh,*args,**kwargs)
kwargs['flip_barb']=True
retsh = ax.barbs(x,y,ush,vsh,*args,**kwargs)
finally:
self._restore_hold(ax)
# Because there are two collections returned in general,
# we can't set the current image...
#if plt is not None and ret.get_array() is not None:
# plt.sci(retnh)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip to map limbs
retnh,c = self._cliplimb(ax,retnh)
retsh,c = self._cliplimb(ax,retsh)
return retnh,retsh
def drawlsmask(self,land_color="0.8",ocean_color="w",lsmask=None,
lsmask_lons=None,lsmask_lats=None,lakes=True,resolution='l',grid=5,**kwargs):
"""
Draw land-sea mask image.
.. note::
The land-sea mask image cannot be overlaid on top
of other images, due to limitations in matplotlib image handling
(you can't specify the zorder of an image).
.. tabularcolumns:: |l|L|
============== ====================================================
Keywords Description
============== ====================================================
land_color desired land color (color name or rgba tuple).
Default gray ("0.8").
ocean_color desired water color (color name or rgba tuple).
Default white.
lsmask An array of 0's for ocean pixels, 1's for
land pixels and 2's for lake/pond pixels.
Default is None
(default 5-minute resolution land-sea mask is used).
lakes Plot lakes and ponds (Default True)
lsmask_lons 1d array of longitudes for lsmask (ignored
if lsmask is None). Longitudes must be ordered
from -180 W eastward.
lsmask_lats 1d array of latitudes for lsmask (ignored
if lsmask is None). Latitudes must be ordered
from -90 S northward.
resolution gshhs coastline resolution used to define land/sea
mask (default 'l', available 'c','l','i','h' or 'f')
grid land/sea mask grid spacing in minutes (Default 5;
10, 2.5 and 1.25 are also available).
\**kwargs extra keyword arguments passed on to
:meth:`imshow`
============== ====================================================
If any of the lsmask, lsmask_lons or lsmask_lats keywords are not
set, the built in GSHHS land-sea mask datasets are used.
Extra keyword ``ax`` can be used to override the default axis instance.
returns a matplotlib.image.AxesImage instance.
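        Illustrative usage (a sketch; the projection and colors are
        arbitrary)::

            from mpl_toolkits.basemap import Basemap
            m = Basemap(projection='moll', lon_0=0)
            m.drawlsmask(land_color='coral', ocean_color='aqua', lakes=True)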
"""
# convert land and water colors to integer rgba tuples with
# values between 0 and 255.
from matplotlib.colors import ColorConverter
c = ColorConverter()
# if conversion fails, assume it's because the color
# given is already an rgba tuple with values between 0 and 255.
try:
cl = c.to_rgba(land_color)
rgba_land = tuple([int(255*x) for x in cl])
except:
rgba_land = land_color
try:
co = c.to_rgba(ocean_color)
rgba_ocean = tuple([int(255*x) for x in co])
except:
rgba_ocean = ocean_color
# look for axes instance (as keyword, an instance variable
        # or from plt.gca()).
ax = kwargs.pop('ax', None) or self._check_ax()
# Clear saved lsmask if new lsmask is passed
if lsmask is not None or lsmask_lons is not None \
or lsmask_lats is not None:
# Make sure passed lsmask is not the same as cached mask
if lsmask is not self.lsmask:
self.lsmask = None
# if lsmask,lsmask_lons,lsmask_lats keywords not given,
# read default land-sea mask in from file.
if lsmask is None or lsmask_lons is None or lsmask_lats is None:
# if lsmask instance variable already set, data already
# read in.
if self.lsmask is None:
# read in land/sea mask.
lsmask_lons, lsmask_lats, lsmask =\
_readlsmask(lakes=lakes,resolution=resolution,grid=grid)
# instance variable lsmask is set on first invocation,
# it contains the land-sea mask interpolated to the native
# projection grid. Further calls to drawlsmask will not
# redo the interpolation (unless a new land-sea mask is passed
# in via the lsmask, lsmask_lons, lsmask_lats keywords).
# is it a cylindrical projection whose limits lie
# outside the limits of the image?
cylproj = self.projection in _cylproj and \
(self.urcrnrlon > lsmask_lons[-1] or \
self.llcrnrlon < lsmask_lons[0])
if cylproj:
            # stack grids side-by-side (in longitudinal direction), so
# any range of longitudes may be plotted on a world map.
# in versions of NumPy later than 1.10.0, concatenate will
# not stack these arrays as expected. If axis 1 is outside
# the dimensions of the array, concatenate will now raise
# an IndexError. Using hstack instead.
lsmask_lons = \
np.hstack((lsmask_lons,lsmask_lons[1:] + 360))
lsmask = \
np.hstack((lsmask,lsmask[:,1:]))
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
#
# script : compute_ig_influence_on_wm.py
# purpose  : compute bound IG influence on wave merging
# author : <NAME>
# email : <EMAIL>
#
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
import warnings
import numpy as np
import datetime
import pandas as pd
import xarray as xr
from simpledbf import Dbf5
import pycwt as wavelet
from pycwt.helpers import find
from skimage.color import rgb2gray
from sklearn.preprocessing import minmax_scale
from pywavelearn.tracking import optimal_wavepaths
from pywavelearn.utils import (process_timestack,
ellapsedseconds,
align_signals,
read_pressure_data)
from pywavelearn.stats import HM0, TM01
from matplotlib.dates import date2num
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
sns.set_context("paper", font_scale=1.5, rc={"lines.linewidth": 3.0})
sns.set_style("ticks", {'axes.linewidth': 2,
'legend.frameon': True,
'axes.facecolor': "w",
'grid.color': "k"})
mpl.rcParams['axes.linewidth'] = 2
warnings.filterwarnings("ignore")
def wavelet_transform(dat, mother, s0, dj, J, dt, lims=[20, 120], t0=0):
"""
    Compute the continuous wavelet transform for a given signal.
    The input data is detrended and normalized by its standard deviation
    inside this function before the transform is applied.
This is a function wrapper around the pycwt simple_sample example with
some modifications.
----------
Args:
dat (Mandatory [array like]): input signal data.
mother (Mandatory [str]): the wavelet mother name.
s0 (Mandatory [float]): starting scale.
        dj (Mandatory [float]): number of sub-octaves per octave.
        J (Mandatory [float]): number of powers of two with dj sub-octaves.
        dt (Mandatory [float]): sampling interval, in the same time unit as
                                the input signal (seconds here).
        lims (Mandatory [list]): period interval used to isolate the
                                 infragravity band for the band-passed
                                 inverse transform.
        t0 (Optional [float]): start time of the record (default 0).
----------
Return:
        t, dt, power, period, coi, sig95, iwave, igwave: time array,
        sampling interval, wavelet power, Fourier-equivalent periods, cone
        of influence, 95% significance ratio, full inverse transform, and
        inverse transform restricted to the infragravity band.
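    ----------
    Example (illustrative only; ``eta`` stands for a 1-D water-surface
    record sampled once per second, and the constants mirror those set
    in ``__main__`` below):
        out = wavelet_transform(eta, wavelet.MexicanHat(), 0.25, 1/12,
                                96, 1, lims=[25, 250])
        t, dt, power, period, coi, sig95, iwave, igwave = out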
"""
    # create a time array in the same unit as dt (seconds here).
N = dat.size
t = np.arange(0, N) * dt + t0
    # detrend and normalize the input data by its standard deviation.
    # Sometimes detrending is not necessary and simply removing the mean
    # value is good enough; however, if the dataset has a well defined
    # trend it is strongly advised to perform detrending. Here, we fit a
    # one-degree polynomial and then subtract it from the original data.
p = np.polyfit(t - t0, dat, 1)
dat_notrend = dat - np.polyval(p, t - t0)
std = dat_notrend.std() # Standard deviation
var = std ** 2 # Variance
dat_norm = dat_notrend / std # Normalized dataset
alpha, _, _ = wavelet.ar1(dat) # Lag-1 autocorrelation for red noise
# the following routines perform the wavelet transform and inverse wavelet
# transform using the parameters defined above. Since we have normalized
# our input time-series, we multiply the inverse transform by the standard
# deviation.
wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(
dat_norm, dt, dj, s0, J, mother)
iwave = wavelet.icwt(wave, scales, dt, dj, mother) * std
# calculate the normalized wavelet and Fourier power spectra, as well as
# the Fourier equivalent periods for each wavelet scale.
power = (np.abs(wave)) ** 2
fft_power = np.abs(fft) ** 2
period = 1 / freqs
    # inverse transform keeping only periods longer than the lower limit of
    # the infragravity band (note that the module-level LIMS is used here,
    # not the ``lims`` argument)
    idx1 = np.argmin(np.abs(period - LIMS[0]))
    idx2 = np.argmin(np.abs(period - LIMS[1]))
_wave = wave.copy()
_wave[0:idx1, :] = 0
igwave = wavelet.icwt(_wave, scales, dt, dj, mother) * std
# could stop at this point and plot our results. However we are also
# interested in the power spectra significance test. The power is
# significant where the ratio ``power / sig95 > 1``.
signif, fft_theor = wavelet.significance(1.0, dt, scales, 0, alpha,
significance_level=0.95,
wavelet=mother)
sig95 = np.ones([1, N]) * signif[:, None]
sig95 = power / sig95
# calculate the global wavelet spectrum and determine its
# significance level.
glbl_power = power.mean(axis=1)
dof = N - scales # Correction for padding at edges
glbl_signif, tmp = wavelet.significance(var, dt, scales, 1, alpha,
significance_level=0.95, dof=dof,
wavelet=mother)
return t, dt, power, period, coi, sig95, iwave, igwave
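# A minimal usage sketch added for illustration, not part of the original
# script: it builds a synthetic 1 Hz signal and runs the wrapper above. It
# assumes pycwt is imported as `wavelet` and numpy as `np`, as elsewhere in
# this script; the `_demo_*` name is hypothetical.
def _demo_wavelet_transform():
    mother = wavelet.MexicanHat()
    # ten minutes of a 60 s oscillation sampled at 1 Hz, plus weak noise
    eta = np.sin(2 * np.pi * np.arange(600) / 60.0) + 0.1 * np.random.randn(600)
    return wavelet_transform(eta, mother, s0=0.25, dj=1 / 12, J=8 * 12,
                             dt=1, lims=[25, 250])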
def roundTime(dt=None, roundTo=60):
"""Round a datetime object to any time lapse in seconds
dt : datetime.datetime object, default now.
roundTo : Closest number of seconds to round to, default 1 minute.
Author: <NAME> 2012 - Use it as you want but don't blame me.
"""
if dt is None:
dt = datetime.datetime.now()
seconds = (dt.replace(tzinfo=None) - dt.min).seconds
rounding = (seconds + roundTo / 2) // roundTo * roundTo
return dt + datetime.timedelta(0, rounding - seconds, -dt.microsecond)
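# Hedged example (editor addition): rounding a timestamp to the nearest
# minute with the helper above. It assumes `datetime` is imported at the top
# of the script; the expected value follows from the rounding rule.
def _demo_round_time():
    ts = datetime.datetime(2018, 6, 14, 10, 32, 41)
    return roundTime(ts, roundTo=60)  # -> 2018-06-14 10:33:00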
if __name__ == '__main__':
print("\nAnalysing wave group influence on bore-merging, please wait...\n")
# wavelet constants
MOTHER = wavelet.MexicanHat()
DT = 1 # 1 second
S0 = 0.25 * DT # starting scale, in this case 0.25*1 = 0.25 seconds
DJ = 1 / 12 # twelve sub-octaves per octave
J = 8 / DJ # eight powers of two with dj sub-octaves
# infragravity wave period range
Ta = 300
NRUNS = 11
LIMS = [25, 250]
# data
main_data = "Raw_Data/"
breaking = "WaveBreaking/"
timestacks = "Timestacks/"
overrunning = "BoreBoreCapture/"
pressure = "PressureTransducer/"
# dates
Dates = ["20180424",
"20180614",
"20140807",
"20140816",
"20161220"]
# folder location names
Locations = ["FrazerBeach/",
"SevenMileBeach/",
"OneMileBeach/",
"WerriBeach/",
"MoretonIsland/"]
# PT data
PTs = [["20170706.nc", "HP1"],
["20180614.nc", "HP2"],
["20140807.nc", "TB_19"],
["20140816.nc", "UQ1"],
["20161220.nc", "HP5"]]
# names
Names = ["<NAME>",
"Seven Mile Beach",
"One Mile Beach",
"<NAME>",
"<NAME>"]
# read overrun final tabular data
df = pd.read_csv("data/final_tabular_data.csv")
bbox = dict(boxstyle="square", ec="none", fc="1", lw=1, alpha=0.7)
# loop over locations
phases = []
dominances = []
Locs = []
Hs = []
Tp = []
R = []
RDT = []
print("Looping over locations, please wait...")
for loc, locn, date, prs in zip(Locations, Names, Dates, PTs):
print("\n -- Analysing {}".format(locn))
# loop over timestacks
for i in range(NRUNS - 1):
print(" - run {} of {}".format(i + 1, NRUNS - 1), end="\r")
i += 1 # skip the first run
# open a figure
fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, 1,
figsize=(9, 12),
sharex=True)
# open and process timestack
f = main_data + timestacks + loc + date + "-{}.nc".format(
str(i).zfill(3))
t, x, rgb = process_timestack(xr.open_dataset(f))
gray = rgb2gray(rgb)
dtime = t
t = ellapsedseconds(t)
x -= x.min()
pxint = gray[:, int(len(x) / 2)]
# plot timestack
ax1.pcolormesh(t, x, gray.T, cmap="Greys_r")
# open and process wavepaths
f = main_data + breaking + loc + date + "-{}.dbf".format(
str(i).zfill(3))
wp = Dbf5(f, codec='utf-8').to_dataframe()
if "newtime" in wp.columns.values:
wp = wp[["newtime", "newspace", "wave"]]
wp.columns = ["t", "x", "wave"]
T, X, _ = optimal_wavepaths(wp, order=2, min_wave_period=1,
N=50, project=False, t_weights=1)
# plot wavepaths
for t, x in zip(T, X):
ax1.plot(t, x, lw=3, zorder=10)
ci = 0.25 * t.std()
ax1.plot(t + ci, x, lw=1, zorder=10, ls="--", color="k")
ax1.plot(t - ci, x, lw=1, zorder=10, ls="--", color="k")
# open and process merging event file
f = main_data + overrunning + loc + date + "-{}.csv".format(
str(i).zfill(3))
dfm = pd.read_csv(f)
tmerge = dfm["time"].values
xmerge = dfm["intersection"].values
# plot merging events
ax1.scatter(tmerge, xmerge, marker="s", s=140,
edgecolor="lawngreen", facecolor="none",
lw=4, zorder=20)
ax1.set_ylabel(r"Distance $[m]$")
for _t in tmerge:
ax1.axvline(_t, color="r", ls="-", lw=3)
# read and process PTs
f = main_data + pressure + prs[0]
t, time, eta = read_pressure_data(f, prs[1])
df_eta = pd.DataFrame(eta, index=time, columns=["eta"])
# df_int = pd.DataFrame(pxint, index=dtime, columns=["int"])
# resample to 1Hz
df_eta = df_eta.resample("1S").bfill()
# df_int = df_int.resample("1S").bfill()
# select period
tmin = dtime.min() - datetime.timedelta(seconds=LIMS[1])
tmax = dtime.max() + datetime.timedelta(seconds=LIMS[1])
df_eta = df_eta.between_time(tmin.time(),
tmax.time(),
include_start=True,
include_end=True)
eta = df_eta["eta"].values - df_eta["eta"].values.mean()
# plot PT data
lines = []
labels = []
s = ellapsedseconds(df_eta.index.to_pydatetime()) - LIMS[1]
ll = ax2.plot(s, eta, color="dodgerblue")
lines.append(ll[0])
labels.append("Sea-swell")
for _t in tmerge:
ax2.axvline(_t, color="r", ls="-", lw=3)
ax2.set_ylabel(r"$\eta - \overline{\eta}$ $[m]$")
# compute the wavelet transform
t, dt, power, period, coi, sig95, ieta, igeta = wavelet_transform(
eta, MOTHER, S0, DJ, J, DT, lims=LIMS)
t -= LIMS[1]
# integrate the local spectrum
idx = np.argmin(np.abs(period - LIMS[0]))
sw = np.trapz(power[0:idx, :], dx=dt, axis=0) / power.max()
ig = np.trapz(power[idx::, :], dx=dt, axis=0) / power.max()
tt = np.trapz(power, dx=dt, axis=0) / power.max()
# compute dominance and phase at the merging time
for _t in tmerge:
idx = np.argmin(np.abs(t - _t))
# IG phase
if igeta[idx] >= 0:
phases.append("Positive")
else:
phases.append("Negative")
# wave dominance
if ig[idx] >= sw[idx]:
dominances.append("Infragravity")
else:
dominances.append("Sea-Swell")
# append values
Hs.append(HM0(eta, 1))
Tp.append(TM01(eta, 1))
R.append(np.median(ig / sw))
# append location
Locs.append(locn)
# append run datetime
# fmt =
RDT.append(roundTime(dtime[0], roundTo=60).strftime(
"%Y-%m-%d %H:%M:%S"))
# print(time)
# plot the normalized wavelet power spectrum and significance
# level contour lines and cone of influence hatched area.
levels = [0.0625, 0.125, 0.25, 0.5, 1, 2, 4, 8, 16]
m = ax3.contourf(t, np.log2(period), np.log2(power),
np.log2(levels),
extend='both', cmap="cividis")
extent = [t.min(), t.max(), 0, max(period)]
ax3.fill(np.concatenate([t, t[-1:] + dt, t[-1:] + dt,
t[:1] - dt, t[:1] - dt]),
np.concatenate([np.log2(coi), [1e-9],
np.log2(period[-1:]),
np.log2(period[-1:]), [1e-9]]),
'k', alpha=0.3, hatch='x')
ticks = 2 ** np.arange(np.ceil(np.log2(period.min())),
np.ceil(np.log2(period.max())))
ax3.set_yticks(np.log2(ticks))
ax3.set_yticklabels(ticks)
ax3.invert_yaxis()
ax3.set_ylim(
|
np.log2(512)
|
numpy.log2
|
#!/usr/bin/env python3
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import math
import matplotlib
import numpy as np
import os
import unittest
from sumo.geometry.rot3 import Rot3
import sumo.metrics.utils as utils
from sumo.semantic.project_scene import ProjectScene
matplotlib.use("TkAgg")
"""
Test Evaluator utils functions
"""
class TestUtils(unittest.TestCase):
def test_quat_matrix(self):
rx = 10 # degrees
ry = 20
rz = 30
Rx = Rot3.Rx(math.radians(rx))
Ry = Rot3.Ry(math.radians(ry))
Rz = Rot3.Rz(math.radians(rz))
# matrix -> quat
R = Rz * Ry * Rx
q = utils.matrix_to_quat(R.R)
expected_q = np.array([
0.9515485, 0.0381346, 0.1893079, 0.2392983]) # computed manually
np.testing.assert_array_almost_equal(q, expected_q, 4)
# quat -> matrix
R2 = utils.quat_to_matrix(expected_q)
np.testing.assert_array_almost_equal(R2, R.R, 4)
# round trip
R2 = utils.quat_to_matrix(q)
np.testing.assert_array_almost_equal(R.R, R2, 4)
def test_quat_euler(self):
q =
|
np.array([0.9515485, 0.0381346, 0.1893079, 0.2392983])
|
numpy.array
|
# June 2017 : <NAME> : <EMAIL>
# --------------------------------------------------
#
# Adapted Mathis Hain's original code to:
# 1. Work with Python 3.
# 2. Vectorise with numpy, for speed.
# 3. Conform to PEP8 formatting.
# 4. Condense functions into two files
# 5. Make it work with the cbsyst module
# (https://github.com/oscarbranson/cbsyst) for
# calculating seawater carbonate and B chem in seawater.
#
# Original Header
# ---------------
# MyAMI Specific Ion Interaction Model (Version 1.0):
# This is a Python script to calculate thermodynamic pK's and conditional pK's
# Author: <NAME> -- <EMAIL>
#
# Reference:
# <NAME>., <NAME>., <NAME>., and <NAME>. (2015) The effects of secular calcium and magnesium concentration changes on the thermodynamics of seawater acid/base chemistry: Implications for Eocene and Cretaceous ocean carbon chemistry and buffering, Global Biogeochemical Cycles, 29, doi:10.1002/2014GB004986
#
# For general context on the calculations see Millero, 2007 (Chemical Reviews) and Millero and Pierrot, 1998 (Aquatic Geochemistry)
import itertools
import numpy as np
from tqdm import tqdm
from scipy.optimize import curve_fit
from cbsyst.helpers import Bunch, prescorr
# Functions from K_thermo_conditional.py
# --------------------------------------
# definition of the function that takes (Temp) as input and returns the K at that temp
def CalculateKcond(Tc, Sal):
"""
Calculate thermodynamic Ks adjusted for salinity.
Parameters
----------
Tc : float or array-like
Temperature in C
Sal : float or array-like
Salinity in PSU
"""
sqrtSal = np.sqrt(Sal)
T = Tc + 273.15
lnT = np.log(T)
Istr = 19.924 * Sal / (1000 - 1.005 * Sal) # Ionic strength after Dickson 1990a; see Dickson et al 2007
KspCcond = np.power(10, (-171.9065 - 0.077993 * T + 2839.319 / T + 71.595 *
np.log10(T) + (-0.77712 + 0.0028426 * T + 178.34 / T) *
sqrtSal - 0.07711 * Sal + 0.0041249 * Sal * sqrtSal))
K1cond = np.power(10, -3633.86 / T + 61.2172 - 9.67770 * lnT + 0.011555 * Sal - 0.0001152 * Sal * Sal) # Dickson
# K1cond = np.exp(290.9097 - 14554.21 / T - 45.0575 * lnT + (-228.39774 + 9714.36839 / T + 34.485796 * lnT) * sqrtSal + (54.20871 - 2310.48919 / T - 8.19515 * lnT) * Sal + (-3.969101 + 170.22169 / T + 0.603627 * lnT) * Sal * sqrtSal - 0.00258768 * Sal * Sal) #Millero95
K2cond = np.power(10, -471.78 / T - 25.9290 + 3.16967 * lnT +
0.01781 * Sal - 0.0001122 * Sal * Sal)
KWcond = np.exp(148.9652 - 13847.26 / T - 23.6521 * lnT +
(118.67 / T - 5.977 + 1.0495 * lnT) * sqrtSal - 0.01615 * Sal)
KBcond = np.exp((-8966.90 - 2890.53 * sqrtSal - 77.942 * Sal +
1.728 * Sal * sqrtSal - 0.0996 * Sal * Sal) /
T + (148.0248 + 137.1942 * sqrtSal + 1.62142 * Sal) +
(-24.4344 - 25.085 * sqrtSal - 0.2474 * Sal) *
lnT + 0.053105 * sqrtSal * T) # Dickson90b
KspAcond = np.power(10, (-171.945 - 0.077993 * T + 2903.293 / T + 71.595 * np.log10(T) +
(-0.068393 + 0.0017276 * T + 88.135 / T) * sqrtSal -
0.10018 * Sal + 0.0059415 * Sal * sqrtSal))
K0cond = np.exp(-60.2409 + 93.4517 * 100 / T + 23.3585 * np.log(T / 100) +
Sal * (0.023517 - 0.023656 * T / 100 +
0.0047036 * (T / 100) * (T / 100))) # Weiss74
param_HSO4_cond = np.array([141.328, -4276.1, -23.093, 324.57,
-13856, -47.986, -771.54, 35474,
114.723, -2698, 1776]) # Dickson 1990
KHSO4cond = np.exp(param_HSO4_cond[0] +
param_HSO4_cond[1] / T +
param_HSO4_cond[2] * np.log(T) + np.sqrt(Istr) *
(param_HSO4_cond[3] +
param_HSO4_cond[4] / T +
param_HSO4_cond[5] * np.log(T)) + Istr *
(param_HSO4_cond[6] +
param_HSO4_cond[7] / T +
param_HSO4_cond[8] * np.log(T)) +
param_HSO4_cond[9] / T * Istr * np.sqrt(Istr) +
param_HSO4_cond[10] / T * Istr**2 + np.log(1 - 0.001005 * Sal))
return KspCcond, K1cond, K2cond, KWcond, KBcond, KspAcond, K0cond, KHSO4cond
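# Hedged usage sketch (editor addition): evaluate the conditional constants at
# typical surface-seawater conditions. The unpacking order follows the return
# statement above; no particular numerical values are asserted here.
def _demo_CalculateKcond():
    KspC, K1, K2, KW, KB, KspA, K0, KHSO4 = CalculateKcond(Tc=25.0, Sal=35.0)
    return {"K1": K1, "K2": K2, "KW": KW, "KB": KB}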
# Functions from PitzerParams.py
# --------------------------------------
def SupplyParams(T): # assumes T [K] -- not T [degC]
"""
Return Pitzer params for given T (Kelvin).
"""
if isinstance(T, (float, int)):
T = np.array([T])
Tinv = 1 / T
lnT = np.log(T)
# ln_of_Tdiv29815 = np.log(T / 298.15)
Tpower2 = T**2
Tpower3 = T**3
Tpower4 = T**4
Tabs = T - 298.15
# PART 1 -- calculate thermodynamic pK's for acids, gases and complexes
# parameters [A, B, C, D] according to Millero (2007) Table 11
# param_HF = [-12.641, 1590.2, 0, 0]
# param_H2S = [225.8375, -13275.324, -34.64354, 0]
# param_H2O = [148.9802, -13847.26, -23.6521, 0]
# param_BOH3 = [148.0248, -8966.901, -24.4344, 0]
# param_HSO4 = [141.411, -4340.704, -23.4825, 0.016637]
# param_NH4 = [-0.25444, -6285.33, 0, 0.0001635]
# param_H2CO3 = [290.9097, -14554.21, -45.0575, 0]
# param_HCO3 = [207.6548, -11843.79, -33.6485, 0]
# param_H2SO3 = [554.963, -16700.1, -93.67, 0.1022]
# param_HSO3 = [-358.57, 5477.1, 65.31, -0.1624]
# param_H3PO4 = [115.54, -4576.7518, -18.453, 0]
# param_H2PO4 = [172.1033, -8814.715, -27.927, 0]
# param_HPO4 = [-18.126, -3070.75, 0, 0]
# param_CO2 = [-60.2409, 9345.17, 18.7533, 0]
# param_SO2 = [-142.679, 8988.76, 19.8967, -0.0021]
# param_Aragonite = [303.5363, -13348.09, -48.7537, 0]
# param_Calcite = [303.1308, -13348.09, -48.7537, 0]
# definition of the function that takes (Temp, param) as input and returns the lnK at that temp
# def Eq_lnK_calcABCD(T, paramABCD):
# return paramABCD[0] + paramABCD[1] / T + paramABCD[2] * np.log(T) + paramABCD[3] * T
# How to use: ln_of_K_HCO3_at_18degC = lnK_calcABCD(18, param_HCO3)
# parameters [A, B, C] according to Millero (2007) Table 12
# param_MgOH = [3.87, -501.6, 0]
# param_MgF = [3.504, -501.6, 0]
# param_CaF = [3.014, -501.6, 0]
# param_MgCO3 = [1.028, 0, 0.0066154]
# param_CaCO3 = [1.178, 0, 0.0066154]
# param_SrCO3 = [1.028, 0, 0.0066154]
# param_MgH2PO4 = [1.13, 0, 0]
# param_CaH2PO4 = [1, 0, 0]
# param_MgHPO4 = [2.7, 0, 0]
# param_CaHPO4 = [2.74, 0, 0]
# param_MgPO4 = [5.63, 0, 0]
# param_CaPO4 = [7.1, 0, 0]
# definition of the function that takes (Temp, param) as input and returns the lnK at that temp
# def lnK_calcABC(T, paramABC):
# return paramABC[0] + paramABC[1] / T + paramABC[2] * T
# How to use: ln_of_K_CaHPO4_at_18degC = lnK_calcABC(18, param_CaHPO4)
################################################################################
# PART 2 -- Pitzer equations (based on Millero and Pierrot (1998))
# Table A1 (Millero and Pierrot, 1998; after Moller, 1988 & Greenberg and Moller, 1989) valid 0 to 250degC
param_NaCl = np.array([(1.43783204E01, 5.6076740E-3, -4.22185236E2,
-2.51226677E0, 0.0, -2.61718135E-6,
4.43854508, -1.70502337),
(-4.83060685E-1, 1.40677470E-3, 1.19311989E2,
0.0, 0.0, 0.0, 0.0, -4.23433299),
(-1.00588714E-1, -1.80529413E-5, 8.61185543E0,
1.2488095E-2, 0.0, 3.41172108E-8, 6.83040995E-2,
2.93922611E-1)])
# note that second value is changed to original ref (e-3 instead e01)
param_NaCl = reshaper(param_NaCl, T)
param_KCl = np.array([[2.67375563E1, 1.00721050E-2, -7.58485453E2,
-4.70624175, 0.0, -3.75994338E-6, 0.0, 0.0],
[-7.41559626, 0.0, 3.22892989E2, 1.16438557,
0.0, 0.0, 0.0, -5.94578140],
[-3.30531334, -1.29807848E-3, 9.12712100E1,
5.864450181E-1, 0.0, 4.95713573E-7, 0.0, 0.0]])
param_KCl = reshaper(param_KCl, T)
param_K2SO4 = np.array([[4.07908797E1, 8.26906675E-3, -1.418242998E3,
-6.74728848, 0.0, 0.0, 0.0, 0.0],
[-1.31669651E1, 2.35793239E-2, 2.06712592E3,
0.0, 0.0, 0.0, 0.0, 0.0],
[-1.88E-2, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
param_K2SO4 = reshaper(param_K2SO4, T)
param_CaCl2 = np.array([[-9.41895832E1, -4.04750026E-2, 2.34550368E3,
1.70912300E1, -9.22885841E-1, 1.51488122E-5,
-1.39082000E0, 0.0],
[3.4787, -1.5417E-2, 0.0, 0.0, 0.0,
3.1791E-5, 0.0, 0.0],
[1.93056024E1, 9.77090932E-3, -4.28383748E2,
-3.57996343, 8.82068538E-2, -4.62270238E-6,
9.91113465, 0.0]])
param_CaCl2 = reshaper(param_CaCl2, T)
# [-3.03578731e1, 1.36264728e-2, 7.64582238e2, 5.50458061e0, -3.27377782e-1, 5.69405869e-6, -5.36231106e-1, 0]])
# param_CaCl2_Spencer = np.array([[-5.62764702e1, -3.00771997e-2, 1.05630400e-5, 3.3331626e-9, 1.11730349e3, 1.06664743e1],
# [3.4787e0, -1.5417e-2, 3.1791e-5, 0, 0, 0],
# [2.64231655e1, 2.46922993e-2, -2.48298510e-5, 1.22421864e-8, -4.18098427e2, -5.35350322e0]])
# param_CaSO4 = np.array([[0.015, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
# [3.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
# [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
# corrected after Greenberg and Moller 1989 0.015 instead of 0.15
# param_SrSO4 = param_CaSO4
# param_CaSO4_Spencer = np.array([[0.795e-1, -0.122e-3, 0.5001e-5, 0.6704e-8, -0.15228e3, -0.6885e-2],
# [0.28945e1, 0.7434e-2, 0.5287e-5, -0.101513e-6, -0.208505e4, 0.1345e1]])
def Equation_TabA1(T, Tinv, lnT, a):
return (a[:, 0] + a[:, 1] * T + a[:, 2] * Tinv + a[:, 3] * lnT + a[:, 4] /
(T - 263) + a[:, 5] * T**2 + a[:, 6] / (680 - T) + a[:, 7] / (T - 227))
def EquationSpencer(T, lnT, q):
return q[:, 0] + q[:, 1] * T + q[:, 2] * T * T + q[:, 3] * T**3 + q[:, 4] / T + q[:, 5] * lnT
# Table A2 (Millero and Pierrot, 1998; after Pabalan and Pitzer, 1987) valid 25 to 200degC
param_MgCl2 = np.array([[0.576066, -9.31654E-04, 5.93915E-07],
[2.60135, -0.0109438, 2.60169E-05],
[0.059532, -2.49949E-04, 2.41831E-07]])
param_MgCl2 = reshaper(param_MgCl2, T)
param_MgSO4 = np.array([[-1.0282, 8.4790E-03, -2.33667E-05,
2.1575E-08, 6.8402E-04, 0.21499],
[-2.9596E-01, 9.4564E-04, 0.0, 0.0,
1.1028E-02, 3.3646],
[4.2164E-01, -3.5726E-03, 1.0040E-05,
-9.3744E-09, -3.5160E-04, 2.7972E-02]])
param_MgSO4 = reshaper(param_MgSO4, T)
# param_MgSO4 = np.array([[-1.0282, 8.4790E-03, -2.33667E-05, 2.1575E-08, 6.8402E-04, 0.21499],[-2.9596E-01, 9.4564E-04, 0.0, 0.0, 1.1028E-02, 3.3646], [1.0541E-01, -8.9316E-04, 2.51E-06, -2.3436E-09, -8.7899E-05, 0.006993]]) # Cparams corrected after Pabalan and Pitzer ... but note that column lists Cmx not Cphi(=4xCmx) ... MP98 is correct
def Equation1_TabA2(T, q):
return q[:, 0] + q[:, 1] * T + q[:, 2] * T**2
def Equation2_TabA2(T, Tpower2, Tpower3, Tpower4, q):
return (q[:, 0] * ((T / 2) + (88804) / (2 * T) - 298) +
q[:, 1] * ((Tpower2 / 6) + (26463592) / (3 * T) - (88804 / 2)) +
q[:, 2] * (Tpower3 / 12 + 88804 * 88804 / (4 * T) - 26463592 / 3) +
q[:, 3] * ((Tpower4 / 20) + 88804 * 26463592 / (5 * T) - 88804 * 88804 / 4) +
q[:, 4] * (298 - (88804 / T)) +
q[:, 5])
# Table A3 (Millero and Pierrot, 1998; after multiple studies, at least valid 0 to 50degC)
# param_NaHSO4 = np.array([[0.030101, -0.362E-3, 0.0], [0.818686, -0.019671, 0.0], [0.0, 0.0, 0.0]]) # corrected after Pierrot et al., 1997
param_NaHSO4 = np.array([[0.0544, -1.8478e-3, 5.3937e-5],
[0.3826401, -1.8431e-2, 0.0],
[0.003905, 0.0, 0.0]]) # corrected after Pierrot and Millero, 1997
param_NaHSO4 = reshaper(param_NaHSO4, T)
param_NaHCO3 = np.array([[0.028, 1.0E-3, -2.6E-5 / 2],
[0.044, 1.1E-3, -4.3E-5 / 2],
[0.0, 0.0, 0.0]]) # corrected after Peiper and Pitzer 1982
# param_Na2SO4 = np.array([[6.536438E-3, -30.197349, -0.20084955],
# [0.8742642, -70.014123, 0.2962095],
# [7.693706E-3, 4.5879201, 0.019471746]]) # corrected according to Hovey et al 1993; note also that alpha = 1.7, not 2
param_NaHCO3 = reshaper(param_NaHCO3, T)
param_Na2SO4_Moller = np.array([[81.6920027 + 0.0301104957 * T -
2321.93726 / T - 14.3780207 * lnT -
0.666496111 / (T - 263) - 1.03923656e-05 * T**2],
[1004.63018 + 0.577453682 * T -
21843.4467 / T - 189.110656 * lnT -
0.2035505488 / (T - 263) -
0.000323949532 * T**2 + 1467.72243 / (680 - T)],
[-80.7816886 - 0.0354521126 * T + 2024.3883 / T +
14.619773 * lnT - 0.091697474 / (T - 263) +
1.43946005e-05 * T**2 - 2.42272049 / (680 - T)]])
# Moller 1988 parameters as used in Excel MIAMI code !!!!!! careful this formula assumes alpha1=2 as opposed to alpha1=1.7 for the Hovey parameters
# XXXXX - - > need to go to the calculation of beta's (to switch Hovey / Moller) and of B et al (to switch alpha1
# param_Na2CO3 = np.array([[0.0362, 1.79E-3, 1.694E-21], [1.51, 2.05E-3, 1.626E-19], [0.0052, 0.0, 0.0]]) # Millero and Pierrot referenced to Peiper and Pitzer
param_Na2CO3 = np.array([[0.0362, 1.79E-3, -4.22E-5 / 2],
[1.51, 2.05E-3, -16.8E-5 / 2],
[0.0052, 0.0, 0.0]]) # Peiper and Pitzer 1982
param_Na2CO3 = reshaper(param_Na2CO3, T)
# XXXX check below if Haynes 2003 is being used.
param_NaBOH4 = np.array([[-0.051, 5.264E-3, 0.0],
[0.0961, -1.068E-2, 0.0],
[0.01498, -15.7E-4, 0.0]]) # corrected after Simonson et al 1987 5th param should be e-2
param_NaBOH4 = reshaper(param_NaBOH4, T)
def Equation_TabA3andTabA4andTabA5(Tabs, a):
return (a[:, 0] +
a[:, 1] * Tabs +
a[:, 2] * Tabs**2)
# def Equation_Na2SO4_TabA3(T, ln_of_Tdiv29815, a):
# return (a[:, 0] + a[:, 1] * ((1 / T) - (1 / 298.15)) + a[:, 2] * ln_of_Tdiv29815)
# Table A4 (Millero and Pierrot, 1998; after multiple studies, at least valid 5 to 45degC)
param_KHCO3 = np.array([[-0.0107, 0.001, 0.0],
[0.0478, 0.0011, 6.776E-21],
[0.0, 0.0, 0.0]])
param_KHCO3 = reshaper(param_KHCO3, T)
param_K2CO3 = np.array([[0.1288, 1.1E-3, -5.1E-6],
[1.433, 4.36E-3, 2.07E-5],
[0.0005, 0.0, 0.0]])
param_K2CO3 = reshaper(param_K2CO3, T)
param_KBOH4 = np.array([[0.1469, 2.881E-3, 0.0],
[-0.0989, -6.876E-3, 0.0],
[-56.43 / 1000, -9.56E-3, 0.0]]) # corrected after Simonson et al 1988
param_KBOH4 = reshaper(param_KBOH4, T)
# same function as TabA3 "Equation_TabA3andTabA4andTabA5(Tabs,a)"
# Table A5 (<NAME>, 1998; after Simonson et al, 1987b; valid 5 - 55degC
param_MgBOH42 = np.array([[-0.623, 6.496E-3, 0.0],
[0.2515, -0.01713, 0.0],
[0.0, 0.0, 0.0]]) # corrected after Simonson et al 1988 first param is negative
param_MgBOH42 = reshaper(param_MgBOH42, T)
param_CaBOH42 = np.array([[-0.4462, 5.393E-3, 0.0],
[-0.868, -0.0182, 0.0],
[0.0, 0.0, 0.0]])
param_CaBOH42 = reshaper(param_CaBOH42, T)
param_SrBOH42 = param_CaBOH42 # see Table A6
def Equation_TabA3andTabA4andTabA5_Simonson(T, a):
return (a[:, 0] +
a[:, 1] * (T - 298.15) +
a[:, 2] * (T - 303.15) * (T - 303.15))
# Table A7 (<NAME> Pierrot, 1998; after multiple studies; valid 0 - 50degC
param_KOH = np.array([[0.1298, -0.946E-5, 9.914E-4],
[0.32, -2.59E-5, 11.86E-4],
[0.0041, 0.0638E-5, -0.944E-4]])
param_KOH = reshaper(param_KOH, T)
param_SrCl2 = np.array([[0.28575, -0.18367E-5, 7.1E-4],
[1.66725, 0.0E-5, 28.425E-4],
[-0.0013, 0.0E-5, 0.0E-4]])
param_SrCl2 = reshaper(param_SrCl2, T)
def Equation_TabA7(T, P):
return (P[:, 0] +
P[:, 1] * (8834524.639 - 88893.4225 *
P[:, 2]) * (1 / T - (1 / 298.15)) +
P[:, 1] / 6 * (T**2 - 88893.4225))
# Table A8 - - - Pitzer parameters unknown; beta's known for 25degC
Equation_KHSO4 = np.array([-0.0003, 0.1735, 0.0])
# Equation_MgHSO42 = np.array([0.4746, 1.729, 0.0]) # XX no Cphi #from Harvie et al 1984 as referenced in MP98
Equation_MgHSO42 = np.array([-0.61656 - 0.00075174 * Tabs,
7.716066 - 0.0164302 * Tabs,
0.43026 + 0.00199601 * Tabs]) # from Pierrot and Millero 1997 as used in the Excel file
# Equation_MgHCO32 = np.array([0.329, 0.6072, 0.0]) # Harvie et al 1984
Equation_MgHCO32 = np.array([0.03, 0.8, 0.0]) # Millero and Pierrot redetermined after Thurmond and Millero 1982
Equation_CaHSO42 = np.array([0.2145, 2.53, 0.0])
Equation_CaHCO32 = np.array([0.4, 2.977, 0.0]) # np.array([0.2, 0.3, 0]) He and Morse 1993 after Pitzeretal85 np.array([0.4, 2.977, 0.0])
Equation_CaOH2 = np.array([-0.1747, -0.2303, -5.72]) # according to Harvie84, the -5.72 should be for beta2, not Cphi (which is zero) -- but likely typo in original ref since 2:1 electrolytes don't usually have a beta2
Equation_SrHSO42 = Equation_CaHSO42
Equation_SrHCO32 = Equation_CaHCO32
Equation_SrOH2 = Equation_CaOH2
# Equation_MgOHCl = np.array([-0.1, 1.658, 0.0])
Equation_NaOH = np.array([0.0864, 0.253, 0.0044]) # Rai et al 2002 ref to Pitzer91(CRC Press)
Equation_CaSO4_PnM74 = np.array([0.2, 2.65, 0]) # Pitzer and Mayorga74
# Table A9 - - - (Millero and Pierrot, 1998; after multiple studies; valid 0 - 50degC
param_HCl = np.array([[1.2859, -2.1197e-3, -142.58770],
[-4.4474, 8.425698E-3, 665.7882],
[-0.305156, 5.16E-4, 45.521540]]) # beta1 first param corrected to negative according to original reference (Campbell et al)
param_HCl = reshaper(param_HCl, T)
# param_HSO4 = np.array([[0.065, 0.134945, 0.022374, 7.2E-5],
# [-15.009, -2.405945, 0.335839, -0.004379],
# [0.008073, -0.113106, -0.003553, 3.57E-5]]) # XXXXX two equations for C
# param_HSO4_Clegg94 = np.array([[0.0348925351, 4.97207803, 0.317555182, 0.00822580341],
# [-1.06641231, -74.6840429, -2.26268944, -0.0352968547],
# [0.00764778951, -0.314698817, -0.0211926525, 0.000586708222],
# [0.0, -0.176776695, -0.731035345, 0.0]])
def Equation_HCl(T, a):
return (a[:, 0] +
a[:, 1] * T +
a[:, 2] / T)
def Equation_HSO4(T, a):
return (a[:, 0] + (T - 328.15) * 1E-3 *
(a[:, 1] + (T - 328.15) *
((a[:, 2] / 2) + (T - 328.15) *
(a[:, 3] / 6))))
def Equation_HSO4_Clegg94(T, a):
return (a[:, 0] + (T - 328.15) *
(1E-3 * a[:, 1] + (T - 328.15) *
((1e-3 * a[:, 2] / 2) +
(T - 328.15) * 1e-3 * a[:, 3] / 6)))
############################################################
# beta_0, beta_1 and C_phi values arranged into arrays
N_cations = 6 # H+=0; Na+=1; K+=2; Mg2+=3; Ca2+=4; Sr2+=5
N_anions = 7 # OH-=0; Cl-=1; B(OH)4-=2; HCO3-=3; HSO4-=4; CO3-=5; SO4-=6;
beta_0 = np.zeros((N_cations, N_anions, *T.shape)) # creates empty array
beta_1 = np.zeros((N_cations, N_anions, *T.shape)) # creates empty array
C_phi = np.zeros((N_cations, N_anions, *T.shape)) # creates empty array
# H = cation
# [beta_0[0, 0], beta_1[0, 0], C_phi[0, 0]] = n / a
[beta_0[0, 1], beta_1[0, 1], C_phi[0, 1]] = Equation_HCl(T, param_HCl)
# [beta_0[0, 2], beta_1[0, 2], C_phi[0, 2]] = n / a
# [beta_0[0, 3], beta_1[0, 3], C_phi[0, 3]] = n / a
# [beta_0[0, 4], beta_1[0, 4], C_phi[0, 4]] = n / a
# [beta_0[0, 5], beta_1[0, 5], C_phi[0, 5]] = n / a
# [beta_0[0, 6], beta_1[0, 6], C_phi[0, 6]] = Equation_HSO4(T, param_HSO4)
# [beta_0[0, 6], beta_1[0, 6], C_phi[0, 6], C1_HSO4] = Equation_HSO4_Clegg94(T, param_HSO4_Clegg94)
C1_HSO4 = 0
# print beta_0[0, :], beta_1[0, :]#, beta_2[0, :]
# Na = cation
[beta_0[1, 0], beta_1[1, 0], C_phi[1, 0]] = Equation_NaOH
[beta_0[1, 1], beta_1[1, 1], C_phi[1, 1]] = Equation_TabA1(T, Tinv, lnT, param_NaCl)
[beta_0[1, 2], beta_1[1, 2], C_phi[1, 2]] = Equation_TabA3andTabA4andTabA5(Tabs, param_NaBOH4)
[beta_0[1, 3], beta_1[1, 3], C_phi[1, 3]] = Equation_TabA3andTabA4andTabA5(Tabs, param_NaHCO3)
[beta_0[1, 4], beta_1[1, 4], C_phi[1, 4]] = Equation_TabA3andTabA4andTabA5(Tabs, param_NaHSO4)
[beta_0[1, 5], beta_1[1, 5], C_phi[1, 5]] = Equation_TabA3andTabA4andTabA5(Tabs, param_Na2CO3)
[beta_0[1, 6], beta_1[1, 6], C_phi[1, 6]] = param_Na2SO4_Moller # Equation_Na2SO4_TabA3(T, ln_of_Tdiv29815, param_Na2SO4)
# K = cation
[beta_0[2, 0], beta_1[2, 0], C_phi[2, 0]] = Equation_TabA7(T, param_KOH)
[beta_0[2, 1], beta_1[2, 1], C_phi[2, 1]] = Equation_TabA1(T, Tinv, lnT, param_KCl)
[beta_0[2, 2], beta_1[2, 2], C_phi[2, 2]] = Equation_TabA3andTabA4andTabA5(Tabs, param_KBOH4)
[beta_0[2, 3], beta_1[2, 3], C_phi[2, 3]] = Equation_TabA3andTabA4andTabA5(Tabs, param_KHCO3)
[beta_0[2, 4], beta_1[2, 4], C_phi[2, 4]] = Equation_KHSO4
[beta_0[2, 5], beta_1[2, 5], C_phi[2, 5]] = Equation_TabA3andTabA4andTabA5(Tabs, param_K2CO3)
[beta_0[2, 6], beta_1[2, 6], C_phi[2, 6]] = Equation_TabA1(T, Tinv, lnT, param_K2SO4)
# Mg = cation
# [beta_0[3, 0], beta_1[3, 0], C_phi[3, 0]] = n / a
[beta_0[3, 1], beta_1[3, 1], C_phi[3, 1]] = Equation1_TabA2(T, param_MgCl2)
[beta_0[3, 2], beta_1[3, 2], C_phi[3, 2]] = Equation_TabA3andTabA4andTabA5_Simonson(T, param_MgBOH42)
[beta_0[3, 3], beta_1[3, 3], C_phi[3, 3]] = Equation_MgHCO32
[beta_0[3, 4], beta_1[3, 4], C_phi[3, 4]] = Equation_MgHSO42
# [beta_0[3, 5], beta_1[3, 5], C_phi[3, 5]] = n / a
[beta_0[3, 6], beta_1[3, 6], C_phi[3, 6]] = Equation2_TabA2(T, Tpower2, Tpower3, Tpower4, param_MgSO4)
# print beta_0[3, 6], beta_1[3, 6], C_phi[3, 6]
# Ca = cation
[beta_0[4, 0], beta_1[4, 0], C_phi[4, 0]] = Equation_CaOH2
[beta_0[4, 1], beta_1[4, 1], C_phi[4, 1]] = Equation_TabA1(T, Tinv, lnT, param_CaCl2)
[beta_0[4, 2], beta_1[4, 2], C_phi[4, 2]] = Equation_TabA3andTabA4andTabA5_Simonson(T, param_CaBOH42)
[beta_0[4, 3], beta_1[4, 3], C_phi[4, 3]] = Equation_CaHCO32
[beta_0[4, 4], beta_1[4, 4], C_phi[4, 4]] = Equation_CaHSO42
# [beta_0[4, 5], beta_1[4, 5], C_phi[4, 5]] = n / a
[beta_0[4, 6], beta_1[4, 6], C_phi[4, 6]] = Equation_CaSO4_PnM74 # Equation_TabA1(T, Tinv, lnT, param_CaSO4)
# Sr = cation
[beta_0[5, 0], beta_1[5, 0], C_phi[5, 0]] = Equation_SrOH2
[beta_0[5, 1], beta_1[5, 1], C_phi[5, 1]] = Equation_TabA7(T, param_SrCl2)
[beta_0[5, 2], beta_1[5, 2], C_phi[5, 2]] = Equation_TabA3andTabA4andTabA5_Simonson(T, param_SrBOH42)
[beta_0[5, 3], beta_1[5, 3], C_phi[5, 3]] = Equation_SrHCO32
[beta_0[5, 4], beta_1[5, 4], C_phi[5, 4]] = Equation_SrHSO42
# [beta_0[5, 5], beta_1[5, 5], C_phi[5, 5]] = n / a
[beta_0[5, 6], beta_1[5, 6], C_phi[5, 6]] = Equation_CaSO4_PnM74 # Equation_TabA1(T, Tinv, lnT, param_SrSO4)
# for 2:2 ion pairs beta_2 is needed
beta_2 = np.zeros((N_cations, N_anions, *T.shape))
b2_param_MgSO4 = np.array([-13.764, 0.12121, -2.7642e-4, 0, -0.21515, -32.743])
def Eq_b2_MgSO4(T, Tpower2, Tpower3, Tpower4, q):
return (q[0] * ((T / 2) + (88804) / (2 * T) - 298) +
q[1] * ((Tpower2 / 6) + (26463592) / (3 * T) - (88804 / 2)) +
q[2] * (Tpower3 / 12 + 88804 * 88804 / (4 * T) - 26463592 / 3) +
q[3] * ((Tpower4 / 20) + 88804 * 26463592 / (5 * T) - 88804 * 88804 / 4) +
q[4] * (298 - (88804 / T)) +
q[5])
b2_param_MgBOH42 = np.array([-11.47, 0.0, -3.24e-3])
b2_param_CaBOH42 = np.array([-15.88, 0.0, -2.858e-3])
def Eq_b2_MgANDCaBOH42(T, a):
return a[0] + a[1] * (T - 298.15) + a[2] * (T - 303.15) * (T - 303.15)
b2_param_CaSO4 = np.array([-55.7, 0]) # Pitzer and Mayorga74 # [-1.29399287e2, 4.00431027e-1]) Moller88
def Eq_b2_CaSO4(T, a):
return a[0] + a[1] * T
beta_2[3, 6] = Eq_b2_MgSO4(T, Tpower2, Tpower3, Tpower4, b2_param_MgSO4)
beta_2[3, 2] = Eq_b2_MgANDCaBOH42(T, b2_param_MgBOH42)
beta_2[4, 2] = Eq_b2_MgANDCaBOH42(T, b2_param_CaBOH42)
beta_2[4, 6] = Eq_b2_CaSO4(T, b2_param_CaSO4)
beta_2[5, 2] = beta_2[4, 2]
#############################################################################
#############################################################################
# Data and T - based calculations to create arrays holding Theta and Phi values
# based on Table A10 and A11
# Theta of positive ions H+=0; Na+=1; K+=2; Mg2+=3; Ca2+=4; Sr2+=5
Theta_positive = np.zeros((6, 6, *T.shape)) # Array to hold Theta values between two ions (for numbering see list above)
# H - Sr
Theta_positive[0, 5] = 0.0591 + 4.5 * 1E-4 * Tabs
Theta_positive[5, 0] = Theta_positive[0, 5]
# H - Na
Theta_positive[0, 1] = 0.03416 - 2.09 * 1E-4 * Tabs
Theta_positive[1, 0] = Theta_positive[0, 1]
# H - K
Theta_positive[0, 2] = 0.005 - 2.275 * 1E-4 * Tabs
Theta_positive[2, 0] = Theta_positive[0, 2]
# H - Mg
Theta_positive[0, 3] = 0.062 + 3.275 * 1E-4 * Tabs
Theta_positive[3, 0] = Theta_positive[0, 3]
# H - Ca
Theta_positive[0, 4] = 0.0612 + 3.275 * 1E-4 * Tabs
Theta_positive[4, 0] = Theta_positive[0, 4]
# Na - K
Theta_positive[1, 2] = -5.02312111E-2 + 14.0213141 / T
Theta_positive[2, 1] = Theta_positive[1, 2]
# Na - Mg
Theta_positive[1, 3] = 0.07
Theta_positive[3, 1] = 0.07
# Na - Ca
Theta_positive[1, 4] = 0.05
Theta_positive[4, 1] = 0.05
# K - Mg
Theta_positive[2, 3] = 0.0
Theta_positive[3, 2] = 0.0
# K - Ca
Theta_positive[2, 4] = 0.1156
Theta_positive[4, 2] = 0.1156
# Sr - Na
Theta_positive[5, 1] = 0.07
Theta_positive[1, 5] = 0.07
# Sr - K
Theta_positive[5, 2] = 0.01
Theta_positive[2, 5] = 0.01
# Mg - Ca
Theta_positive[3, 4] = 0.007
Theta_positive[4, 3] = 0.007
# print 5.31274136 - 6.3424248e-3 * T - 9.83113847e2 / T, "ca - mg" #Spencer et al 1990
# Theta of negative ions OH-=0; Cl-=1; B(OH)4-=2; HCO3-=3; HSO4-=4; CO3-=5; SO4-=6;
Theta_negative = np.zeros((7, 7, *T.shape)) # Array to hold Theta values between two ions (for numbering see list above)
# Cl - SO4
Theta_negative[1, 6] = 0.07
Theta_negative[6, 1] = 0.07
# Cl - CO3
Theta_negative[1, 5] = -0.092 # corrected after Pitzer and Peiper 1982
Theta_negative[5, 1] = -0.092 # corrected after Pitzer and Peiper 1982
# Cl - HCO3
Theta_negative[1, 3] = 0.0359
Theta_negative[3, 1] = 0.0359
# Cl - BOH4
Theta_negative[1, 2] = (-0.0323 - 0.42333 * 1E-4 * Tabs -
21.926 * 1E-6 * Tabs**2)
Theta_negative[2, 1] = Theta_negative[1, 2]
# CO3 - HCO3
Theta_negative[3, 5] = 0.0
Theta_negative[5, 3] = 0.0
# SO4 - HSO4
Theta_negative[4, 6] = 0.0
Theta_negative[6, 4] = 0.0
# OH - Cl
Theta_negative[0, 1] = (-0.05 + 3.125 * 1E-4 * Tabs -
8.362 * 1E-6 * Tabs**2)
Theta_negative[1, 0] = Theta_negative[0, 1]
# SO4 - CO3
Theta_negative[5, 6] = 0.02
Theta_negative[6, 5] = 0.02
# SO4 - HCO3
Theta_negative[3, 6] = 0.01
Theta_negative[6, 3] = 0.01
# SO4 - BOH4
Theta_negative[2, 6] = -0.012
Theta_negative[6, 2] = -0.012
# HSO4 - Cl
Theta_negative[1, 4] = -0.006
Theta_negative[4, 1] = -0.006
# OH - SO4
Theta_negative[0, 6] = -0.013
Theta_negative[6, 0] = -0.013
# CO3 - OH # http://www.aim.env.uea.ac.uk/aim/accent4/parameters.html
Theta_negative[3, 0] = 0.1
Theta_negative[0, 3] = 0.1
# Phi
# positive ions H+=0; Na+=1; K+=2; Mg2+=3; Ca2+=4; Sr2+=5
# negative ions OH-=0; Cl-=1; B(OH)4-=2; HCO3-=3; HSO4-=4; CO3-=5; SO4-=6;
# Phi_PPN holds the values for cation - cation - anion
Phi_PPN = np.zeros((6, 6, 7, *T.shape)) # Array to hold Phi values for cation - cation - anion triplets (for numbering see lists above)
# Na - K-Cl
Phi_PPN[1, 2, 1] = 1.34211308E-2 - 5.10212917 / T
Phi_PPN[2, 1, 1] = Phi_PPN[1, 2, 1]
# Na - K-SO4
Phi_PPN[1, 2, 6] = 3.48115174E-2 - 8.21656777 / T
Phi_PPN[2, 1, 6] = Phi_PPN[1, 2, 6]
# Na - Mg - Cl
Phi_PPN[1, 3, 1] = 0.0199 - 9.51 / T
Phi_PPN[3, 1, 1] = Phi_PPN[1, 3, 1]
# Na - Ca - Cl
Phi_PPN[1, 4, 1] = (-7.6398 - 1.2990e-2 * T +
1.1060e-5 * T**2 + 1.8475 * lnT) # Spencer et al 1990 # -0.003
Phi_PPN[4, 1, 1] = Phi_PPN[1, 4, 1]
# print -7.6398 -1.2990e-2 * T + 1.1060e-5 * T*T + 1.8475 * lnT
# Na - Ca - SO4
Phi_PPN[1, 4, 6] = -0.012
Phi_PPN[4, 1, 6] = Phi_PPN[1, 4, 6]
# K - Mg - Cl
Phi_PPN[2, 3, 1] = 0.02586 - 14.27 / T
Phi_PPN[3, 2, 1] = Phi_PPN[2, 3, 1]
# K - Ca - Cl
Phi_PPN[2, 4, 1] = 0.047627877 - 27.0770507 / T
Phi_PPN[4, 2, 1] = Phi_PPN[2, 4, 1]
# K - Ca - SO4
Phi_PPN[2, 4, 6] = 0.0
Phi_PPN[4, 2, 6] = 0.0
# H - Sr - Cl
Phi_PPN[0, 5, 1] = 0.0054 - 2.1 * 1E-4 * Tabs
Phi_PPN[5, 0, 1] = Phi_PPN[0, 5, 1]
# H - Mg - Cl
Phi_PPN[0, 3, 1] = 0.001 - 7.325 * 1E-4 * Tabs
Phi_PPN[3, 0, 1] = Phi_PPN[0, 3, 1]
# H - Ca - Cl
Phi_PPN[0, 4, 1] = 0.0008 - 7.25 * 1E-4 * Tabs
Phi_PPN[4, 0, 1] = Phi_PPN[0, 4, 1]
# Sr - Na - Cl
Phi_PPN[5, 1, 1] = -0.015
Phi_PPN[1, 5, 1] = -0.015
# Sr - K-Cl
Phi_PPN[5, 2, 1] = -0.015
Phi_PPN[2, 5, 1] = -0.015
# Na - Mg - SO4
Phi_PPN[1, 3, 6] = -0.015
Phi_PPN[3, 1, 6] = -0.015
# K - Mg - SO4
Phi_PPN[2, 3, 6] = -0.048
Phi_PPN[3, 2, 6] = -0.048
# Mg - Ca - Cl
Phi_PPN[3, 4, 1] = (4.15790220e1 + 1.30377312e-2 * T -
9.81658526e2 / T - 7.4061986 * lnT) # Spencer et al 1990 # - 0.012
Phi_PPN[4, 3, 1] = Phi_PPN[3, 4, 1]
# print 4.15790220e1 + 1.30377312e-2 * T -9.81658526e2 / T -7.4061986 * lnT
# Mg - Ca - SO4
Phi_PPN[3, 4, 6] = 0.024
Phi_PPN[4, 3, 6] = 0.024
# H - Na - Cl
Phi_PPN[0, 1, 1] = 0.0002
Phi_PPN[1, 0, 1] = 0.0002
# H - Na - SO4
Phi_PPN[0, 1, 6] = 0.0
Phi_PPN[1, 0, 6] = 0.0
# H - K-Cl
Phi_PPN[0, 2, 1] = -0.011
Phi_PPN[2, 0, 1] = -0.011
# H - K-SO4
Phi_PPN[0, 2, 6] = 0.197
Phi_PPN[2, 0, 6] = 0.197
# Phi_NNP holds the values for anion - anion - cation
Phi_NNP = np.zeros((7, 7, 6, *T.shape)) # Array to hold Phi values for anion - anion - cation triplets (for numbering see lists above)
# Cl - SO4 - Na
Phi_NNP[1, 6, 1] = -0.009
Phi_NNP[6, 1, 1] = -0.009
# Cl - SO4 - K
Phi_NNP[1, 6, 2] = -0.21248147 + 37.5619614 / T + 2.8469833 * 1E-3 * T
Phi_NNP[6, 1, 2] = Phi_NNP[1, 6, 2]
# Cl - SO4 - Ca
Phi_NNP[1, 6, 4] = -0.018
Phi_NNP[6, 1, 4] = -0.018
# Cl - CO3 - Ca
Phi_NNP[1, 5, 4] = 0.016
Phi_NNP[5, 1, 4] = 0.016
# Cl - HCO3 - Na
Phi_NNP[1, 3, 1] = -0.0143
Phi_NNP[3, 1, 1] = -0.0143
# Cl - BOH4 - Na
Phi_NNP[1, 2, 1] = -0.0132
Phi_NNP[2, 1, 1] = -0.0132
# Cl - BOH4 - Mg
Phi_NNP[1, 2, 3] = -0.235
Phi_NNP[2, 1, 3] = -0.235
# Cl - BOH4 - Ca
Phi_NNP[1, 2, 4] = -0.8
Phi_NNP[2, 1, 4] = -0.8
# HSO4 - SO4 - Na
Phi_NNP[4, 6, 1] = 0.0
Phi_NNP[6, 4, 1] = 0.0
# CO3 - HCO3 - Na
Phi_NNP[3, 5, 1] = 0.0
Phi_NNP[5, 3, 1] = 0.0
# CO3 - HCO3 - K
Phi_NNP[3, 5, 2] = 0.0
Phi_NNP[5, 3, 2] = 0.0
# Cl - SO4 - Mg
Phi_NNP[1, 6, 3] = -0.004
Phi_NNP[6, 1, 3] = -0.004
# Cl - HCO3 - Mg
Phi_NNP[1, 3, 3] = -0.0196
Phi_NNP[3, 1, 3] = -0.0196
# SO4 - CO3 - Na
Phi_NNP[6, 5, 1] = -0.005
Phi_NNP[5, 6, 1] = -0.005
# SO4 - CO3 - K
Phi_NNP[6, 5, 2] = -0.009
Phi_NNP[5, 6, 2] = -0.009
# SO4 - HCO3 - Na
Phi_NNP[6, 3, 1] = -0.005
Phi_NNP[3, 6, 1] = -0.005
# SO4 - HCO3 - Mg
Phi_NNP[6, 3, 3] = -0.161
Phi_NNP[3, 6, 3] = -0.161
# HSO4 - Cl - Na
Phi_NNP[4, 1, 1] = -0.006
Phi_NNP[1, 4, 1] = -0.006
# HSO4 - SO4 - K
Phi_NNP[4, 6, 2] = -0.0677
Phi_NNP[6, 4, 2] = -0.0677
# OH - Cl - Na
Phi_NNP[0, 1, 1] = -0.006
Phi_NNP[1, 0, 1] = -0.006
# OH - Cl - K
Phi_NNP[0, 1, 2] = -0.006
Phi_NNP[1, 0, 2] = -0.006
# OH - Cl - Ca
Phi_NNP[0, 1, 4] = -0.025
Phi_NNP[1, 0, 4] = -0.025
# OH - SO4 - Na
Phi_NNP[0, 6, 1] = -0.009
Phi_NNP[6, 0, 1] = -0.009
# OH - SO4 - K
Phi_NNP[0, 6, 2] = -0.05
Phi_NNP[6, 0, 2] = -0.05
return (beta_0, beta_1, beta_2, C_phi,
Theta_negative, Theta_positive,
Phi_NNP, Phi_PPN, C1_HSO4)
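# Hedged usage sketch (editor addition): Pitzer parameters for a single
# temperature. It assumes the `reshaper` helper used throughout SupplyParams
# is available in the module namespace; array shapes follow the cation/anion
# numbering documented above (6 cations x 7 anions x len(T)).
def _demo_SupplyParams():
    beta_0, beta_1, beta_2 = SupplyParams(np.array([298.15]))[:3]
    return beta_0.shape  # expected (6, 7, 1)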
# Functions from K_HSO4_thermo.py
# --------------------------------------
def supplyKHSO4(T, Istr):
"""
Calculate KHSO4 for given temperature and salinity
"""
Istr = pow(Istr, 1)
# param_HSO4 = np.array([562.69486, -13273.75, -102.5154, 0.2477538, -1.117033e-4]) #Clegg et al. 1994
# K_HSO4 = np.power(10,param_HSO4[0] + param_HSO4[1]/T + param_HSO4[2]*np.log(T) + param_HSO4[3]*T + param_HSO4[4]*T*T)
param_HSO4 = np.array([141.411, -4340.704, -23.4825, 0.016637]) # Campbell et al. 1993
# param_HSO4 = np.array([141.328, -4276.1, -23.093, 0]) #Dickson 1990
# param_HSO4 = np.array([141.411, -4340.704, -23.4825, 0.016637])
K_HSO4 = np.power(10, (param_HSO4[0] +
param_HSO4[1] / T +
param_HSO4[2] * np.log(T) +
param_HSO4[3] * T))
param_HSO4_cond = np.array([141.328, -4276.1, -23.093,
324.57, -13856, -47.986,
-771.54, 35474, 114.723,
-2698, 1776]) # Dickson 1990
K_HSO4_cond = np.exp(param_HSO4_cond[0] +
param_HSO4_cond[1] / T +
param_HSO4_cond[2] * np.log(T) + np.sqrt(Istr) *
(param_HSO4_cond[3] +
param_HSO4_cond[4] / T +
param_HSO4_cond[5] * np.log(T)) + Istr *
(param_HSO4_cond[6] +
param_HSO4_cond[7] / T +
param_HSO4_cond[8] * np.log(T)) +
param_HSO4_cond[9] / T * Istr * np.sqrt(Istr) +
param_HSO4_cond[10] / T * Istr * Istr)
return [K_HSO4_cond, K_HSO4]
# Functions from K_HF_cond.py
# --------------------------------------
def supplyKHF(T, sqrtI):
return np.exp(1590.2 / T - 12.641 + 1.525 * sqrtI)
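# Hedged example (editor addition): the bisulfate and fluoride constants at
# 25 degC and an ionic strength of ~0.7 mol/kg (roughly S = 35 seawater).
def _demo_KHSO4_KHF():
    T = 298.15
    Istr = 0.7
    K_HSO4_cond, K_HSO4 = supplyKHSO4(T, Istr)
    K_HF_cond = supplyKHF(T, np.sqrt(Istr))
    return K_HSO4_cond, K_HSO4, K_HF_cond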
# Functions from gammaANDalpha.py
# --------------------------------------
def CalculateGammaAndAlphas(Tc, S, Istr, m_cation, m_anion):
# Testbed case T=25C, I=0.7, seawatercomposition
T = Tc + 273.15
sqrtI = np.sqrt(Istr)
Z_cation = np.zeros((6, 1, 1))
Z_cation[0] = 1
Z_cation[1] = 1
Z_cation[2] = 1
Z_cation[3] = 2
Z_cation[4] = 2
Z_cation[5] = 2
Z_anion = np.zeros((7, 1, 1))
Z_anion[0] = -1
Z_anion[1] = -1
Z_anion[2] = -1
Z_anion[3] = -1
Z_anion[4] = -1
Z_anion[5] = -2
Z_anion[6] = -2
##########################################################################
[beta_0, beta_1, beta_2, C_phi, Theta_negative, Theta_positive,
Phi_NNP, Phi_PPN, C1_HSO4] = SupplyParams(T)
A_phi = 3.36901532E-01 - 6.32100430E-04 * T + 9.14252359 / T - 1.35143986E-02 * np.log(T) + 2.26089488E-03 / (
T - 263) + 1.92118597E-6 * T * T + 4.52586464E+01 / (680 - T) # note correction of last parameter, E + 1 instead of E-1
# A_phi = 8.66836498e1 + 8.48795942e-2 * T - 8.88785150e-5 * T * T +
# 4.88096393e-8 * T * T * T -1.32731477e3 / T - 1.76460172e1 * np.log(T)
# # Spencer et al 1990
f_gamma = -A_phi * (sqrtI / (1 + 1.2 * sqrtI) +
(2 / 1.2) * np.log(1 + 1.2 * sqrtI))
# E_cat = sum(m_cation * Z_cation)
E_an = -sum(m_anion * Z_anion)
E_cat = -E_an
# BMX_phi
BMX_phi = np.zeros((6, 7, *Tc.shape))
BMX = np.zeros((6, 7, *Tc.shape))
BMX_apostroph = np.zeros((6, 7, *Tc.shape))
CMX = np.zeros((6, 7, *Tc.shape))
for cat in range(0, 6):
for an in range(0, 7):
BMX_phi[cat, an] = (beta_0[cat, an] +
beta_1[cat, an] * np.exp(-2 * sqrtI))
BMX[cat, an] = (beta_0[cat, an] + (beta_1[cat, an] / (2 * Istr)) *
(1 - (1 + 2 * sqrtI) * np.exp(-2 * sqrtI)))
BMX_apostroph[cat, an] = ((beta_1[cat, an] / (2 * Istr * Istr)) *
(-1 + (1 + (2 * sqrtI) + (2 * sqrtI)) *
np.exp(-2 * sqrtI)))
CMX[cat, an] = C_phi[cat, an] / (2 * np.sqrt(-Z_anion[an] * Z_cation[cat]))
# BMX* and CMX are calculated differently for 2:2 ion pairs, corrections
# below # § alpha2= 6 for borates ... see Simonson et al 1988
cat = 3
an = 2 # MgBOH42
BMX_phi[cat, an] = (beta_0[cat, an] + beta_1[cat, an] *
np.exp(-1.4 * sqrtI) + beta_2[cat, an] *
np.exp(-6 * sqrtI))
BMX[cat, an] = (beta_0[cat, an] + (beta_1[cat, an] / (0.98 * Istr)) *
(1 - (1 + 1.4 * sqrtI) * np.exp(-1.4 * sqrtI)) +
(beta_2[cat, an] / (18 * Istr)) *
(1 - (1 + 6 * sqrtI) * np.exp(-6 * sqrtI)))
BMX_apostroph[cat, an] = ((beta_1[cat, an] / (0.98 * Istr * Istr)) *
(-1 + (1 + 1.4 * sqrtI + 0.98 * Istr) * np.exp(-1.4 * sqrtI)) +
(beta_2[cat, an] / (18 * Istr)) *
(-1 - (1 + 6 * sqrtI + 18 * Istr) * np.exp(-6 * sqrtI)))
cat = 3
an = 6 # MgSO4
BMX_phi[cat, an] = (beta_0[cat, an] + beta_1[cat, an] *
np.exp(-1.4 * sqrtI) + beta_2[cat, an] *
np.exp(-12 * sqrtI))
BMX[cat, an] = (beta_0[cat, an] + (beta_1[cat, an] / (0.98 * Istr)) *
(1 - (1 + 1.4 * sqrtI) * np.exp(-1.4 * sqrtI)) +
(beta_2[cat, an] / (72 * Istr)) *
(1 - (1 + 12 * sqrtI) * np.exp(-12 * sqrtI)))
BMX_apostroph[cat, an] = ((beta_1[cat, an] / (0.98 * Istr * Istr)) *
(-1 + (1 + 1.4 * sqrtI + 0.98 * Istr) * np.exp(-1.4 * sqrtI)) +
(beta_2[cat, an] / (72 * Istr * Istr)) *
(-1 - (1 + 12 * sqrtI + 72 * Istr) * np.exp(-12 * sqrtI)))
# BMX_apostroph[cat, an] = (beta_1[cat, an] / (0.98 * Istr)) * (-1 + (1 + 1.4
# * sqrtI + 0.98 * Istr) * np.exp(-1.4 * sqrtI)) + (beta_2[cat, an] / (72 *
# Istr)) * (-1-(1 + 12 * sqrtI + 72 * Istr) * np.exp(-12 * sqrtI)) # not 1 /
# (0.98 * Istr * Istr) ... compare M&P98 equation A17 with Pabalan and Pitzer
# 1987 equation 15c / 16b
cat = 4
an = 2 # CaBOH42
BMX_phi[cat, an] = (beta_0[cat, an] + beta_1[cat, an] *
np.exp(-1.4 * sqrtI) + beta_2[cat, an] *
np.exp(-6 * sqrtI))
BMX[cat, an] = (beta_0[cat, an] + (beta_1[cat, an] / (0.98 * Istr)) *
(1 - (1 + 1.4 * sqrtI) * np.exp(-1.4 * sqrtI)) +
(beta_2[cat, an] / (18 * Istr)) *
(1 - (1 + 6 * sqrtI) * np.exp(-6 * sqrtI)))
BMX_apostroph[cat, an] = ((beta_1[cat, an] / (0.98 * Istr * Istr)) *
(-1 + (1 + 1.4 * sqrtI + 0.98 * Istr) * np.exp(-1.4 * sqrtI)) +
(beta_2[cat, an] / (18 * Istr)) *
(-1 - (1 + 6 * sqrtI + 18 * Istr) *
|
np.exp(-6 * sqrtI)
|
numpy.exp
|
import itertools
from common import ScanMode, ScanResult
import collections
import cv2
import enum
import functools
import json
import numpy
import os
from typing import Dict, Iterator, List, Tuple
# The expected color for the video background.
BG_COLOR = numpy.array([207, 238, 240])
class CritterType(enum.Enum):
INSECTS = 1
FISH = 2
SEA_CREATURES = 3
@classmethod
def from_str(cls, value: str) -> 'CritterType':
key = value.upper().replace(' ', '_')
return cls.__members__[key]
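# Hedged example (editor addition): from_str normalises display names, so
# mixed-case strings with spaces map onto the enum members.
def _demo_critter_type():
    return CritterType.from_str('Sea Creatures')  # -> CritterType.SEA_CREATURES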
class CritterImage:
"""The image and data associated with a critter icon."""
def __init__(self, critter_name: str, critter_type: CritterType, icon_name: str):
img_path = os.path.join('critters', 'generated', icon_name)
self.img = cv2.imread(img_path)
self.critter_name = critter_name
self.critter_type = critter_type
self.icon_name = icon_name
def __repr__(self):
return f'CritterIcon({self.critter_name!r}, {self.critter_type!r}, {self.icon_name!r})'
class CritterIcon(numpy.ndarray):
"""Dummy ndarray subclass to hold critter type info."""
critter_type: CritterType
def detect(frame: numpy.ndarray) -> bool:
"""Detects if a given frame is showing Critterpedia."""
color = frame[:20, 1100:1150].mean(axis=(0, 1))
return
|
numpy.linalg.norm(color - BG_COLOR)
|
numpy.linalg.norm
|
# -*- coding: future_fstrings -*-
from __future__ import division
import numpy as np
import cv2
from skimage.transform import warp
from skimage.transform import AffineTransform
import types
import imageio
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug.augmentables.segmaps import SegmentationMapOnImage
class SimpleNamespace:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __repr__(self):
keys = sorted(self.__dict__)
items = ("{}={!r}".format(k, self.__dict__[k]) for k in keys)
return "{}({})".format(type(self).__name__, ", ".join(items))
def __eq__(self, other):
return self.__dict__ == other.__dict__
def dict2class(args_dict):
args = SimpleNamespace()
args.__dict__.update(**args_dict)
return args
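# Hedged example (editor addition): dict2class wraps a plain dict in an
# attribute-style namespace, mirroring how the augmenter arguments are built.
def _demo_dict2class():
    args = dict2class({"rela_size": (0.1, 0.3), "rotation": (-5, 5)})
    return args.rotation  # -> (-5, 5)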
def increase_color(img, brightness_offset):
img = img + brightness_offset # data type is now np.float
img[img > 255] = 255
img[img < 0] = 0
img = img.astype(np.uint8)
return img
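# Hedged example (editor addition): brightening saturates at the uint8 limit.
# A float offset is used so the intermediate array is floating point, as the
# comment inside increase_color assumes.
def _demo_increase_color():
    patch = np.full((4, 4, 3), 250, dtype=np.uint8)
    return increase_color(patch, brightness_offset=20.0)  # every pixel -> 255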
class ImgAugmenter(object):
def __init__(self, args_dict):
self._init_args()
# This is fixed. Please modify directly in this function
self._set_args_for_background_augment()
self.update_args_for_template_augment(args_dict)
def _init_args(self):
args_dict = {
# desired object size relative to the background image
"rela_size": (0.1, 0.3),
"rescale": (1, 1), # scale again randomly
"rotation": (0, 0),
"shear": (0, 0),
"translate_x": (0, 1),
"translate_y": (0, 1),
"brightness_offset": (0, 0),
}
self.args = dict2class(args_dict)
def _set_args_for_background_augment(self):
# Add effects to the background image
self.augseq_transforms = iaa.Sequential([
iaa.CropAndPad(percent=(-0.1, 0.1), pad_mode="edge"),
iaa.Affine(
rotate=(-10, 10),
scale=(0.95, 1.05),
translate_percent={"x": (-0.05, 0.05), "y": (-0.05, 0.05)},
shear=(-1, 1),
),
])
self.augseq_noises = iaa.Sequential([
iaa.Multiply((0.5, 1.5), per_channel=False),
iaa.Add((-10, 10), per_channel=True),
iaa.AdditiveGaussianNoise(scale=0.03*255),
iaa.GaussianBlur(sigma=(0, 0.2))
# iaa.Sharpen(alpha=0.5)
#iaa.CoarseDropout(0.1, size_percent=0.2),
#iaa.ElasticTransformation(alpha=10, sigma=1)
])
def update_args_for_template_augment(self, args_dict):
for key, val in args_dict.items():
if hasattr(self.args, key):
setattr(self.args, key, val)
def _rand_values_from_args(self, args):
''' generate a random value given a range (l, r) from each argument in args '''
# Generate random value
args_dict = {}
for arg_name, arg_val in args.__dict__.items():
l, r = arg_val
if l == r:
args_dict[arg_name] = l
else:
args_dict[arg_name] =
|
np.random.uniform(l, r)
|
numpy.random.uniform
|
#!/usr/bin/env python
#
# Inspired by g_mmpbsa code.
# #
# Copyright (c) 2016-2019,<NAME>.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the molmolpy Developers nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, division, print_function
from builtins import range
from builtins import object
import re
import numpy as np
import argparse
import sys
import os
import math
import time
from copy import deepcopy
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.colors import ListedColormap
import mdtraj as md
from molmolpy.utils.cluster_quality import *
from molmolpy.utils import converters
from molmolpy.utils import plot_tools
from molmolpy.utils import pdb_tools
from molmolpy.utils import folder_utils
from molmolpy.utils import extra_tools
from molmolpy.utils import pymol_tools
from molmolpy.utils import protein_analysis
class EnergyAnalysisObject(object):
"""
Usage Example
>>> from molmolpy.moldyn import md_analysis
>>> from molmolpy.g_mmpbsa import mmpbsa_analyzer
>>>
>>> import os
>>>
>>> # In[3]:
>>>
>>> folder_to_sim = '/media/Work/SimData/g_mmpbsa/HSL/HSL_1_backbone/Cluster1/'
>>>
>>> molmech = folder_to_sim + 'contrib_MM.dat'
>>> polar = folder_to_sim + 'contrib_pol.dat'
>>> apolar = folder_to_sim + 'contrib_apol.dat'
>>>
>>> LasR_energy_object = mmpbsa_analyzer.EnergyAnalysisObject(molmech, polar, apolar,
>>> sim_num=3)
>>>
>>> LasR_energy_object.plot_bar_energy_residues()
>>> LasR_energy_object.plot_most_contributions()
>>> LasR_energy_object.plot_sorted_contributions()
>>>
>>>
>>> centroid_file = '/media/Work/MEGA/Programming/docking_LasR/HSL_1_v8/centroid.pdb'
>>>
>>>
>>> LasR_energy_object.add_centroid_pdb_file(centroid_file)
>>> LasR_energy_object.save_mmpbsa_analysis_pickle('HSL_simulation_cluster3.pickle')
>>> #LasR_energy_object.visualize_interactions_pymol()
>>>
>>>
>>> test = 1
>>> # simulation_name = 'LasR_Ligand_simulation'
>>> #
Analyse g_mmpbsa binding-energy output for a receptor-ligand simulation:
frame-wise total energies, residue-wise contributions, bootstrap estimates
and the associated plots.
Parameters
----------
energymm_xvg, polar_xvg, apolar_xvg : str
Frame-wise molecular-mechanics, polar and apolar energy files.
molmech, polar, apolar : str
Residue-wise contribution files (contrib_MM.dat, contrib_pol.dat,
contrib_apol.dat).
bootstrap : bool, optional
Whether to bootstrap the binding-energy estimate.
bootstrap_steps : int, optional
Number of bootstrap resamples.
sim_num : int, optional
Simulation index used in output file names.
receptor_name, molecule_name : str, optional
Labels used to build the simulation name.
meta_file : str, optional
Optional metafile listing multiple energy file sets.
Notes
-----
Convert gro to PDB so mdtraj recognises the topology:
gmx editconf -f npt.gro -o npt.pdb
"""
# @profile
def __init__(self,
energymm_xvg,
polar_xvg,
apolar_xvg,
molmech,
polar,
apolar,
bootstrap=True,
bootstrap_steps=5000,
sim_num=1,
receptor_name='LasR',
molecule_name='HSL',
meta_file=None
):
self.receptor_name = receptor_name
self.molecule_name = molecule_name
self.sim_num = sim_num
self.simulation_name = self.receptor_name + '_' + self.molecule_name + '_num:' + str(self.sim_num)
self.meta_file = meta_file
# molmech = folder_to_sim + 'contrib_MM.dat'
# polar = folder_to_sim + 'contrib_pol.dat'
# apolar = folder_to_sim + 'contrib_apol.dat'
# Complex Energy
c = []
if meta_file is not None:
MmFile, PolFile, APolFile = ReadMetafile(meta_file)
for i in range(len(MmFile)):
cTmp = Complex(MmFile[i], PolFile[i], APolFile[i], K[i])
cTmp.CalcEnergy(args, frame_wise, i)
c.append(cTmp)
else:
cTmp = Complex(energymm_xvg, polar_xvg, apolar_xvg)
self.cTmp = cTmp
self.full_copy_original = deepcopy(cTmp)
self.full_copy_bootstrap = deepcopy(cTmp)
# cTmp.CalcEnergy(frame_wise, 0, bootstrap=bootstrap, bootstrap_steps=bootstrap_steps)
# c.append(cTmp)
# Summary in output files => "--outsum" and "--outmeta" file options
# TODO adapt to make able to use bootstrap as well, multiple analysis modes?
self.c = c
# summary_output_filename = self.simulation_name + '_binding_summary.log'
# Summary_Output_File(c, summary_output_filename, meta_file)
#
# corr_outname = self.simulation_name + '_correllation_distance.log'
# corr_plot = self.simulation_name + '_correllation_plot.png'
test = 1
# This won't work it needs K, read paper again
#FitCoef_all = PlotCorr(c, corr_outname, corr_plot, bootstrap_steps)
#PlotEnrgy(c, FitCoef_all, args, args.enplot)
# RESIDUE analysis part
self.MMEnData, self.resnameA = ReadData_Residue_Parse(molmech)
self.polEnData, self.resnameB = ReadData_Residue_Parse(polar)
self.apolEnData, self.resnameC = ReadData_Residue_Parse(apolar)
self.resname = CheckResname(self.resnameA, self.resnameB, self.resnameC)
self.sim_num = sim_num
Residues = []
data = []
columns_residue_energy = ['index', 'ResidueNum', 'Residue', 'TotalEnergy', 'TotalEnergySD']
for i in range(len(self.resname)):
CheckEnData_residue(self.MMEnData[i], self.polEnData[i], self.apolEnData[i])
r = Residue()
r.CalcEnergy(self.MMEnData[i], self.polEnData[i], self.apolEnData[i], bootstrap, bootstrap_steps)
Residues.append(r)
# print(' %8s %8.4f %8.4f' % (self.resname[i], r.TotalEn[0], r.TotalEn[1]))
data.append([i, i + 1, self.resname[i], r.TotalEn[0], r.TotalEn[1]])
self.pandas_residue_energy_data = pd.DataFrame(data)
self.pandas_residue_energy_data.columns = columns_residue_energy
test = 1
self.most_contributions = self.pandas_residue_energy_data[:-1]
self.most_contributions = self.most_contributions.sort_values(['TotalEnergy'])
test = 1
def calculate_binding_energy_full(self, idx=0,jump_data=1, bootstrap=False, bootstrap_steps=5000):
'''
Calculate full binding energy then analyze autocorrelation and partial correlation
:param idx: from frame number
:param bootstrap: for this one dont calculate bootstrap
:param bootstrap_steps:
:return:
'''
# TODO CALCULATION OF BINDING ENERGY
outfr = self.simulation_name + '_full.log'
try:
frame_wise = open(outfr, 'w')
except:
raise IOError('Could not open file {0} for writing. \n'.format(outfr))
frame_wise.write(
'#Time E_VdW_mm(Protein)\tE_Elec_mm(Protein)\tE_Pol(Protein)\tE_Apol(Protein)\tE_VdW_mm(Ligand)\tE_Elec_mm(Ligand)\tE_Pol(Ligand)\tE_Apol(Ligand)\tE_VdW_mm(Complex)\tE_Elec_mm(Complex)\tE_Pol(Complex)\tE_Apol(Complex)\tDelta_E_mm\tDelta_E_Pol\tDelta_E_Apol\tDelta_E_binding\n')
self.frame_wise_full = frame_wise
self.c_full = []
self.full_copy_original.CalcEnergy(self.frame_wise_full, idx, jump_data=jump_data, bootstrap=bootstrap, bootstrap_steps=bootstrap_steps)
self.c_full.append(self.full_copy_original)
summary_output_filename = self.simulation_name + '_binding_summary_full.log'
Summary_Output_File(self.c_full, summary_output_filename, self.meta_file)
self.autocorr_analysis(self.c_full, 'full')
def calculate_binding_energy_bootstrap(self, idx=0, bootstrap=True, bootstrap_steps=5000, bootstrap_jump=4):
'''
Calculate bootstrap binding energy then analyze autocorrelation and partial correlation
:param idx: from frame number
:param bootstrap: for this one dont calculate bootstrap
:param bootstrap_steps:
:return:
'''
# TODO CALCULATION OF BINDING ENERGY
outfr = self.simulation_name + '_bootstrap.log'
try:
frame_wise = open(outfr, 'w')
except:
raise IOError('Could not open file {0} for writing. \n'.format(outfr))
frame_wise.write(
'#Time E_VdW_mm(Protein)\tE_Elec_mm(Protein)\tE_Pol(Protein)\tE_Apol(Protein)\tE_VdW_mm(Ligand)\tE_Elec_mm(Ligand)\tE_Pol(Ligand)\tE_Apol(Ligand)\tE_VdW_mm(Complex)\tE_Elec_mm(Complex)\tE_Pol(Complex)\tE_Apol(Complex)\tDelta_E_mm\tDelta_E_Pol\tDelta_E_Apol\tDelta_E_binding\n')
self.frame_wise_bootstrap = frame_wise
self.c_bootstrap = []
self.full_copy_bootstrap.CalcEnergy(self.frame_wise_bootstrap, idx,
bootstrap=bootstrap,
bootstrap_steps=bootstrap_steps,
bootstrap_jump=bootstrap_jump)
self.c_bootstrap.append(self.full_copy_bootstrap)
summary_output_filename = self.simulation_name + '_binding_summary_bootstrap.log'
Summary_Output_File(self.c_bootstrap, summary_output_filename, self.meta_file)
self.autocorr_analysis(self.c_bootstrap, 'bootstrap')
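# Hedged usage sketch (editor addition), assuming an EnergyAnalysisObject
# built as in the class docstring is bound to `obj`:
#   obj.calculate_binding_energy_full(idx=0)
#   obj.calculate_binding_energy_bootstrap(idx=0, bootstrap_steps=5000)
# Both methods write a frame-wise log, a summary file and the
# (partial) autocorrelation figures via autocorr_analysis().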
def autocorr_analysis(self, energy_val, naming='full'):
if naming =='full':
total_en = energy_val[0].TotalEn
time = energy_val[0].time
else:
total_en = energy_val[0].TotalEn_bootstrap
time = energy_val[0].time_bootstrap
# Old version :)
# print('Mean autocorrelation ', np.mean(autocorr(total_en)))
# plt.semilogx(time, autocorr(total_en))
# plt.xlabel('Time [ps]', size=16)
# plt.ylabel('Binding Energy autocorrelation', size=16)
# plt.show()
from pandas import Series
from matplotlib import pyplot
from statsmodels.graphics.tsaplots import plot_acf
series = Series(total_en, index=time)
# https://machinelearningmastery.com/gentle-introduction-autocorrelation-partial-autocorrelation/
plot_acf(series, alpha=0.05)
# pyplot.show()
plt.savefig(self.simulation_name + '_autocorrelation_bindingEnergy_{0}.png'.format(naming), dpi=600)
from statsmodels.graphics.tsaplots import plot_pacf
# plot_pacf(series, lags=50)
plot_pacf(series)
plt.savefig(self.simulation_name +'_partial_autocorrelation_bindingEnergy_{0}.png'.format(naming), dpi=600)
#pyplot.show()
test = 1
def plot_binding_energy_full(self):
bind_energy = self.full_copy_original.TotalEn
time = self.full_copy_original.time
dataframe = converters.convert_data_to_pandas(time, bind_energy,
x_axis_name='time',
y_axis_name='binding')
import seaborn as sns
sns.set(style="ticks")
plt.clf()
plt.plot(time, bind_energy)
plt.savefig('test.png', dpi=600)
# sns.lmplot(x="time", y="binding",data=dataframe,
# ci=None, palette="muted", size=4,
# scatter_kws={"s": 50, "alpha": 1})
# sns.tsplot(data=dataframe)
test = 1
def add_centroid_pdb_file(self, filename, simplified_state=True):
self.centroid_pdb_file = filename
self.dssp_traj = md.load(self.centroid_pdb_file)
self.dssp_data = md.compute_dssp(self.dssp_traj, simplified=simplified_state)
# self.helixes = protein_analysis.find_helixes(self.dssp_data)
self.helixes = protein_analysis.find_dssp_domain(self.dssp_data, type='H')
self.strands = protein_analysis.find_dssp_domain(self.dssp_data, type='E')
self.data_to_save = {self.sim_num: {'residueEnergyData': self.pandas_residue_energy_data[:-1],
'mostResidueContrib': self.most_contributions_plot,
'mostAllContrib': self.most_contributions_plot_all,
'centroidFile': self.centroid_pdb_file,
'dsspObject': self.dssp_data,
'dsspData': self.dssp_data,
'dsspStructures': {'helix': self.helixes,
'strands': self.strands}}
}
test = 1
def save_mmpbsa_analysis_pickle(self, filename):
import pickle
if filename is None:
filename = self.simulation_name + '_pickleFile.pickle'
# pickle.dump(self.cluster_models, open(filename, "wb"))
pickle.dump(self.data_to_save, open(filename, "wb"))
def plot_bar_energy_residues(self,
custom_dpi=600,
trasparent_alpha=False):
# sns.set(style="white", context="talk")
sns.set(style="ticks", context="paper")
# Set up the matplotlib figure
f, ax1 = plt.subplots(1, 1, figsize=(plot_tools.cm2inch(17, 10)), sharex=True)
# Generate some sequential data
to_plot_data = self.pandas_residue_energy_data[:-1]
sns.barplot(to_plot_data['ResidueNum'], to_plot_data['TotalEnergy'],
palette="BuGn_d", ax=ax1)
ax1.set_ylabel("Contribution Energy (kJ/mol)")
ax1.set_xlabel("Residue Number")
last_index = to_plot_data['ResidueNum'].iloc[-1]
# this is buggy
x_label_key = []
# ax1.set_xticklabels(to_plot_data['ResidueNum']) # set new labels
# # ax1.set_x
#
# for ind, label in enumerate(ax1.get_xticklabels()):
# if ind+1 == last_index:
# label.set_visible(True)
# elif (ind+1) % 100 == 0: # every 100th label is kept
# label.set_visible(True)
# # label = round(sim_time[ind])
# # x_label_key.append(ind)
# else:
# label.set_visible(False)
# x_label_key.append(ind)
ax1.set_xlim(1, last_index)
ax1.xaxis.set_major_locator(ticker.LinearLocator(3))
ax1.xaxis.set_minor_locator(ticker.LinearLocator(31))
labels = [item.get_text() for item in ax1.get_xticklabels()]
test = 1
labels[0] = '1'
labels[1] = str(last_index // 2)
labels[2] = str(last_index)
ax1.set_xticklabels(labels)
# ax1.text(0.0, 0.1, "LinearLocator(numticks=3)",
# fontsize=14, transform=ax1.transAxes)
tick_labels = []
# for ind, tick in enumerate(ax1.get_xticklines()):
# # tick part doesn't work
# test = ind
# # if ind+1 == last_index:
# # tick.set_visible(True)
# if (ind+1) % 10 == 0: # every 100th label is kept
# tick.set_visible(True)
# else:
# tick.set_visible(False)
# tick_labels.append(tick)
#
# ax1.set_xticklabels
# for ind, label in enumerate(ax.get_yticklabels()):
# if ind % 50 == 0: # every 100th label is kept
# label.set_visible(True)
# else:
# label.set_visible(False)
#
# for ind, tick in enumerate(ax.get_yticklines()):
# if ind % 50 == 0: # every 100th label is kept
# tick.set_visible(True)
# else:
# tick.set_visible(False)
# Finalize the plot
sns.despine()
# plt.setp(f.axes, yticks=[])
plt.tight_layout()
# plt.tight_layout(h_pad=3)
# sns.plt.show()
f.savefig(self.simulation_name + '_residue_contribution_all.png',
dpi=custom_dpi,
transparent=trasparent_alpha)
def plot_most_contributions(self,
custom_dpi=600,
trasparent_alpha=False):
sns.set(style="white", context="talk")
# Set up the matplotlib figure
# f, ax1 = plt.subplots(1, 1, figsize=(plot_tools.cm2inch(17, 10)), sharex=True)
# Generate some sequential data
self.most_contributions_plot = self.most_contributions[self.most_contributions['TotalEnergy'] < -1.0]
self.most_contributions_plot = self.most_contributions_plot[
np.isfinite(self.most_contributions_plot['TotalEnergy'])]
# self.most_contributions_plot = self.most_contributions_plot.dropna(axis=1)
test = 1
# sns.barplot(self.most_contributions_plot['Residue'], self.most_contributions_plot['TotalEnergy'],
# palette="BuGn_d", ax=ax1)
# cmap = sns.cubehelix_palette(n_colors=len(self.most_contributions_plot['TotalEnergy']), as_cmap=True)
cmap = sns.dark_palette("palegreen", as_cmap=True)
ax1 = self.most_contributions_plot.plot(x='Residue', y='TotalEnergy', yerr='TotalEnergySD', kind='bar',
colormap='Blues',
legend=False)
# ax1 = self.most_contributions_plot['TotalEnergy'].plot(kind='bar')
# ax1.bar(self.most_contributions_plot['ResidueNum'], self.most_contributions_plot['TotalEnergy'],
# width=40,
# yerr=self.most_contributions_plot['TotalEnergySD'])
ax1.set_ylabel("Contribution Energy (kJ/mol)")
#
# # # Center the data to make it diverging
# # y2 = y1 - 5
# # sns.barplot(x, y2, palette="RdBu_r", ax=ax2)
# # ax2.set_ylabel("Diverging")
# #
# # # Randomly reorder the data to make it qualitative
# # y3 = rs.choice(y1, 9, replace=False)
# # sns.barplot(x, y3, palette="Set3", ax=ax3)
# # ax3.set_ylabel("Qualitative")
#
# # Finalize the plot
#
labels = ax1.get_xticklabels() # get x labels
# for i, l in enumerate(labels):
# if (i % 2 == 0): labels[i] = '' # skip even labels
ax1.set_xticklabels(self.most_contributions_plot['Residue'], rotation=50) # set new labels
# plt.show()
#
#
# sns.despine(bottom=True)
# # plt.setp(f.axes, yticks=[])
plt.tight_layout()
# # plt.tight_layout(h_pad=3)
# # sns.plt.show()
#
plt.savefig(self.simulation_name + '_most_residue_contribution.png',
dpi=custom_dpi,
transparent=trasparent_alpha)
def plot_sorted_contributions(self,
custom_dpi=600,
trasparent_alpha=False,
lower_criteria=-0.5,
upper_criteria=0.5
):
my_cmap = sns.light_palette("Navy", as_cmap=True)
self.cmap_residue_energy = sns.cubehelix_palette(as_cmap=True)
self.most_contributions_plot_all = self.most_contributions[
(self.most_contributions['TotalEnergy'] < lower_criteria) |
(self.most_contributions['TotalEnergy'] > upper_criteria)]
colors_sns = sns.cubehelix_palette(n_colors=len(self.most_contributions_plot_all), dark=0.5, light=0.92,
reverse=True)
# residue_color_data = converters.convert_seaborn_color_to_rgb(colors)
self.all_residue_colors_to_rgb = converters.convert_values_to_rgba(self.most_contributions_plot_all['TotalEnergy'],
cmap=self.cmap_residue_energy, type='seaborn')
# colors = sns.cubehelix_palette(n_colors=len(self.most_contributions_plot_all), dark=0.5, light=0.92, reverse=True)
#
# residue_color_data = converters.convert_seaborn_color_to_rgb(colors)
# sns.palplot(colors)
# plot_tools.custom_palplot_vertical(colors)
# sns.plt.show()
test = 1
# self.most_contributions_plot_all.plot(x='Residue', y='TotalEnergy', yerr='TotalEnergySD', kind='bar',
# colormap=self.cmap_residue_energy,
# legend=False)
# f, ax1 = plt.subplots(1, 1, figsize=(plot_tools.cm2inch(17 , 10)), sharex=True)
sns.set(style="white", context="talk")
        self.most_contributions_plot_all.plot(x='Residue', y='TotalEnergy', yerr='TotalEnergySD', kind='bar',
                                              color=colors_sns,
                                              legend=False)
plt.ylabel("Contribution Energy (kJ/mol)")
plt.xlabel("Residues")
plt.tight_layout()
# # plt.tight_layout(h_pad=3)
# # sns.plt.show()
#
plt.savefig(self.simulation_name + '_sorted_residue_contribution.png',
dpi=custom_dpi,
transparent=trasparent_alpha)
@hlp.timeit
def visualize_interactions_pymol(self, show_energy=False):
# self.clusters_centroids_mmpbsa_dict
# self.filtered_neighbours
test = 1
        print('Start of Pymol MD MMPBSA residue show method ---> ')
print('Visualising MMPBSA residue energy contribution')
# To pass Values
# self.cmap_residue_energy
# self.most_contributions_plot_all
#
# self.all_residue_colors_to_rgba
save_state_name = self.receptor_name + '_' + self.molecule_name + '_' + \
'centroid:{0}_mdEnergyAnalyzer_pymolViz.pse'.format(self.sim_num)
pymol_tools.generate_pymol_residue_energy_viz(self.centroid_pdb_file,
self.dssp_data,
self.most_contributions_plot_all,
save_state_name,
show_residue_energy=show_energy
)
time.sleep(5)
        print('Finished Pymol method --->')
# try:
# fout = open(args.output, 'w')
# except:
# raise IOError('Could not open file {0} for writing. \n'.format(args.output))
# try:
# fmap = open(args.outmap, 'w')
# except:
# raise IOError('Could not open file {0} for writing. \n'.format(args.outmap))
# fout.write(
# '#Residues MM Energy(+/-)dev/error Polar Energy(+/-)dev/error APolar Energy(+/-)dev/error Total Energy(+/-)dev/error\n')
# for i in range(len(resname)):
# if (args.cutoff == 999):
# fout.write("%-8s %4.4f %4.4f %4.4f %4.4f %4.4f %4.4f %4.4f %4.4f \n" % (
# resname[i], Residues[i].FinalMM[0], Residues[i].FinalMM[1], Residues[i].FinalPol[0],
# Residues[i].FinalPol[1], Residues[i].FinalAPol[0], Residues[i].FinalAPol[1], Residues[i].TotalEn[0],
# Residues[i].TotalEn[1]))
# elif (args.cutoff <= Residues[i].TotalEn[0]) or ((-1 * args.cutoff) >= Residues[i].TotalEn[0]):
# fout.write("%-8s %4.4f %4.4f %4.4f %4.4f %4.4f %4.4f %4.4f %4.4f \n" % (
# resname[i], Residues[i].FinalMM[0], Residues[i].FinalMM[1], Residues[i].FinalPol[0],
# Residues[i].FinalPol[1], Residues[i].FinalAPol[0], Residues[i].FinalAPol[1], Residues[i].TotalEn[0],
# Residues[i].TotalEn[1]))
#
# fmap.write("%-8d %4.4f \n" % ((i + 1), Residues[i].TotalEn[0])) # TODO Binding energy calculation
def autocorr(x):
"Compute an autocorrelation with numpy"
x = x - np.mean(x)
result = np.correlate(x, x, mode='full')
result = result[result.size//2:]
return result / result[0]
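# Usage sketch for autocorr (assumes numpy is already imported as np at module level, as in
# the calls above); the signal values here are purely illustrative:
#   >>> sig = np.sin(np.linspace(0., 20. * np.pi, 500))
#   >>> acf = autocorr(sig)
#   >>> acf[0]
#   1.0
# The output is normalized so that the zero-lag autocorrelation is exactly 1.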
def PlotEnrgy(c, FitCoef_all, args, fname):
CompEn, CompEnErr, ExpEn, CI = [], [], [], []
for i in range(len(c)):
CompEn.append(c[i].FinalAvgEnergy)
ExpEn.append(c[i].freeEn)
CompEnErr.append(c[i].StdErr)
CI.append(c[i].CI)
fig = plt.figure()
plt.subplots_adjust(left=0.15, right=0.9, top=0.9, bottom=0.15)
ax = fig.add_subplot(111)
CI = np.array(CI).T
# To plot data
ax.errorbar(ExpEn, CompEn, yerr=CI, fmt='o', ecolor='k', color='k', zorder=20000)
    # To plot the straight line having the median correlation coefficient
fit = np.polyfit(ExpEn, CompEn, 1)
fitCompEn = np.polyval(fit, ExpEn)
ax.plot(ExpEn, fitCompEn, color='k', lw=3, zorder=20000)
    # To plot the straight line having the minimum correlation coefficient
    # fitCompEn = np.polyval(FitCoef[1], ExpEn)
    # ax.plot(ExpEn,fitCompEn,color='g',lw=2)
    # To plot the straight line having the maximum correlation coefficient
# fitCompEn = np.polyval(FitCoef[2], ExpEn)
# ax.plot(ExpEn,fitCompEn,color='r',lw=2)
for i in range(len(FitCoef_all[0])):
fitCompEn = np.polyval([FitCoef_all[0][i], FitCoef_all[1][i]], ExpEn)
ax.plot(ExpEn, fitCompEn, color='#BDBDBD', lw=0.5, zorder=1)
ax.set_xlabel('Experimental Free Energy (kJ/mol)', fontsize=24, fontname='Times new Roman')
ax.set_ylabel('Computational Binding Energy (kJ/mol)', fontsize=24, fontname='Times new Roman')
xtics = ax.get_xticks()
plt.xticks(xtics, fontsize=24, fontname='Times new Roman')
ytics = ax.get_yticks()
plt.yticks(ytics, fontsize=24, fontname='Times new Roman')
plt.savefig(fname, dpi=300, orientation='landscape')
def PlotCorr(c, corr_outname, fname, bootstrap_nsteps):
CompEn, ExpEn = [], []
for i in range(len(c)):
CompEn.append(c[i].FinalAvgEnergy)
ExpEn.append(c[i].freeEn)
AvgEn = np.sort(c[i].AvgEnBS, kind='mergesort')
n = len(AvgEn)
div = int(n / 21)
AvgEn = AvgEn[:n:div]
c[i].AvgEnBS = AvgEn
main_r = np.corrcoef([CompEn, ExpEn])[0][1]
r, FitCoef = [], []
Id_0_FitCoef, Id_1_FitCoef = [], []
f_corrdist = open(corr_outname, 'w')
    # Bootstrap analysis for the correlation coefficient
nbstep = bootstrap_nsteps
for i in range(nbstep):
temp_x, temp_y = [], []
energy_idx = np.random.randint(0, 22, size=len(c))
complex_idx = np.random.randint(0, len(c), size=len(c))
for j in range(len(complex_idx)):
temp_y.append(c[complex_idx[j]].AvgEnBS[energy_idx[j]])
temp_x.append(c[complex_idx[j]].freeEn)
rtmp = np.corrcoef([temp_x, temp_y])[0][1]
temp_x = np.array(temp_x)
temp_y = np.array(temp_y)
r.append(rtmp)
fit = np.polyfit(temp_x, temp_y, 1)
FitCoef.append(fit)
f_corrdist.write('{0}\n'.format(rtmp))
    # Separating slope and intercept
    Id_0_FitCoef = np.transpose(FitCoef)[0]
    Id_1_FitCoef = np.transpose(FitCoef)[1]
    # Calculating the mode of the correlation coefficient
    density, r_hist = np.histogram(r, 25, density=True)
mode = (r_hist[np.argmax(density) + 1] + r_hist[np.argmax(density)]) / 2
# Calculating Confidence Interval
r = np.sort(r)
CI_min_idx = int(0.005 * nbstep)
CI_max_idx = int(0.995 * nbstep)
CI_min = mode - r[CI_min_idx]
CI_max = r[CI_max_idx] - mode
print("%5.3f %5.3f %5.3f %5.3f" % (main_r, mode, CI_min, CI_max))
    # Plotting the correlation coefficient distribution
fig = plt.figure()
plt.subplots_adjust(left=0.15, right=0.9, top=0.9, bottom=0.15)
ax = fig.add_subplot(111)
    n, bins, patches = ax.hist(r, 40, density=True, facecolor='#B2B2B2', alpha=0.75, lw=0.1)
plt.title('Mode = {0:.3f}\nConf. Int. = -{1:.3f}/+{2:.3f}'.format(mode, CI_min, CI_max), fontsize=18,
fontname='Times new Roman')
bincenters = 0.5 * (bins[1:] + bins[:-1])
# y = mlab.normpdf( bincenters, mode, np.std(r))
# l = ax.plot(bincenters, y, 'k--', lw=1)
ax.set_xlabel('Correlation Coefficient', fontsize=24, fontname='Times new Roman')
ax.set_ylabel('Density', fontsize=24, fontname='Times new Roman')
xtics = ax.get_xticks()
plt.xticks(xtics, fontsize=24, fontname='Times new Roman')
ytics = ax.get_yticks()
plt.yticks(ytics, fontsize=24, fontname='Times new Roman')
plt.savefig(fname, dpi=300, orientation='landscape')
return [Id_0_FitCoef, Id_1_FitCoef]
class Complex(object):
def __init__(self, MmFile, PolFile, APolFile):
self.frames = []
self.TotalEn = []
self.Vdw, self.Elec, self.Pol, self.Sas, self.Sav, self.Wca = [], [], [], [], [], []
self.MmFile = MmFile
self.PolFile = PolFile
self.APolFile = APolFile
self.AvgEnBS = []
self.CI = []
self.FinalAvgEnergy = 0
self.StdErr = 0
def jump_data_conv(self, data, jump_data):
temp_data = []
for tempus in data:
new_temp = tempus[::jump_data]
temp_data.append(new_temp)
return temp_data
def CalcEnergy(self, frame_wise, idx, jump_data=1, bootstrap=False, bootstrap_jump=4, bootstrap_steps=None):
        mmEn = ReadData(self.MmFile, n=7)
        polEn = ReadData(self.PolFile, n=4)
        apolEn = ReadData(self.APolFile, n=10)
        if jump_data > 1:
            mmEn = self.jump_data_conv(mmEn, jump_data)
            polEn = self.jump_data_conv(polEn, jump_data)
            apolEn = self.jump_data_conv(apolEn, jump_data)
CheckEnData(mmEn, polEn, apolEn)
time, MM, Vdw, Elec, Pol, Apol, Sas, Sav, Wca = [], [], [], [], [], [], [], [], []
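        # The loop below assumes the g_mmpbsa column layout read above: for every term the
        # complex value minus the sum of the protein and ligand values gives the difference,
        # e.g. Delta_E_mm = E_mm(complex) - E_mm(protein) - E_mm(ligand), and the per-frame
        # binding energy is Delta_E_mm + Delta_E_pol + Delta_E_apol.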
for i in range(len(mmEn[0])):
# Vacuum MM
Energy = mmEn[5][i] + mmEn[6][i] - (mmEn[1][i] + mmEn[2][i] + mmEn[3][i] + mmEn[4][i])
MM.append(Energy)
Energy = mmEn[5][i] - (mmEn[1][i] + mmEn[3][i])
Vdw.append(Energy)
Energy = mmEn[6][i] - (mmEn[2][i] + mmEn[4][i])
Elec.append(Energy)
# Polar
Energy = polEn[3][i] - (polEn[1][i] + polEn[2][i])
Pol.append(Energy)
# Non-polar
Energy = apolEn[3][i] + apolEn[6][i] + apolEn[9][i] - (
apolEn[1][i] + apolEn[2][i] + apolEn[4][i] + apolEn[5][i] + apolEn[7][i] + apolEn[8][i])
Apol.append(Energy)
Energy = apolEn[3][i] - (apolEn[1][i] + apolEn[2][i])
Sas.append(Energy)
Energy = apolEn[6][i] - (apolEn[4][i] + apolEn[5][i])
Sav.append(Energy)
Energy = apolEn[9][i] - (apolEn[7][i] + apolEn[8][i])
Wca.append(Energy)
# Final Energy
time.append(mmEn[0][i])
Energy = MM[i] + Pol[i] + Apol[i]
self.TotalEn.append(Energy)
# TODO HISTOGRAM NEED TO DO SOMETHING
# TAKE A VERY CAREFUL LOOK
# https://machinelearningmastery.com/calculate-bootstrap-confidence-intervals-machine-learning-results-python/
plt.clf()
plt.hist(self.TotalEn)
plt.show()
plt.clf()
self.time = time
self.time_bootstrap = time[::bootstrap_jump]
self.TotalEn_bootstrap = self.TotalEn[::bootstrap_jump]
# Writing frame wise component energy to file
frame_wise.write('\n#Complex %d\n' % ((idx + 1)))
for i in range(len(time)):
frame_wise.write('%15.3lf %15.3lf %15.3lf %15.3lf %15.3lf' % (
time[i], mmEn[1][i], mmEn[2][i], polEn[1][i], (apolEn[1][i] + apolEn[4][i] + apolEn[7][i])))
frame_wise.write('%15.3lf %15.3lf %15.3lf %15.3lf' % (
mmEn[3][i], mmEn[4][i], polEn[2][i], (apolEn[2][i] + apolEn[5][i] + apolEn[8][i])))
frame_wise.write('%15.3lf %15.3lf %15.3lf %15.3lf' % (
mmEn[5][i], mmEn[6][i], polEn[3][i], (apolEn[3][i] + apolEn[6][i] + apolEn[9][i])))
frame_wise.write('%15.3lf %15.3lf %15.3lf %15.3lf\n' % (MM[i], Pol[i], Apol[i], self.TotalEn[i]))
# Bootstrap analysis energy components
if bootstrap is True:
bsteps = bootstrap_steps
curr_Vdw = Vdw[::bootstrap_jump]
avg_energy, error = BootStrap(curr_Vdw, bsteps)
self.Vdw.append(avg_energy)
self.Vdw.append(error)
curr_Elec = Elec[::bootstrap_jump]
avg_energy, error = BootStrap(curr_Elec, bsteps)
self.Elec.append(avg_energy)
self.Elec.append(error)
curr_Pol = Pol[::bootstrap_jump]
avg_energy, error = BootStrap(curr_Pol, bsteps)
self.Pol.append(avg_energy)
self.Pol.append(error)
curr_Sas = Sas[::bootstrap_jump]
avg_energy, error = BootStrap(curr_Sas, bsteps)
self.Sas.append(avg_energy)
self.Sas.append(error)
curr_Sav = Sav[::bootstrap_jump]
avg_energy, error = BootStrap(curr_Sav, bsteps)
self.Sav.append(avg_energy)
self.Sav.append(error)
curr_Wca = Wca[::bootstrap_jump]
avg_energy, error = BootStrap(curr_Wca, bsteps)
self.Wca.append(avg_energy)
self.Wca.append(error)
# Bootstrap => Final Average Energy
curr_TotalEn = self.TotalEn_bootstrap
#from matplotlib import pyplot
self.AvgEnBS, AvgEn, EnErr, CI = ComplexBootStrap(curr_TotalEn, bsteps)
self.FinalAvgEnergy = AvgEn
self.StdErr = EnErr
self.CI = CI
# If not bootstrap then average and standard deviation
else:
self.Vdw.append(np.mean(Vdw))
self.Vdw.append(np.std(Vdw))
self.Elec.append(np.mean(Elec))
self.Elec.append(np.std(Elec))
self.Pol.append(np.mean(Pol))
self.Pol.append(np.std(Pol))
self.Sas.append(np.mean(Sas))
self.Sas.append(np.std(Sas))
self.Sav.append(np.mean(Sav))
self.Sav.append(np.std(Sav))
self.Wca.append(np.mean(Wca))
self.Wca.append(np.std(Wca))
self.FinalAvgEnergy = np.mean(self.TotalEn)
self.StdErr = np.std(self.TotalEn)
def Summary_Output_File(AllComplex, output_name, meta_file=None):
    try:
        fs = open(output_name, 'w')
    except:
        raise IOError('Could not open file {0} for writing. \n'.format(output_name))
    if meta_file:
        meta_output_name = output_name + '_meta.txt'
        try:
            fm = open(meta_output_name, 'w')
        except:
            raise IOError('Could not open file {0} for writing. \n'.format(meta_output_name))
fm.write('# Complex_Number\t\tTotal_Binding_Energy\t\tError\n')
for n in range(len(AllComplex)):
fs.write('\n\n#Complex Number: %4d\n' % (n + 1))
fs.write('===============\n SUMMARY \n===============\n\n')
fs.write('\n van der Waal energy = %15.3lf +/- %7.3lf kJ/mol\n' % (
AllComplex[n].Vdw[0], AllComplex[n].Vdw[1]))
        fs.write('\n Electrostatic energy = %15.3lf +/- %7.3lf kJ/mol\n' % (
AllComplex[n].Elec[0], AllComplex[n].Elec[1]))
fs.write('\n Polar solvation energy = %15.3lf +/- %7.3lf kJ/mol\n' % (
AllComplex[n].Pol[0], AllComplex[n].Pol[1]))
fs.write('\n SASA energy = %15.3lf +/- %7.3lf kJ/mol\n' % (
AllComplex[n].Sas[0], AllComplex[n].Sas[1]))
fs.write('\n SAV energy = %15.3lf +/- %7.3lf kJ/mol\n' % (
AllComplex[n].Sav[0], AllComplex[n].Sav[1]))
fs.write('\n WCA energy = %15.3lf +/- %7.3lf kJ/mol\n' % (
AllComplex[n].Wca[0], AllComplex[n].Wca[1]))
fs.write('\n Binding energy = %15.3lf +/- %7.3lf kJ/mol\n' % (
AllComplex[n].FinalAvgEnergy, AllComplex[n].StdErr))
fs.write('\n===============\n END \n===============\n\n')
if meta_file:
fm.write('%5d %15.3lf %7.3lf\n' % (n + 1, AllComplex[n].FinalAvgEnergy, AllComplex[n].StdErr))
def CheckEnData(mmEn, polEn, apolEn):
frame = len(mmEn[0])
for i in range(len(mmEn)):
if (len(mmEn[i]) != frame):
raise ValueError("In MM file, size of columns are not equal.")
for i in range(len(polEn)):
if (len(polEn[i]) != frame):
raise ValueError("In Polar file, size of columns are not equal.")
for i in range(len(apolEn)):
if (len(apolEn[i]) != frame):
raise ValueError("In APolar file, size of columns are not equal.")
def ParseOptions():
parser = argparse.ArgumentParser()
parser.add_argument("-mt", "--multiple",
help='If given, calculate for multiple complexes. Need Metafile containing path of energy files',
action="store_true")
parser.add_argument("-mf", "--metafile", help='Metafile containing path to energy files of each complex in a row obtained from g_mmpbsa in following order: \
[MM file] [Polar file] [ Non-polar file] [Ki] \
Ki Should be in NanoMolar (nM)', action="store", default='metafile.dat',
metavar='metafile.dat')
parser.add_argument("-m", "--molmech", help='Vacuum Molecular Mechanics energy file obtained from g_mmpbsa',
action="store", default='energy_MM.xvg', metavar='energy_MM.xvg')
parser.add_argument("-p", "--polar", help='Polar solvation energy file obtained from g_mmpbsa', action="store",
default='polar.xvg', metavar='polar.xvg')
parser.add_argument("-a", "--apolar", help='Non-Polar solvation energy file obtained from g_mmpbsa', action="store",
default='apolar.xvg', metavar='apolar.xvg')
parser.add_argument("-bs", "--bootstrap", help='If given, Enable Boot Strap analysis', action="store_true")
parser.add_argument("-nbs", "--nbstep", help='Number of boot strap steps for average energy calculation',
action="store", type=int, default=1000)
parser.add_argument("-of", "--outfr", help='Energy File: Energy components frame wise', action="store",
default='full_energy.dat', metavar='full_energy.dat')
parser.add_argument("-os", "--outsum", help='Final Energy File: Full Summary of energy components', action="store",
default='summary_energy.dat', metavar='summary_energy.dat')
parser.add_argument("-om", "--outmeta",
                        help='Final Energy File for Multiple Complexes: Complex wise final binding energy',
action="store", default='meta_energy.dat', metavar='meta_energy.dat')
parser.add_argument("-ep", "--enplot", help='Experimental Energy vs Calculated Energy Correlation Plot',
action="store", default='enplot.png', metavar='enplot.png')
parser.add_argument("-cd", "--corrdist", help='Correlation distribution data from bootstrapping', action="store",
default='corrdist.dat', metavar='corrdist.dat')
parser.add_argument("-cp", "--corrplot", help='Plot of correlation distribution', action="store",
default='corrdist.png', metavar='corrdist.png')
if len(sys.argv) < 2:
print('ERROR: No input files. Need help!!!')
parser.print_help()
sys.exit(1)
args = parser.parse_args()
if args.multiple:
if not os.path.exists(args.metafile):
print('\nERROR: {0} not found....\n'.format(args.metafile))
parser.print_help()
sys.exit(1)
else:
if not os.path.exists(args.molmech):
print('\nERROR: {0} not found....\n'.format(args.molmech))
parser.print_help()
sys.exit(1)
if not os.path.exists(args.polar):
print('\nERROR: {0} not found....\n'.format(args.polar))
parser.print_help()
sys.exit(1)
if not os.path.exists(args.apolar):
print('\nERROR: {0} not found....\n'.format(args.apolar))
parser.print_help()
sys.exit(1)
return args
def ReadData(FileName, n=2):
infile = open(FileName, 'r')
x, data = [], []
for line in infile:
line = line.rstrip('\n')
if not line.strip():
continue
        if re.match('#|@', line) is None:
temp = line.split()
data.append(np.array(temp))
for j in range(0, n):
x_temp = []
for i in range(len(data)):
try:
value = float(data[i][j])
except:
raise FloatingPointError(
'\nCould not convert {0} to floating point number.. Something is wrong in {1}..\n'.format(
data[i][j], FileName))
x_temp.append(value)
x.append(x_temp)
return x
def ComplexBootStrap(x, step=1000):
avg = []
x = np.array(x)
n = len(x)
idx = np.random.randint(0, n, (step, n))
sample_x = x[idx]
avg = np.sort(np.mean(sample_x, 1))
CI_min = avg[int(0.005 * step)]
CI_max = avg[int(0.995 * step)]
import scipy
import scikits.bootstrap as bootstrap
    CIs = bootstrap.ci(data=x, statfunction=np.mean, n_samples=20000, alpha=0.005)
print("Bootstrapped 99.5% confidence intervals\nLow:", CIs[0], "\nHigh:", CIs[1])
print('------------------------------------------------------------------------------')
print("Bootstrapped 99.5% confidence intervals ORIGINAL\nLow:", CI_min, "\nHigh:", CI_max)
# print('Energy = %13.3f; Confidance Interval = (-%-5.3f / +%-5.3f)\n' % (np.mean(avg), (np.mean(avg)-CI_min), (CI_max-np.mean(avg))))
return avg, np.mean(avg), np.std(avg), [(np.mean(avg) - CI_min), (CI_max - np.mean(avg))]
def BootStrap(x, step=1000):
if (np.mean(x)) == 0:
return 0.000, 0.000
else:
avg = []
x = np.array(x)
n = len(x)
idx = np.random.randint(0, n, (step, n))
sample_x = x[idx]
avg = np.sort(np.mean(sample_x, 1))
return np.mean(avg), np.std(avg)
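# Minimal sketch of how BootStrap is meant to be used (the sample below is synthetic and only
# illustrative; numpy is assumed imported as np):
#   >>> sample = np.random.randn(200) - 50.      # fake per-frame energies in kJ/mol
#   >>> mean_bs, err_bs = BootStrap(sample, step=1000)
# mean_bs approximates np.mean(sample) and err_bs is the spread of the bootstrap means.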
def find_nearest_index(array, value):
idx = (np.abs(array - value)).argmin()
return idx
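# Example (illustrative values):
#   >>> find_nearest_index(np.array([0.0, 0.5, 1.0, 1.5]), 0.7)
#   1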
def ReadMetafile(metafile):
MmFile, PolFile, APolFile, Ki = [], [], [], []
FileList = open(metafile, 'r')
for line in FileList:
line = line.rstrip('\n')
if not line.strip():
continue
temp = line.split()
MmFile.append(temp[0])
PolFile.append(temp[1])
APolFile.append(temp[2])
Ki.append(float(temp[3]))
if not os.path.exists(temp[0]):
raise IOError('Could not open file {0} for reading. \n'.format(temp[0]))
if not os.path.exists(temp[1]):
raise IOError('Could not open file {0} for reading. \n'.format(temp[1]))
if not os.path.exists(temp[2]):
raise IOError('Could not open file {0} for reading. \n'.format(temp[2]))
return MmFile, PolFile, APolFile, Ki
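# A metafile (see the --metafile option in ParseOptions) is expected to contain one complex
# per row: [MM file] [Polar file] [Non-polar file] [Ki in nM]. A hypothetical example row:
#   energy_MM_1.xvg  polar_1.xvg  apolar_1.xvg  150.0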
# TODO RESIDUE CALCULATION
def main():
args = ParseOptions()
MMEnData, resnameA = ReadData_Residue_Parse(args.molmech)
polEnData, resnameB = ReadData_Residue_Parse(args.polar)
apolEnData, resnameC = ReadData_Residue_Parse(args.apolar)
resname = CheckResname(resnameA, resnameB, resnameC)
    print('Total number of Residues: {0}\n'.format(len(resname) + 1))
Residues = []
for i in range(len(resname)):
CheckEnData(MMEnData[i], polEnData[i], apolEnData[i])
r = Residue()
r.CalcEnergy(MMEnData[i], polEnData[i], apolEnData[i], args)
Residues.append(r)
print(' %8s %8.4f %8.4f' % (resname[i], r.TotalEn[0], r.TotalEn[1]))
try:
fout = open(args.output, 'w')
except:
raise IOError('Could not open file {0} for writing. \n'.format(args.output))
try:
fmap = open(args.outmap, 'w')
except:
raise IOError('Could not open file {0} for writing. \n'.format(args.outmap))
fout.write(
'#Residues MM Energy(+/-)dev/error Polar Energy(+/-)dev/error APolar Energy(+/-)dev/error Total Energy(+/-)dev/error\n')
for i in range(len(resname)):
if (args.cutoff == 999):
fout.write("%-8s %4.4f %4.4f %4.4f %4.4f %4.4f %4.4f %4.4f %4.4f \n" % (
resname[i], Residues[i].FinalMM[0], Residues[i].FinalMM[1], Residues[i].FinalPol[0],
Residues[i].FinalPol[1], Residues[i].FinalAPol[0], Residues[i].FinalAPol[1], Residues[i].TotalEn[0],
Residues[i].TotalEn[1]))
elif (args.cutoff <= Residues[i].TotalEn[0]) or ((-1 * args.cutoff) >= Residues[i].TotalEn[0]):
fout.write("%-8s %4.4f %4.4f %4.4f %4.4f %4.4f %4.4f %4.4f %4.4f \n" % (
resname[i], Residues[i].FinalMM[0], Residues[i].FinalMM[1], Residues[i].FinalPol[0],
Residues[i].FinalPol[1], Residues[i].FinalAPol[0], Residues[i].FinalAPol[1], Residues[i].TotalEn[0],
Residues[i].TotalEn[1]))
fmap.write("%-8d %4.4f \n" % ((i + 1), Residues[i].TotalEn[0]))
class Residue(object):
def __init__(self):
self.FinalMM, self.FinalPol, self.FinalAPol, self.TotalEn = [], [], [], []
def BootStrap(self, x, step):
avg = []
x = np.array(x)
n = len(x)
idx = np.random.randint(0, n, (step, n))
sample_x = x[idx]
        avg = np.sort(np.mean(sample_x, 1))
        return np.mean(avg), np.std(avg)
import sys
import os
import numpy as np
from tqdm import tqdm
import json
import time as timemodu
from numba import jit, prange
import h5py
import fnmatch
import pandas as pd
import astropy
import astropy as ap
from astropy.io import fits
from astropy.coordinates import SkyCoord
import astropy.timeseries
import multiprocessing
from functools import partial
import scipy as sp
import scipy.interpolate
import astroquery
import astroquery.mast
import matplotlib as mpl
import matplotlib.pyplot as plt
# own modules
import tdpy
from tdpy import summgene
import lygos
import hattusa
def quer_mast(request):
from urllib.parse import quote as urlencode
import http.client as httplib
server='mast.stsci.edu'
# Grab Python Version
version = '.'.join(map(str, sys.version_info[:3]))
# Create Http Header Variables
headers = {'Content-type': 'application/x-www-form-urlencoded',
'Accept': 'text/plain',
'User-agent':'python-requests/'+version}
# Encoding the request as a json string
requestString = json.dumps(request)
requestString = urlencode(requestString)
# opening the https connection
conn = httplib.HTTPSConnection(server)
# Making the query
conn.request('POST', '/api/v0/invoke', 'request='+requestString, headers)
# Getting the response
resp = conn.getresponse()
head = resp.getheaders()
content = resp.read().decode('utf-8')
# Close the https connection
conn.close()
return head, content
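# Usage sketch for quer_mast, mirroring the request format used elsewhere in this module;
# 'listtici' is a placeholder list of TIC ID strings:
#   request = {'service': 'Mast.Catalogs.Filtered.Tic', 'format': 'json',
#              'params': {'columns': 'rad, mass',
#                         'filters': [{'paramName': 'ID', 'values': listtici}]}}
#   headers, outString = quer_mast(request)
#   data = json.loads(outString)['data']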
def xmat_tici(listtici):
if len(listtici) == 0:
        raise Exception('The input list of TIC IDs (listtici) is empty.')
# make sure the input is a python list of strings
if isinstance(listtici[0], str):
if isinstance(listtici, np.ndarray):
listtici = list(listtici)
else:
if isinstance(listtici, list):
listtici = np.array(listtici)
if isinstance(listtici, np.ndarray):
listtici = listtici.astype(str)
listtici = list(listtici)
request = {'service':'Mast.Catalogs.Filtered.Tic', 'format':'json', 'params':{'columns':'rad, mass', \
'filters':[{'paramName':'ID', 'values':listtici}]}}
headers, outString = quer_mast(request)
dictquer = json.loads(outString)['data']
return dictquer
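# Usage sketch for xmat_tici (placeholder TIC IDs; requires network access to MAST and returns
# a list of dictionaries with the 'rad' and 'mass' columns requested above):
#   dictquer = xmat_tici(['12345678', '23456789'])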
def retr_dictpopltic8(typepopl, numbsyst=None, typeverb=1):
'''
Get a dictionary of the sources in the TIC8 with the fields in the TIC8.
Keyword arguments
typepopl: type of the population
'ticihcon': TESS targets with contamination larger than
'ticim110': TESS targets brighter than mag 11
'ticim135': TESS targets brighter than mag 13.5
'tessnomi2min': 2-minute TESS targets obtained by merging the SPOC 2-min bulk downloads
Returns a dictionary with keys:
rasc: RA
decl: declination
tmag: TESS magnitude
radistar: radius of the star
massstar: mass of the star
'''
if typeverb > 0:
print('Retrieving a dictionary of TIC8 for population %s...' % typepopl)
if typepopl.startswith('tess'):
if typepopl[4:].startswith('nomi'):
listtsec = np.arange(1, 27)
elif typepopl[4:].endswith('extd'):
listtsec = np.arange(27, 39)
else:
listtsec = [int(typepopl[-2:])]
numbtsec = len(listtsec)
indxtsec = np.arange(numbtsec)
pathlistticidata = os.environ['EPHESUS_DATA_PATH'] + '/data/listticidata/'
os.system('mkdir -p %s' % pathlistticidata)
path = pathlistticidata + 'listticidata_%s.csv' % typepopl
if not os.path.exists(path):
# dictionary of strings that will be keys of the output dictionary
dictstrg = dict()
dictstrg['ID'] = 'tici'
dictstrg['ra'] = 'rasc'
dictstrg['dec'] = 'decl'
dictstrg['Tmag'] = 'tmag'
dictstrg['rad'] = 'radistar'
dictstrg['mass'] = 'massstar'
dictstrg['Teff'] = 'tmptstar'
dictstrg['logg'] = 'loggstar'
dictstrg['MH'] = 'metastar'
liststrg = list(dictstrg.keys())
print('typepopl')
print(typepopl)
if typepopl.startswith('tessnomi'):
if typepopl[8:12] == '20sc':
strgurll = '_20s_'
labltemp = '20-second'
if typepopl[8:12] == '2min':
strgurll = '_'
labltemp = '2-minute'
dictquer = dict()
listtici = []
for o in indxtsec:
if typepopl.endswith('bulk'):
pathtess = os.environ['TESS_DATA_PATH'] + '/data/lcur/sector-%02d' % listtsec[o]
listnamefile = fnmatch.filter(os.listdir(pathtess), '*.fits')
listticitsec = []
for namefile in listnamefile:
listticitsec.append(str(int(namefile.split('-')[2])))
listticitsec = np.array(listticitsec)
else:
url = 'https://tess.mit.edu/wp-content/uploads/all_targets%sS%03d_v1.csv' % (strgurll, listtsec[o])
c = pd.read_csv(url, header=5)
listticitsec = c['TICID'].values
listticitsec = listticitsec.astype(str)
numbtargtsec = listticitsec.size
if typeverb > 0:
print('%d observed %s targets in Sector %d...' % (numbtargtsec, labltemp, listtsec[o]))
if numbtargtsec > 0:
dictquertemp = xmat_tici(listticitsec)
if o == 0:
dictquerinte = dict()
for name in dictstrg.keys():
dictquerinte[dictstrg[name]] = [[] for o in indxtsec]
for name in dictstrg.keys():
for k in range(len(dictquertemp)):
dictquerinte[dictstrg[name]][o].append(dictquertemp[k][name])
print('Concatenating arrays from different sectors...')
for name in dictstrg.keys():
dictquer[dictstrg[name]] = np.concatenate(dictquerinte[dictstrg[name]])
u, indxuniq = np.unique(dictquer['tici'], return_index=True)
for name in dictstrg.keys():
dictquer[dictstrg[name]] = dictquer[dictstrg[name]][indxuniq]
numbtarg = dictquer['radistar'].size
if typeverb > 0:
print('%d observed 2-min targets...' % numbtarg)
elif typepopl.startswith('tici'):
if typepopl == 'ticihcon':
request = {'service':'Mast.Catalogs.Filtered.Tic.Rows', 'format':'json', 'params':{ \
'columns':'ID, Tmag, rad, mass, contratio', \
'filters':[{'paramName':'contratio', 'values':[{"min":10., "max":1e3}]}]}}
if typepopl == 'ticim110':
request = {'service':'Mast.Catalogs.Filtered.Tic.Rows', 'format':'json', 'params':{ \
'columns':'ID, Tmag, rad, mass', \
'filters':[{'paramName':'Tmag', 'values':[{"min":-100., "max":11}]}]}}
if typepopl == 'ticim135':
request = {'service':'Mast.Catalogs.Filtered.Tic.Rows', 'format':'json', 'params':{ \
'columns':'ID, Tmag, rad, mass', \
'filters':[{'paramName':'Tmag', 'values':[{"min":-100., "max":13.5}]}]}}
headers, outString = quer_mast(request)
listdictquer = json.loads(outString)['data']
if typeverb > 0:
print('%d matches...' % len(listdictquer))
dictquer = dict()
print('listdictquer[0].keys()')
print(listdictquer[0].keys())
for name in listdictquer[0].keys():
if name == 'ID':
namedict = 'tici'
if name == 'Tmag':
namedict = 'tmag'
if name == 'rad':
namedict = 'radi'
if name == 'mass':
namedict = 'mass'
dictquer[namedict] = np.empty(len(listdictquer))
for k in range(len(listdictquer)):
dictquer[namedict][k] = listdictquer[k][name]
else:
print('Unrecognized population name: %s' % typepopl)
raise Exception('')
if typeverb > 0:
#print('%d targets...' % numbtarg)
print('Writing to %s...' % path)
#columns = ['tici', 'radi', 'mass']
pd.DataFrame.from_dict(dictquer).to_csv(path, index=False)#, columns=columns)
else:
if typeverb > 0:
print('Reading from %s...' % path)
dictquer = pd.read_csv(path).to_dict(orient='list')
for name in dictquer.keys():
dictquer[name] = np.array(dictquer[name])
#del dictquer['Unnamed: 0']
return dictquer
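# Usage sketch (requires the EPHESUS_DATA_PATH environment variable and, on the first call,
# network access to MAST; the population name follows the docstring above):
#   dictpopl = retr_dictpopltic8('ticim110')
#   print(dictpopl['tmag'].size)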
def retr_objtlinefade(x, y, colr='black', initalph=1., alphfinl=0.):
colr = get_color(colr)
cdict = {'red': ((0.,colr[0],colr[0]),(1.,colr[0],colr[0])),
'green': ((0.,colr[1],colr[1]),(1.,colr[1],colr[1])),
'blue': ((0.,colr[2],colr[2]),(1.,colr[2],colr[2])),
'alpha': ((0.,initalph, initalph), (1., alphfinl, alphfinl))}
Npts = len(x)
if len(y) != Npts:
raise AttributeError("x and y must have same dimension.")
segments = np.zeros((Npts-1,2,2))
segments[0][0] = [x[0], y[0]]
for i in range(1,Npts-1):
pt = [x[i], y[i]]
segments[i-1][1] = pt
segments[i][0] = pt
segments[-1][1] = [x[-1], y[-1]]
individual_cm = mpl.colors.LinearSegmentedColormap('indv1', cdict)
lc = mpl.collections.LineCollection(segments, cmap=individual_cm)
lc.set_array(np.linspace(0.,1.,len(segments)))
return lc
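# Usage sketch for retr_objtlinefade; note that it relies on a get_color() helper defined
# elsewhere, so the color name here is only illustrative:
#   figr, axis = plt.subplots()
#   xdat = np.linspace(0., 1., 50)
#   objt = retr_objtlinefade(xdat, np.sin(6. * xdat), colr='black', initalph=1., alphfinl=0.)
#   axis.add_collection(objt)
#   axis.set_xlim(0., 1.)
#   axis.set_ylim(-1.1, 1.1)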
def retr_liststrgcomp(numbcomp):
liststrgcomp = np.array(['b', 'c', 'd', 'e', 'f', 'g'])[:numbcomp]
return liststrgcomp
def retr_listcolrcomp(numbcomp):
listcolrcomp = np.array(['magenta', 'orange', 'red', 'green', 'purple', 'cyan'])[:numbcomp]
return listcolrcomp
def plot_orbt( \
# path to write the plot
path, \
# radius of the planets [R_E]
radicomp, \
# sum of radius of planet and star divided by the semi-major axis
rsmacomp, \
# epoc of the planets [BJD]
epoc, \
# orbital periods of the planets [days]
peri, \
# cosine of the inclination
cosi, \
# type of visualization:
## 'realblac': dark background, black planets
## 'realblaclcur': dark backgound, luminous planets, with light curves
## 'realcolrlcur': dark background, colored planets, with light curves
## 'cartcolr': bright background, colored planets
typevisu, \
# radius of the star [R_S]
radistar=1., \
# mass of the star [M_S]
massstar=1., \
# Boolean flag to produce an animation
boolanim=False, \
# angle of view with respect to the orbital plane [deg]
anglpers=5., \
# size of the figure
sizefigr=(8, 8), \
listcolrcomp=None, \
liststrgcomp=None, \
boolsingside=True, \
## file type of the plot
typefileplot='pdf', \
# verbosity level
typeverb=1, \
):
dictfact = retr_factconv()
mpl.use('Agg')
numbcomp = len(radicomp)
if isinstance(radicomp, list):
radicomp = np.array(radicomp)
if isinstance(rsmacomp, list):
rsmacomp = np.array(rsmacomp)
if isinstance(epoc, list):
epoc = np.array(epoc)
if isinstance(peri, list):
peri = np.array(peri)
if isinstance(cosi, list):
cosi = np.array(cosi)
if listcolrcomp is None:
listcolrcomp = retr_listcolrcomp(numbcomp)
if liststrgcomp is None:
liststrgcomp = retr_liststrgcomp(numbcomp)
# semi-major axes of the planets [AU]
smax = (radicomp / dictfact['rsre'] + radistar) / dictfact['aurs'] / rsmacomp
indxcomp = np.arange(numbcomp)
# perspective factor
factpers = np.sin(anglpers * np.pi / 180.)
## scale factor for the star
factstar = 5.
## scale factor for the planets
factplan = 20.
# maximum y-axis value
maxmyaxi = 0.05
if typevisu == 'cartmerc':
# Mercury
smaxmerc = 0.387 # [AU]
radicompmerc = 0.3829 # [R_E]
# scaled radius of the star [AU]
radistarscal = radistar / dictfact['aurs'] * factstar
time = np.arange(0., 30., 2. / 60. / 24.)
numbtime = time.size
indxtime = np.arange(numbtime)
if boolanim:
numbiter = min(500, numbtime)
else:
numbiter = 1
indxiter = np.arange(numbiter)
xposmaxm = smax
yposmaxm = factpers * xposmaxm
numbtimequad = 10
if typevisu == 'realblaclcur':
numbtimespan = 100
# get transit model based on TESS ephemerides
rratcomp = radicomp / radistar
rflxtranmodl = retr_rflxtranmodl(time, pericomp=peri, epoccomp=epoc, rsmacomp=rsmacomp, cosicomp=cosi, rratcomp=rratcomp)['rflx'] - 1.
lcur = rflxtranmodl + np.random.randn(numbtime) * 1e-6
ylimrflx = [np.amin(lcur), np.amax(lcur)]
phas = np.random.rand(numbcomp)[None, :] * 2. * np.pi + 2. * np.pi * time[:, None] / peri[None, :]
yposelli = yposmaxm[None, :] * np.sin(phas)
xposelli = xposmaxm[None, :] * np.cos(phas)
# time indices for iterations
indxtimeiter = np.linspace(0., numbtime - numbtime / numbiter, numbiter).astype(int)
if typevisu.startswith('cart'):
colrstar = 'k'
colrface = 'w'
plt.style.use('default')
else:
colrface = 'k'
colrstar = 'w'
plt.style.use('dark_background')
if boolanim:
cmnd = 'convert -delay 5'
listpathtemp = []
for k in indxiter:
if typevisu == 'realblaclcur':
numbrows = 2
else:
numbrows = 1
figr, axis = plt.subplots(figsize=sizefigr)
### lower half of the star
w1 = mpl.patches.Wedge((0, 0), radistarscal, 180, 360, fc=colrstar, zorder=1, edgecolor=colrstar)
axis.add_artist(w1)
for jj, j in enumerate(indxcomp):
xposellishft = np.roll(xposelli[:, j], -indxtimeiter[k])[-numbtimequad:][::-1]
yposellishft = np.roll(yposelli[:, j], -indxtimeiter[k])[-numbtimequad:][::-1]
# trailing lines
if typevisu.startswith('cart'):
objt = retr_objtlinefade(xposellishft, yposellishft, colr=listcolrcomp[j], initalph=1., alphfinl=0.)
axis.add_collection(objt)
# add planets
if typevisu.startswith('cart'):
colrplan = listcolrcomp[j]
# add planet labels
axis.text(.6 + 0.03 * jj, 0.1, liststrgcomp[j], color=listcolrcomp[j], transform=axis.transAxes)
if typevisu.startswith('real'):
if typevisu == 'realillu':
colrplan = 'k'
else:
colrplan = 'black'
radi = radicomp[j] / dictfact['rsre'] / dictfact['aurs'] * factplan
            w1 = mpl.patches.Circle((xposelli[indxtimeiter[k], j], yposelli[indxtimeiter[k], j]), radius=radi, color=colrplan, zorder=3)
axis.add_artist(w1)
## upper half of the star
w1 = mpl.patches.Wedge((0, 0), radistarscal, 0, 180, fc=colrstar, zorder=4, edgecolor=colrstar)
axis.add_artist(w1)
if typevisu == 'cartmerc':
## add Mercury
axis.text(.387, 0.01, 'Mercury', color='grey', ha='right')
radi = radicompmerc / dictfact['rsre'] / dictfact['aurs'] * factplan
w1 = mpl.patches.Circle((smaxmerc, 0), radius=radi, color='grey')
axis.add_artist(w1)
# temperature axis
#axistwin = axis.twiny()
##axistwin.set_xlim(axis.get_xlim())
#xpostemp = axistwin.get_xticks()
##axistwin.set_xticks(xpostemp[1:])
#axistwin.set_xticklabels(['%f' % tmpt for tmpt in listtmpt])
# temperature contours
#for tmpt in [500., 700,]:
# smaj = tmpt
# axis.axvline(smaj, ls='--')
axis.get_yaxis().set_visible(False)
axis.set_aspect('equal')
if typevisu == 'cartmerc':
maxmxaxi = max(1.2 * np.amax(smax), 0.4)
else:
maxmxaxi = 1.2 * np.amax(smax)
if boolsingside:
minmxaxi = 0.
else:
minmxaxi = -maxmxaxi
axis.set_xlim([minmxaxi, maxmxaxi])
axis.set_ylim([-maxmyaxi, maxmyaxi])
axis.set_xlabel('Distance from the star [AU]')
if typevisu == 'realblaclcur':
print('indxtimeiter[k]')
print(indxtimeiter[k])
minmindxtime = max(0, indxtimeiter[k]-numbtimespan)
print('minmindxtime')
print(minmindxtime)
xtmp = time[minmindxtime:indxtimeiter[k]]
if len(xtmp) == 0:
continue
print('xtmp')
print(xtmp)
timescal = 2 * maxmxaxi * (xtmp - np.amin(xtmp)) / (np.amax(xtmp) - np.amin(xtmp)) - maxmxaxi
print('timescal')
print(timescal)
axis.scatter(timescal, 10000. * lcur[minmindxtime:indxtimeiter[k]] + maxmyaxi * 0.8, rasterized=True, color='cyan', s=0.5)
print('time[minmindxtime:indxtimeiter[k]]')
summgene(time[minmindxtime:indxtimeiter[k]])
print('lcur[minmindxtime:indxtimeiter[k]]')
summgene(lcur[minmindxtime:indxtimeiter[k]])
print('')
#plt.subplots_adjust()
#axis.legend()
if boolanim:
pathtemp = '%s_%s_%04d.%s' % (path, typevisu, k, typefileplot)
else:
pathtemp = '%s_%s.%s' % (path, typevisu, typefileplot)
print('Writing to %s...' % pathtemp)
plt.savefig(pathtemp)
plt.close()
if boolanim:
listpathtemp.append(pathtemp)
cmnd += ' %s' % pathtemp
if boolanim:
cmnd += ' %s_%s.gif' % (path, typevisu)
os.system(cmnd)
for pathtemp in listpathtemp:
cmnd = 'rm %s' % pathtemp
os.system(cmnd)
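# Hypothetical call sketch for plot_orbt; the values below are placeholders and 'cartcolr' is
# one of the typevisu options documented above (retr_factconv must be available in this module):
#   plot_orbt('/tmp/orbt', radicomp=[1.0, 2.5], rsmacomp=[0.05, 0.03],
#             epoc=[2458000., 2458001.], peri=[3.1, 7.4], cosi=[0., 0.],
#             typevisu='cartcolr', boolanim=False)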
def retr_dictpoplrvel(typeverb=1):
    if typeverb > 0:
        print("Reading Saul's Gaia high RV catalog...")
path = os.environ['TROIA_DATA_PATH'] + '/data/Gaia_high_RV_errors.txt'
for line in open(path):
listnamesaul = line[:-1].split('\t')
break
if typeverb > 0:
print('Reading from %s...' % path)
data = np.loadtxt(path, skiprows=1)
dictcatl = dict()
dictcatl['rasc'] = data[:, 0]
dictcatl['decl'] = data[:, 1]
dictcatl['stdvrvel'] = data[:, -4]
return dictcatl
def retr_dicthostplan(namepopl, typeverb=1):
pathlygo = os.environ['EPHESUS_DATA_PATH'] + '/'
path = pathlygo + 'data/dicthost%s.csv' % namepopl
if os.path.exists(path):
if typeverb > 0:
print('Reading from %s...' % path)
dicthost = pd.read_csv(path).to_dict(orient='list')
#del dicthost['Unnamed: 0']
for name in dicthost.keys():
dicthost[name] = np.array(dicthost[name])
else:
dicthost = dict()
if namepopl == 'toii':
dictplan = retr_dicttoii()
else:
dictplan = retr_dictexar()
listnamestar = np.unique(dictplan['namestar'])
dicthost['namestar'] = listnamestar
numbstar = listnamestar.size
indxstar = np.arange(numbstar)
listnamefeatstar = ['numbplanstar', 'numbplantranstar', 'radistar', 'massstar']
listnamefeatcomp = ['epoc', 'peri', 'duratrantotl', 'radicomp', 'masscomp']
for namefeat in listnamefeatstar:
dicthost[namefeat] = np.empty(numbstar)
for namefeat in listnamefeatcomp:
dicthost[namefeat] = [[] for k in indxstar]
for k in indxstar:
indx = np.where(dictplan['namestar'] == listnamestar[k])[0]
for namefeat in listnamefeatstar:
dicthost[namefeat][k] = dictplan[namefeat][indx[0]]
for namefeat in listnamefeatcomp:
dicthost[namefeat][k] = dictplan[namefeat][indx]
print('Writing to %s...' % path)
pd.DataFrame.from_dict(dicthost).to_csv(path, index=False)
return dicthost
def retr_dicttoii(toiitarg=None, boolreplexar=False, typeverb=1):
dictfact = retr_factconv()
pathlygo = os.environ['EPHESUS_DATA_PATH'] + '/'
pathexof = pathlygo + 'data/exofop_tess_tois.csv'
if typeverb > 0:
print('Reading from %s...' % pathexof)
objtexof = pd.read_csv(pathexof, skiprows=0)
dicttoii = {}
dicttoii['toii'] = objtexof['TOI'].values
numbcomp = dicttoii['toii'].size
indxcomp = np.arange(numbcomp)
toiitargexof = np.empty(numbcomp, dtype=object)
for k in indxcomp:
toiitargexof[k] = int(dicttoii['toii'][k])
if toiitarg is None:
indxcomp = np.arange(numbcomp)
else:
indxcomp = np.where(toiitargexof == toiitarg)[0]
dicttoii['toii'] = dicttoii['toii'][indxcomp]
numbcomp = indxcomp.size
if indxcomp.size == 0:
if typeverb > 0:
print('The host name, %s, was not found in the ExoFOP TOI Catalog.' % toiitarg)
return None
else:
dicttoii['namestar'] = np.empty(numbcomp, dtype=object)
dicttoii['nameplan'] = np.empty(numbcomp, dtype=object)
for kk, k in enumerate(indxcomp):
dicttoii['nameplan'][kk] = 'TOI-' + str(dicttoii['toii'][kk])
dicttoii['namestar'][kk] = 'TOI-' + str(dicttoii['toii'][kk])[:-3]
dicttoii['dept'] = objtexof['Depth (ppm)'].values[indxcomp] * 1e-3 # [ppt]
dicttoii['rrat'] = np.sqrt(dicttoii['dept'] * 1e-3)
dicttoii['radicomp'] = objtexof['Planet Radius (R_Earth)'][indxcomp].values
dicttoii['stdvradicomp'] = objtexof['Planet Radius (R_Earth) err'][indxcomp].values
rascstarstrg = objtexof['RA'][indxcomp].values
declstarstrg = objtexof['Dec'][indxcomp].values
dicttoii['rascstar'] = np.empty_like(dicttoii['radicomp'])
dicttoii['declstar'] = np.empty_like(dicttoii['radicomp'])
for k in range(dicttoii['radicomp'].size):
objt = astropy.coordinates.SkyCoord('%s %s' % (rascstarstrg[k], declstarstrg[k]), unit=(astropy.units.hourangle, astropy.units.deg))
dicttoii['rascstar'][k] = objt.ra.degree
dicttoii['declstar'][k] = objt.dec.degree
dicttoii['strgcomm'] = np.empty(numbcomp, dtype=object)
dicttoii['strgcomm'][:] = objtexof['Comments'][indxcomp].values
#objticrs = astropy.coordinates.SkyCoord(ra=dicttoii['rascstar']*astropy.units.degree, \
# dec=dicttoii['declstar']*astropy.units.degree, frame='icrs')
objticrs = astropy.coordinates.SkyCoord(ra=dicttoii['rascstar'], \
dec=dicttoii['declstar'], frame='icrs', unit='deg')
# transit duration
dicttoii['duratrantotl'] = objtexof['Duration (hours)'].values[indxcomp] # [hours]
# galactic longitude
dicttoii['lgalstar'] = np.array([objticrs.galactic.l])[0, :]
# galactic latitude
dicttoii['bgalstar'] = np.array([objticrs.galactic.b])[0, :]
# ecliptic longitude
dicttoii['loecstar'] = np.array([objticrs.barycentricmeanecliptic.lon.degree])[0, :]
# ecliptic latitude
dicttoii['laecstar'] = np.array([objticrs.barycentricmeanecliptic.lat.degree])[0, :]
dicttoii['tsmmacwg'] = objtexof['ACWG TSM'][indxcomp].values
dicttoii['esmmacwg'] = objtexof['ACWG ESM'][indxcomp].values
dicttoii['facidisc'] = np.empty(numbcomp, dtype=object)
dicttoii['facidisc'][:] = 'Transiting Exoplanet Survey Satellite (TESS)'
dicttoii['peri'] = objtexof['Period (days)'][indxcomp].values
dicttoii['peri'][np.where(dicttoii['peri'] == 0)] = np.nan
dicttoii['epoc'] = objtexof['Epoch (BJD)'][indxcomp].values
dicttoii['tmagsyst'] = objtexof['TESS Mag'][indxcomp].values
dicttoii['stdvtmagsyst'] = objtexof['TESS Mag err'][indxcomp].values
# transit duty cycle
dicttoii['dcyc'] = dicttoii['duratrantotl'] / dicttoii['peri'] / 24.
boolfrst = np.zeros(numbcomp)
dicttoii['numbplanstar'] = np.zeros(numbcomp)
liststrgfeatstartici = ['massstar', 'vmagsyst', 'jmagsyst', 'hmagsyst', 'kmagsyst', 'distsyst', 'metastar', 'radistar', 'tmptstar', 'loggstar']
liststrgfeatstarticiinhe = ['mass', 'Vmag', 'Jmag', 'Hmag', 'Kmag', 'd', 'MH', 'rad', 'Teff', 'logg']
numbstrgfeatstartici = len(liststrgfeatstartici)
indxstrgfeatstartici = np.arange(numbstrgfeatstartici)
for strgfeat in liststrgfeatstartici:
dicttoii[strgfeat] = np.zeros(numbcomp)
dicttoii['stdv' + strgfeat] = np.zeros(numbcomp)
## crossmatch with TIC
print('Retrieving TIC columns of TOI hosts...')
dicttoii['tici'] = objtexof['TIC ID'][indxcomp].values
listticiuniq = np.unique(dicttoii['tici'].astype(str))
request = {'service':'Mast.Catalogs.Filtered.Tic', 'format':'json', 'params':{'columns':"*", \
'filters':[{'paramName':'ID', 'values':list(listticiuniq)}]}}
headers, outString = quer_mast(request)
listdictquer = json.loads(outString)['data']
for k in range(len(listdictquer)):
indxtemp = np.where(dicttoii['tici'] == listdictquer[k]['ID'])[0]
if indxtemp.size == 0:
raise Exception('')
for n in indxstrgfeatstartici:
dicttoii[liststrgfeatstartici[n]][indxtemp] = listdictquer[k][liststrgfeatstarticiinhe[n]]
dicttoii['stdv' + liststrgfeatstartici[n]][indxtemp] = listdictquer[k]['e_' + liststrgfeatstarticiinhe[n]]
dicttoii['typedisptess'] = objtexof['TESS Disposition'][indxcomp].values
dicttoii['boolfpos'] = objtexof['TFOPWG Disposition'][indxcomp].values == 'FP'
# augment
dicttoii['numbplanstar'] = np.empty(numbcomp)
boolfrst = np.zeros(numbcomp, dtype=bool)
for kk, k in enumerate(indxcomp):
indxcompthis = np.where(dicttoii['namestar'][kk] == dicttoii['namestar'])[0]
if kk == indxcompthis[0]:
boolfrst[kk] = True
dicttoii['numbplanstar'][kk] = indxcompthis.size
dicttoii['numbplantranstar'] = dicttoii['numbplanstar']
dicttoii['lumistar'] = dicttoii['radistar']**2 * (dicttoii['tmptstar'] / 5778.)**4
dicttoii['stdvlumistar'] = dicttoii['lumistar'] * np.sqrt((2 * dicttoii['stdvradistar'] / dicttoii['radistar'])**2 + \
(4 * dicttoii['stdvtmptstar'] / dicttoii['tmptstar'])**2)
# mass from radii
path = pathlygo + 'exofop_toi_mass_saved.csv'
if not os.path.exists(path):
dicttemp = dict()
dicttemp['masscomp'] = np.ones_like(dicttoii['radicomp']) + np.nan
dicttemp['stdvmasscomp'] = np.ones_like(dicttoii['radicomp']) + np.nan
numbsamppopl = 10
indx = np.where(np.isfinite(dicttoii['radicomp']))[0]
for n in tqdm(range(indx.size)):
k = indx[n]
meanvarb = dicttoii['radicomp'][k]
stdvvarb = dicttoii['stdvradicomp'][k]
# if radius uncertainty is not available, assume that it is small, so the mass uncertainty will be dominated by population uncertainty
if not np.isfinite(stdvvarb):
stdvvarb = 1e-3 * dicttoii['radicomp'][k]
else:
stdvvarb = dicttoii['stdvradicomp'][k]
# sample from a truncated Gaussian
listradicomp = tdpy.samp_gaustrun(1000, dicttoii['radicomp'][k], stdvvarb, 0., np.inf)
# estimate the mass from samples
listmassplan = retr_massfromradi(listradicomp)
dicttemp['masscomp'][k] = np.mean(listmassplan)
dicttemp['stdvmasscomp'][k] = np.std(listmassplan)
if typeverb > 0:
print('Writing to %s...' % path)
pd.DataFrame.from_dict(dicttemp).to_csv(path, index=False)
else:
if typeverb > 0:
print('Reading from %s...' % path)
dicttemp = pd.read_csv(path).to_dict(orient='list')
for name in dicttemp:
dicttemp[name] = np.array(dicttemp[name])
if toiitarg is not None:
dicttemp[name] = dicttemp[name][indxcomp]
dicttoii['masscomp'] = dicttemp['masscomp']
dicttoii['stdvmasscomp'] = dicttemp['stdvmasscomp']
dicttoii['masstotl'] = dicttoii['massstar'] + dicttoii['masscomp'] / dictfact['msme']
dicttoii['smax'] = retr_smaxkepl(dicttoii['peri'], dicttoii['masstotl'])
dicttoii['inso'] = dicttoii['lumistar'] / dicttoii['smax']**2
dicttoii['tmptplan'] = dicttoii['tmptstar'] * np.sqrt(dicttoii['radistar'] / dicttoii['smax'] / 2. / dictfact['aurs'])
# temp check if factor of 2 is right
dicttoii['stdvtmptplan'] = np.sqrt((dicttoii['stdvtmptstar'] / dicttoii['tmptstar'])**2 + \
0.5 * (dicttoii['stdvradistar'] / dicttoii['radistar'])**2) / np.sqrt(2.)
dicttoii['densplan'] = 5.51 * dicttoii['masscomp'] / dicttoii['radicomp']**3 # [g/cm^3]
dicttoii['booltran'] = np.ones_like(dicttoii['toii'], dtype=bool)
dicttoii['vesc'] = retr_vesc(dicttoii['masscomp'], dicttoii['radicomp'])
print('temp: vsiistar and projoblq are NaNs')
dicttoii['vsiistar'] = np.ones(numbcomp) + np.nan
dicttoii['projoblq'] = np.ones(numbcomp) + np.nan
# replace confirmed planet features
if boolreplexar:
dictexar = retr_dictexar()
listdisptess = objtexof['TESS Disposition'][indxcomp].values.astype(str)
listdisptfop = objtexof['TFOPWG Disposition'][indxcomp].values.astype(str)
indxexofcpla = np.where((listdisptfop == 'CP') & (listdisptess == 'PC'))[0]
listticicpla = dicttoii['tici'][indxexofcpla]
numbticicpla = len(listticicpla)
indxticicpla = np.arange(numbticicpla)
for k in indxticicpla:
indxexartici = np.where((dictexar['tici'] == int(listticicpla[k])) & \
(dictexar['facidisc'] == 'Transiting Exoplanet Survey Satellite (TESS)'))[0]
indxexoftici = np.where(dicttoii['tici'] == int(listticicpla[k]))[0]
for strg in dictexar.keys():
if indxexartici.size > 0:
dicttoii[strg] = np.delete(dicttoii[strg], indxexoftici)
dicttoii[strg] = np.concatenate((dicttoii[strg], dictexar[strg][indxexartici]))
# calculate TSM and ESM
calc_tsmmesmm(dicttoii)
# turn zero TSM ACWG or ESM ACWG into NaN
indx = np.where(dicttoii['tsmmacwg'] == 0)[0]
dicttoii['tsmmacwg'][indx] = np.nan
indx = np.where(dicttoii['esmmacwg'] == 0)[0]
dicttoii['esmmacwg'][indx] = np.nan
return dicttoii
def calc_tsmmesmm(dictpopl, boolsamp=False):
if boolsamp:
numbsamp = 1000
else:
numbsamp = 1
numbcomp = dictpopl['masscomp'].size
listtsmm = np.empty((numbsamp, numbcomp)) + np.nan
listesmm = np.empty((numbsamp, numbcomp)) + np.nan
for n in range(numbcomp):
if not np.isfinite(dictpopl['tmptplan'][n]):
continue
if not np.isfinite(dictpopl['radicomp'][n]):
continue
if boolsamp:
if not np.isfinite(dictpopl['stdvradicomp'][n]):
stdv = dictpopl['radicomp'][n]
else:
stdv = dictpopl['stdvradicomp'][n]
listradicomp = tdpy.samp_gaustrun(numbsamp, dictpopl['radicomp'][n], stdv, 0., np.inf)
listmassplan = tdpy.samp_gaustrun(numbsamp, dictpopl['masscomp'][n], dictpopl['stdvmasscomp'][n], 0., np.inf)
if not np.isfinite(dictpopl['stdvtmptplan'][n]):
stdv = dictpopl['tmptplan'][n]
else:
stdv = dictpopl['stdvtmptplan'][n]
listtmptplan = tdpy.samp_gaustrun(numbsamp, dictpopl['tmptplan'][n], stdv, 0., np.inf)
if not np.isfinite(dictpopl['stdvradistar'][n]):
stdv = dictpopl['radistar'][n]
else:
stdv = dictpopl['stdvradistar'][n]
listradistar = tdpy.samp_gaustrun(numbsamp, dictpopl['radistar'][n], stdv, 0., np.inf)
listkmagsyst = tdpy.icdf_gaus(np.random.rand(numbsamp), dictpopl['kmagsyst'][n], dictpopl['stdvkmagsyst'][n])
listjmagsyst = tdpy.icdf_gaus(np.random.rand(numbsamp), dictpopl['jmagsyst'][n], dictpopl['stdvjmagsyst'][n])
listtmptstar = tdpy.samp_gaustrun(numbsamp, dictpopl['tmptstar'][n], dictpopl['stdvtmptstar'][n], 0., np.inf)
else:
listradicomp = dictpopl['radicomp'][None, n]
listtmptplan = dictpopl['tmptplan'][None, n]
listmassplan = dictpopl['masscomp'][None, n]
listradistar = dictpopl['radistar'][None, n]
listkmagsyst = dictpopl['kmagsyst'][None, n]
listjmagsyst = dictpopl['jmagsyst'][None, n]
listtmptstar = dictpopl['tmptstar'][None, n]
# TSM
listtsmm[:, n] = retr_tsmm(listradicomp, listtmptplan, listmassplan, listradistar, listjmagsyst)
# ESM
listesmm[:, n] = retr_esmm(listtmptplan, listtmptstar, listradicomp, listradistar, listkmagsyst)
#if (listesmm[:, n] < 1e-10).any():
# print('listradicomp')
# summgene(listradicomp)
# print('listtmptplan')
# summgene(listtmptplan)
# print('listmassplan')
# summgene(listmassplan)
# print('listradistar')
# summgene(listradistar)
# print('listkmagsyst')
# summgene(listkmagsyst)
# print('listjmagsyst')
# summgene(listjmagsyst)
# print('listtmptstar')
# summgene(listtmptstar)
# print('listesmm[:, n]')
# summgene(listesmm[:, n])
# raise Exception('')
dictpopl['tsmm'] = np.nanmedian(listtsmm, 0)
dictpopl['stdvtsmm'] = np.nanstd(listtsmm, 0)
dictpopl['esmm'] = np.nanmedian(listesmm, 0)
dictpopl['stdvesmm'] = np.nanstd(listesmm, 0)
#print('listesmm')
#summgene(listesmm)
#print('dictpopl[tsmm]')
#summgene(dictpopl['tsmm'])
#print('dictpopl[esmm]')
#summgene(dictpopl['esmm'])
#print('dictpopl[stdvtsmm]')
#summgene(dictpopl['stdvtsmm'])
#print('dictpopl[stdvesmm]')
#summgene(dictpopl['stdvesmm'])
#raise Exception('')
def retr_reso(listperi, maxmordr=10):
if np.where(listperi == 0)[0].size > 0:
        raise Exception('listperi contains a zero period.')
numbsamp = listperi.shape[0]
numbcomp = listperi.shape[1]
indxcomp = np.arange(numbcomp)
listratiperi = np.zeros((numbsamp, numbcomp, numbcomp))
intgreso = np.zeros((numbcomp, numbcomp, 2))
for j in indxcomp:
for jj in indxcomp:
if j >= jj:
continue
rati = listperi[:, j] / listperi[:, jj]
#print('listperi')
#print(listperi)
#print('rati')
#print(rati)
            # keep the ratio of the longer to the shorter period, element-wise over samples
            listratiperi[:, j, jj] = np.where(rati < 1., 1. / rati, rati)
minmdiff = 1e100
for a in range(1, maxmordr):
for aa in range(1, maxmordr):
diff = abs(float(a) / aa - listratiperi[:, j, jj])
if np.mean(diff) < minmdiff:
minmdiff = np.mean(diff)
minmreso = a, aa
intgreso[j, jj, :] = minmreso
#print('minmdiff')
#print(minmdiff)
#print('minmreso')
#print(minmreso)
#print
return intgreso, listratiperi
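# Worked sketch for retr_reso: with period samples for two planets near a 2:1 commensurability,
#   listperi = np.array([[10.0, 20.1]])
#   intgreso, listratiperi = retr_reso(listperi)
# listratiperi[0, 0, 1] is about 2.01 and intgreso[0, 1] should come out close to (2, 1).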
def retr_dilu(tmpttarg, tmptcomp, strgwlentype='tess'):
if strgwlentype != 'tess':
        raise Exception('Only the TESS bandpass is implemented for the dilution calculation.')
else:
binswlen = np.linspace(0.6, 1.)
meanwlen = (binswlen[1:] + binswlen[:-1]) / 2.
diffwlen = (binswlen[1:] - binswlen[:-1]) / 2.
fluxtarg = tdpy.retr_specbbod(tmpttarg, meanwlen)
fluxtarg = np.sum(diffwlen * fluxtarg)
fluxcomp = tdpy.retr_specbbod(tmptcomp, meanwlen)
fluxcomp = np.sum(diffwlen * fluxcomp)
dilu = 1. - fluxtarg / (fluxtarg + fluxcomp)
return dilu
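# Worked sketch of the dilution defined above: for a target and a companion of equal temperature
# the band-integrated fluxes are equal, so dilu = 1 - F_targ / (F_targ + F_comp) evaluates to 0.5
# (requires tdpy.retr_specbbod):
#   retr_dilu(5800., 5800.)   # ~0.5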
def anim_tmptdete(timefull, lcurfull, meantimetmpt, lcurtmpt, pathimag, listindxtimeposimaxm, corrprod, corr, strgextn='', \
## file type of the plot
typefileplot='pdf', \
colr=None):
numbtimefull = timefull.size
numbtimekern = lcurtmpt.size
numbtimefullruns = numbtimefull - numbtimekern
indxtimefullruns = np.arange(numbtimefullruns)
listpath = []
cmnd = 'convert -delay 20'
numbtimeanim = min(200, numbtimefullruns)
indxtimefullrunsanim = np.random.choice(indxtimefullruns, size=numbtimeanim, replace=False)
indxtimefullrunsanim = np.sort(indxtimefullrunsanim)
for tt in indxtimefullrunsanim:
path = pathimag + 'lcur%s_%08d.%s' % (strgextn, tt, typefileplot)
listpath.append(path)
if not os.path.exists(path):
plot_tmptdete(timefull, lcurfull, tt, meantimetmpt, lcurtmpt, path, listindxtimeposimaxm, corrprod, corr)
cmnd += ' %s' % path
pathanim = pathimag + 'lcur%s.gif' % strgextn
cmnd += ' %s' % pathanim
print('cmnd')
print(cmnd)
os.system(cmnd)
cmnd = 'rm'
for path in listpath:
cmnd += ' ' + path
os.system(cmnd)
def plot_tmptdete(timefull, lcurfull, tt, meantimetmpt, lcurtmpt, path, listindxtimeposimaxm, corrprod, corr):
numbtimekern = lcurtmpt.size
indxtimekern = np.arange(numbtimekern)
numbtimefull = lcurfull.size
numbtimefullruns = numbtimefull - numbtimekern
indxtimefullruns = np.arange(numbtimefullruns)
difftime = timefull[1] - timefull[0]
figr, axis = plt.subplots(5, 1, figsize=(8, 11))
# plot the whole light curve
proc_axiscorr(timefull, lcurfull, axis[0], listindxtimeposimaxm)
# plot zoomed-in light curve
minmindx = max(0, tt - int(numbtimekern / 4))
maxmindx = min(numbtimefullruns - 1, tt + int(5. * numbtimekern / 4))
indxtime = np.arange(minmindx, maxmindx + 1)
print('indxtime')
summgene(indxtime)
proc_axiscorr(timefull, lcurfull, axis[1], listindxtimeposimaxm, indxtime=indxtime)
# plot template
axis[2].plot(timefull[0] + meantimetmpt + tt * difftime, lcurtmpt, color='b', marker='v')
axis[2].set_ylabel('Template')
axis[2].set_xlim(axis[1].get_xlim())
# plot correlation
axis[3].plot(timefull[0] + meantimetmpt + tt * difftime, corrprod[tt, :], color='red', marker='o')
axis[3].set_ylabel('Correlation')
axis[3].set_xlim(axis[1].get_xlim())
# plot the whole total correlation
print('indxtimefullruns')
summgene(indxtimefullruns)
print('timefull')
summgene(timefull)
print('corr')
summgene(corr)
axis[4].plot(timefull[indxtimefullruns], corr, color='m', marker='o', ms=1, rasterized=True)
axis[4].set_ylabel('Total correlation')
titl = 'C = %.3g' % corr[tt]
axis[0].set_title(titl)
limtydat = axis[0].get_ylim()
axis[0].fill_between(timefull[indxtimekern+tt], limtydat[0], limtydat[1], alpha=0.4)
print('Writing to %s...' % path)
plt.savefig(path)
plt.close()
def proc_axiscorr(time, lcur, axis, listindxtimeposimaxm, indxtime=None, colr='k', timeoffs=2457000):
if indxtime is None:
indxtimetemp = np.arange(time.size)
else:
indxtimetemp = indxtime
axis.plot(time[indxtimetemp], lcur[indxtimetemp], ls='', marker='o', color=colr, rasterized=True, ms=0.5)
maxmydat = axis.get_ylim()[1]
for kk in range(len(listindxtimeposimaxm)):
if listindxtimeposimaxm[kk] in indxtimetemp:
axis.plot(time[listindxtimeposimaxm[kk]], maxmydat, marker='v', color='b')
#print('timeoffs')
#print(timeoffs)
#axis.set_xlabel('Time [BJD-%d]' % timeoffs)
axis.set_ylabel('Relative flux')
def srch_flar(time, lcur, typeverb=1, strgextn='', numbkern=3, minmscalfalltmpt=None, maxmscalfalltmpt=None, \
pathimag=None, boolplot=True, boolanim=False, thrs=None):
minmtime = np.amin(time)
timeflartmpt = 0.
amplflartmpt = 1.
scalrisetmpt = 0. / 24.
difftime = np.amin(time[1:] - time[:-1])
print('time')
summgene(time)
print('difftime')
print(difftime)
if minmscalfalltmpt is None:
minmscalfalltmpt = 3 * difftime
if maxmscalfalltmpt is None:
maxmscalfalltmpt = 3. / 24.
if typeverb > 1:
print('lcurtmpt')
summgene(lcurtmpt)
indxscalfall = np.arange(numbkern)
listscalfalltmpt = np.linspace(minmscalfalltmpt, maxmscalfalltmpt, numbkern)
print('listscalfalltmpt')
print(listscalfalltmpt)
listcorr = []
listlcurtmpt = [[] for k in indxscalfall]
meantimetmpt = [[] for k in indxscalfall]
for k in indxscalfall:
numbtimekern = 3 * int(listscalfalltmpt[k] / difftime)
print('numbtimekern')
print(numbtimekern)
meantimetmpt[k] = np.arange(numbtimekern) * difftime
print('meantimetmpt[k]')
summgene(meantimetmpt[k])
if numbtimekern == 0:
raise Exception('')
listlcurtmpt[k] = hattusa.retr_lcurmodl_flarsing(meantimetmpt[k], timeflartmpt, amplflartmpt, scalrisetmpt, listscalfalltmpt[k])
if not np.isfinite(listlcurtmpt[k]).all():
raise Exception('')
corr, listindxtimeposimaxm, timefull, lcurfull = corr_tmpt(time, lcur, meantimetmpt, listlcurtmpt, thrs=thrs, boolanim=boolanim, boolplot=boolplot, \
typeverb=typeverb, strgextn=strgextn, pathimag=pathimag)
#corr, listindxtimeposimaxm, timefull, rflxfull = corr_tmpt(gdat.timethis, gdat.rflxthis, gdat.listtimetmpt, gdat.listdflxtmpt, \
# thrs=gdat.thrstmpt, boolanim=gdat.boolanimtmpt, boolplot=gdat.boolplottmpt, \
# typeverb=gdat.typeverb, strgextn=gdat.strgextnthis, pathimag=gdat.pathtargimag)
return corr, listindxtimeposimaxm, meantimetmpt, timefull, lcurfull
#@jit(nopython=True, parallel=True, fastmath=True, nogil=True)
def corr_arryprod(lcurtemp, lcurtmpt, numbkern):
# correlate
corrprod = [[] for k in range(numbkern)]
for k in range(numbkern):
corrprod[k] = lcurtmpt[k] * lcurtemp[k]
return corrprod
#@jit(parallel=True)
def corr_copy(indxtimefullruns, lcurstan, indxtimekern, numbkern):
'''
Make a matrix with rows as the shifted and windowed copies of the time series.
'''
print('corr_copy()')
listlcurtemp = [[] for k in range(numbkern)]
for k in range(numbkern):
numbtimefullruns = indxtimefullruns[k].size
numbtimekern = indxtimekern[k].size
listlcurtemp[k] = np.empty((numbtimefullruns, numbtimekern))
print('k')
print(k)
print('numbtimefullruns')
print(numbtimefullruns)
print('numbtimekern')
print(numbtimekern)
for t in range(numbtimefullruns):
listlcurtemp[k][t, :] = lcurstan[indxtimefullruns[k][t]+indxtimekern[k]]
print('listlcurtemp[k]')
summgene(listlcurtemp[k])
print('')
return listlcurtemp
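# --- Illustrative sketch (added for clarity; not part of the original module) ---
# corr_copy() above builds, for each kernel, a matrix whose rows are shifted,
# kernel-length windows of the standardized light curve. The helper below shows
# the same construction on a toy array; every name in it is hypothetical.
def _demo_shifted_windows():
    lcurstan = np.arange(10, dtype=float)               # toy "light curve"
    numbtimekern = 3                                     # toy kernel length
    indxtimekern = np.arange(numbtimekern)
    indxtimefullruns = np.arange(lcurstan.size - numbtimekern)
    windows = np.empty((indxtimefullruns.size, numbtimekern))
    for t in indxtimefullruns:
        windows[t, :] = lcurstan[t + indxtimekern]       # one shifted copy per row
    return windows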
def corr_tmpt(time, lcur, meantimetmpt, listlcurtmpt, typeverb=2, thrs=None, strgextn='', pathimag=None, boolplot=True, \
## file type of the plot
typefileplot='pdf', \
boolanim=False, \
):
timeoffs =
|
np.amin(time)
|
numpy.amin
|
import numpy as np
import pandas as pd
from skimage.morphology import watershed, dilation, disk, reconstruction
from skimage.transform import resize
from skimage.measure import regionprops, label
from tqdm import trange, tqdm
from joblib import Parallel, delayed
import time
import asyncio
def background(f):
    def wrapped(*args, **kwargs):
        return asyncio.get_event_loop().run_in_executor(None, f, *args, **kwargs)
    # return the wrapper so the decorator actually produces a callable
    return wrapped
def get_names(feat_list):
"""
feat_list: list of feature defined in 'feature_object'
Returns list of the feature names.
"""
names = []
for el in feat_list:
if el.size != 1:
for it in range(el.size):
names.append(el._return_name()[it])
else:
names.append(el._return_name())
return names
def clear_marge(bin, marge):
"""
Removes the object within the margins of a given binary image
bin: binary image
marge: integer
"""
if marge is not None and marge != 0:
time1 = time.time()
seed = np.zeros_like(bin)
seed[marge:-marge, marge:-marge] = 1
mask = bin.copy()
mask[mask > 0] = 1
mask[marge:-marge, marge:-marge] = 1
time2 = time.time()
reconstructed = reconstruction(seed, mask, 'dilation')
bin[reconstructed == 0] = 0
time3 = time.time()
seed = np.ones_like(bin)
seed[marge:-marge, marge:-marge] = 0
mask = bin.copy()
mask[mask > 0] = 1
mask[seed > 0] = 1
time4 = time.time()
reconstructed = reconstruction(seed, mask, 'dilation')
frontier = bin.copy()
time5 = time.time()
frontier[reconstructed == 0] = 0
front_lb = label(frontier)
front_obj = regionprops(front_lb)
to_remove = np.zeros_like(bin)
time6 = time.time()
for obj in front_obj:
x, y = obj.centroid
if not (marge < x and x < (bin.shape[0] - marge)) or not (marge < y and y < (bin.shape[1] - marge)):
lb = obj.label
to_remove[front_lb == lb] = 1
time7 = time.time()
print(f"clear_marge Step1 {(time2 - time1)*1000} ms")
print(f"clear_marge Step2 {(time3 - time2)*1000} ms")
print(f"clear_marge Step3 {(time4 - time3)*1000} ms")
print(f"clear_marge Step4 {(time5 - time4)*1000} ms")
print(f"clear_marge Step5 {(time6 - time5)*1000} ms")
print(f"clear_marge Step6 {(time7 - time6)*1000} ms")
bin[to_remove > 0] = 0
return bin
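# --- Illustrative sketch (added; not part of the original module) ---
# Hypothetical use of clear_marge(): a 10x10 binary image with one object whose
# centroid lies inside the 2-pixel margin and one interior object; only the
# interior object is expected to survive.
def _demo_clear_marge():
    img = np.zeros((10, 10), dtype=np.uint8)
    img[0:3, 0:3] = 1      # object overlapping the margin
    img[5:8, 5:8] = 1      # interior object
    cleaned = clear_marge(img.copy(), marge=2)
    return cleaned         # expected: only the interior block remains non-zero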
import time
def needed_grown_region(list_feature):
"""
Looks if any of the features needs a specific growing of the objects by dilation.
"""
res = []
for feat in list_feature:
if feat._return_n_extension() not in res:
res += [feat._return_n_extension()]
return res
def bin_analyser(rgb_image, bin_image, list_feature,
marge=None, pandas_table=False, do_label=True):
"""
for each object in the bin image (in the margin), for each feature in list_feature
bin_analyser returns a table (maybe pandas) where each line corresponds to a object
and has many features.
"""
time1 = time.time()
bin_image_copy = bin_image.copy()
p = 0
for feat in list_feature:
p += feat.size
# import pdb; pdb.set_trace()
bin_image_copy = clear_marge(bin_image_copy, marge)
time12 = time.time()
if do_label:
bin_image_copy = label(bin_image_copy)
time13 = time.time()
if len(
|
np.unique(bin_image_copy)
|
numpy.unique
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 4 13:24:39 2017
@author: tkoller
"""
import builtins
import numpy as np
import pytest
import sys
from scipy.optimize import approx_fprime
from ..environments import InvertedPendulum, CartPole
from ..utils import sample_inside_polytope
|
np.random.seed(0)
|
numpy.random.seed
|
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
# Converts a Tensor into an image array (numpy)
# select the 1st image in a batch.
# |imtype|: the desired type of the converted numpy array
def tensor2im(input_image, imtype=np.uint8):
if isinstance(input_image, torch.Tensor):
image_tensor = input_image.data
else:
return input_image
image_numpy = image_tensor[0].cpu().float().numpy()
if image_numpy.shape[0] == 1:
image_numpy = np.tile(image_numpy, (3, 1, 1))
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
return image_numpy.astype(imtype)
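# --- Illustrative sketch (added; not part of the original utility file) ---
# Hypothetical round trip: a random image tensor in [-1, 1] is mapped by
# tensor2im() to an HxWx3 uint8 array that save_image() below could write to disk.
def _demo_tensor2im():
    fake_batch = torch.rand(1, 3, 8, 8) * 2 - 1          # values roughly in [-1, 1]
    image_numpy = tensor2im(fake_batch)                   # -> (8, 8, 3) uint8 array
    return image_numpy.shape, image_numpy.dtype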
def diagnose_network(net, name='network'):
mean = 0.0
count = 0
for param in net.parameters():
if param.grad is not None:
mean += torch.mean(torch.abs(param.grad.data))
count += 1
if count > 0:
mean = mean / count
print(name)
print(mean)
def save_image(image_numpy, image_path):
image_pil = Image.fromarray(image_numpy)
image_pil.save(image_path)
def print_numpy(x, val=True, shp=False):
x = x.astype(np.float64)
if shp:
print('shape,', x.shape)
if val:
x = x.flatten()
print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
np.mean(x), np.min(x), np.max(x),
|
np.median(x)
|
numpy.median
|
import time
from collections import Counter
import numpy as np
import os
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from Hongbog.EyeVerification.native_v3.constants import *
from Hongbog.EyeVerification.native_v3.multi_scale_mobilenet_v2_model import Model
from Hongbog.EyeVerification.native_v3.multi_scale_dataloader import DataLoader
from Hongbog.EyeVerification.native_v3.cam import GradCAM
class Neuralnet:
def __init__(self, is_logging, save_type=None):
self.is_logging = is_logging
self.save_type = save_type
self.loader = DataLoader(batch_size=flags.FLAGS.batch_size,
train_right_root_path=flags.FLAGS.right_train_data_path,
test_right_root_path=flags.FLAGS.right_test_data_path,
train_left_root_path=flags.FLAGS.left_train_data_path,
test_left_root_path=flags.FLAGS.left_test_data_path)
def train(self):
self.loader.train_init()
print('>> Train DataLoader created')
train_num = self.loader.train_right_x_len // flags.FLAGS.batch_size
train_right_low1, train_right_low2, train_right_low3, train_right_low4, train_right_low5, train_right_low6,\
train_left_low1, train_left_low2, train_left_low3, train_left_low4, train_left_low5, train_left_low6 = self.loader.train_low_loader()
train_right_mid1, train_right_mid2, train_right_mid3, train_right_mid4, train_right_mid5, train_right_mid16, \
train_left_mid1, train_left_mid2, train_left_mid3, train_left_mid4, train_left_mid5, train_left_mid6 = self.loader.train_mid_loader()
train_right_high1, train_right_high2, train_right_high3, train_right_high4, train_right_high5, train_right_high6, \
train_left_high1, train_left_high2, train_left_high3, train_left_high4, train_left_high5, train_left_high6 = self.loader.train_high_loader()
config = tf.ConfigProto(
gpu_options=tf.GPUOptions(allow_growth=True, per_process_gpu_memory_fraction=0.7)
)
with tf.Session(config=config) as sess:
right_model = Model(sess=sess, lr=flags.FLAGS.learning_rate, is_training=True, is_logging=self.is_logging, name='right')
left_model = Model(sess=sess, lr=flags.FLAGS.learning_rate, is_training=True, is_logging=self.is_logging, name='left')
print('>> Tensorflow session built. Variables initialized')
sess.run(tf.global_variables_initializer())
'''Create directories for saving the training data and the TensorBoard monitoring logs'''
if self.is_logging:
os.makedirs(flags.FLAGS.trained_weight_dir, exist_ok=True)
os.makedirs(os.path.join(flags.FLAGS.tensorboard_log_dir, 'train', 'right'), exist_ok=True)
os.makedirs(os.path.join(flags.FLAGS.tensorboard_log_dir, 'train', 'left'), exist_ok=True)
os.makedirs(os.path.join(flags.FLAGS.tensorboard_log_dir, 'test', 'right'), exist_ok=True)
os.makedirs(os.path.join(flags.FLAGS.tensorboard_log_dir, 'test', 'left'), exist_ok=True)
'''Save the TensorFlow graph'''
tf.train.write_graph(sess.graph_def, flags.FLAGS.trained_weight_dir, 'graph.pbtxt')
print('>> Graph saved')
self._saver = tf.train.Saver(var_list=tf.global_variables())
ckpt_st = tf.train.get_checkpoint_state(os.path.join(flags.FLAGS.trained_weight_dir))
if ckpt_st is not None:
'''tf.global_variables_initializer() is not needed when restoring.'''
#self._saver.restore(sess, ckpt_st.model_checkpoint_path)
print('>> Model Restored')
'''Create FileWriters for TensorBoard logging'''
if self.is_logging:
train_right_writer = tf.summary.FileWriter(flags.FLAGS.tensorboard_log_dir + '\\train\\right', graph=tf.get_default_graph())
train_left_writer = tf.summary.FileWriter(flags.FLAGS.tensorboard_log_dir + '\\train\\left', graph=tf.get_default_graph())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
print('>> Running started')
for epoch in range(1, flags.FLAGS.epochs+1):
tot_train_right_acc, tot_train_right_loss = [], []
tot_train_left_acc, tot_train_left_loss = [], []
if epoch % 5 == 0:
right_model.lr = max(right_model.lr / 2, 0.0001)
left_model.lr = max(left_model.lr / 2, 0.0001)
'''Model Train'''
train_st = time.time()
for step in range(1, train_num+1):
'''Data loading - per low, middle, high scale (300 images each for right and left)'''
train_low_data = sess.run([train_right_low1, train_right_low2, train_right_low3, train_right_low4, train_right_low5, train_right_low6,
train_left_low1, train_left_low2, train_left_low3, train_left_low4, train_left_low5, train_left_low6])
train_low_right_batch_x, train_low_right_batch_y = np.concatenate([right_data[0] for right_data in train_low_data[:6]]), np.concatenate([right_data[1] for right_data in train_low_data[:6]])
train_low_left_batch_x, train_low_left_batch_y = np.concatenate([left_data[0] for left_data in train_low_data[6:]]), np.concatenate([left_data[1] for left_data in train_low_data[6:]])
train_mid_data = sess.run([train_right_mid1, train_right_mid2, train_right_mid3, train_right_mid4, train_right_mid5, train_right_mid16,
train_left_mid1, train_left_mid2, train_left_mid3, train_left_mid4, train_left_mid5, train_left_mid6])
train_mid_right_batch_x, train_mid_right_batch_y =
|
np.concatenate([right_data[0] for right_data in train_mid_data[:6]])
|
numpy.concatenate
|
#!/usr/local/sci/bin/python
# PYTHON3
#
# Author: <NAME>
# Created: 8th October 2015
# Last update: 24th July 2020
# Location: /data/local/hadkw/HADCRUH2/UPDATE2014/PROGS/PYTHON/ # this will probably change
# GitHub: https://github.com/Kate-Willett/Climate_Explorer/tree/master/PYTHON/
# -----------------------
# CODE PURPOSE AND OUTPUT
# -----------------------
# Reads in monthly mean anomaly regional average time series for q, T and RH from HadISDH
# Can plot monthly or annual data
# Can plot one region or all four
# For a one region plot it can be annual, monthly or seasonal (DJF, MAM, JJA, SON)
# Plots a T q scatter with each year as the point (or MONYY for monthly)
# Colours the points by simultaneous RH value
# Plots RH colour bar to the right
# Adds Tq and TRH correlation to plot
# Adds Tq and TRH slope to plot
#
# NO MISSING DATA IN TIME SERIES!!!!
#
# <references to related published material, e.g. that describes data set>
#
# -----------------------
# LIST OF MODULES
# -----------------------
# Inbuilt:
# import matplotlib.pyplot as plt
# import numpy as np
# import numpy.ma as ma
# import sys, os
# import scipy.stats as ss # for pearsonr
# import struct
# import datetime as dt
# from matplotlib.dates import date2num,num2date
# from scipy.io import netcdf
# import matplotlib.colors as mc
# import matplotlib.cm as mpl_cm
# import pdb
#
# Other:
# ReadNetCDFTS - infile function to read in netCDF timeseries, written by <NAME>
# PlotScatter - infile function to plot, written by <NAME>
#
# -----------------------
# DATA
# -----------------------
# directory for regional timeseries:
# /data/local/hadkw/HADCRUH2/UPDATE2014/STATISTICS/TIMESERIES/
# files currently worked on:
# Specific humidity:
# HadISDH.landq.2.0.1.2014p_FLATgridIDPHA5by5_JAN2015_areaTS_19732014.nc
# Relative humidity:
# HadISDH.landRH.2.0.1.2014p_FLATgridIDPHA5by5_JAN2015_areaTS_19732014.nc
# Temperature:
# HadISDH.landT.2.0.1.2014p_FLATgridIDPHA5by5_JAN2015_areaTS_19732014.nc
#
# -----------------------
# HOW TO RUN THE CODE
# -----------------------
# Select 'TimeRes' to be 'M' or 'Y' for month or year
# Ensure correct file paths and files
# Ensure start year (styr) and end year (edyr) are correct
# Select 'Region' to be 'A', 'G','N','T' or 'S' for All, Globe, NHemi, Tropics, SHemi
#
# run:
# python2.7 PlotTqRhScatter_OCT2015.py
# python3
# > module load scitools/default-current
# > python PlotTqRHScatter_OCT2015.py
#
# -----------------------
# OUTPUT
# -----------------------
# directory for output images:
# /data/local/hadkw/HADCRUH2/UPDATE2014/IMAGES/ANALYSIS/
# Output image file: (nowmon+nowyear= e.g., OCT2015):
# ScatterTqRH_HadISDH.landq.2.0.1.2014p_'+nowmon+nowyear+
#
# -----------------------
# VERSION/RELEASE NOTES
# -----------------------
#
# Version 3 16 April 2018
# ---------
#
# Enhancements
# python 3
# netCDF4
# masked arrays to deal with missing data
# Can now do seasonal for individual regions
#
# Changes
#
# Bug fixes
#
# Version 3 16 April 2018
# ---------
#
# Enhancements
# Updated editable info so fewer edits are required to run for the most recent version/year
#
# Changes
#
# Bug fixes
#
# Version 2 9 August 2016
# ---------
#
# Enhancements
# Can also plot T vs RH coloured by q anomaly
#
# Changes
#
# Bug fixes
#
# Version 1 8 October 2015
# ---------
#
# Enhancements
#
# Changes
#
# Bug fixes
#
# -----------------------
# OTHER INFORMATION
# -----------------------
#
#************************************************************************
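#************************************************************************
# Illustrative sketch (added for clarity; NOT part of the original analysis script).
# The plots described above reduce to: scatter T anomalies against q anomalies,
# colour each point by the simultaneous RH anomaly, and annotate the correlation
# and regression slope. A minimal stand-alone version with made-up data follows;
# every name in it is hypothetical and the imports are local to the sketch.
def _demo_tq_scatter_sketch():
    import numpy as np
    import matplotlib.pyplot as plt
    import scipy.stats as ss
    t_anom = np.random.randn(40)                        # fake T anomalies
    q_anom = 0.5 * t_anom + 0.1 * np.random.randn(40)   # fake q anomalies
    rh_anom = np.random.randn(40)                        # fake RH anomalies
    fig, ax = plt.subplots()
    scat = ax.scatter(t_anom, q_anom, c=rh_anom, cmap='BrBG')
    fit = ss.linregress(t_anom, q_anom)
    ax.plot(t_anom, fit.intercept + fit.slope * t_anom, 'k--')
    ax.set_title('r = %.2f, m = %.2f' % (fit.rvalue, fit.slope))
    fig.colorbar(scat, ax=ax, label='RH anomaly (%rh)')
    return fig
#************************************************************************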
# Set up python imports
import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
import sys, os
import scipy.stats as ss # for pearsonr
import struct
import datetime as dt
from matplotlib.dates import date2num,num2date
#from scipy.io import netcdf
import netCDF4 as nc4
import matplotlib.colors as mc
import matplotlib.cm as mpl_cm
import pdb #stop: pdb.set_trace(), start: c
import numpy.ma as ma
# Set up initial run choices
TimeRes='Y' # M=month, Y=year
Region='S' # A=All, G=Globe, N=NHemi, T=Tropics, S=SHemi
Seasons=True # If Region is G, N, T, or S and Seasons == True then plot seasonally (or False for not) M and Y still works
homogtype='IDPHA' # 'IDPHA','PHA','PHADPD'
thenmon='JAN'
thenyear='2020'
version='4.2.0.2019f'
styr=1973
edyr=2019
nyrs=(edyr-styr)+1
nmons=(nyrs)*12
if (TimeRes == 'Y'):
ntims=nyrs
else:
ntims=nmons
YrStr=np.array(range(styr,edyr+1),dtype=str)
YrStr=np.array(([i[2:5] for i in YrStr])) # now a string array of the last two digits
# Set up directories and files
INDIR='/data/users/hadkw/WORKING_HADISDH/UPDATE'+str(edyr)+'/STATISTICS/TIMESERIES/'
OUTDIR='/data/users/hadkw/WORKING_HADISDH/UPDATE'+str(edyr)+'/IMAGES/ANALYSIS/'
In_q='HadISDH.landq.'+version+'_FLATgridIDPHA5by5_anoms8110_'+thenmon+thenyear+'_areaTS_1973'+str(edyr)+'.nc'
In_RH='HadISDH.landRH.'+version+'_FLATgridIDPHA5by5_anoms8110_'+thenmon+thenyear+'_areaTS_1973'+str(edyr)+'.nc'
In_T='HadISDH.landT.'+version+'_FLATgridIDPHA5by5_anoms8110_'+thenmon+thenyear+'_areaTS_1973'+str(edyr)+'.nc'
OutPlotTq='ScatterTqbyRH_HadISDH.'+version+'_'+TimeRes+'_'+Region
OutPlotTRH='ScatterTRHbyq_HadISDH.'+version+'_'+TimeRes+'_'+Region
if (Seasons):
OutPlotTq = 'ScatterTqbyRH_HadISDH.'+version+'_'+TimeRes+'_'+Region+'_SEASONS'
OutPlotTRH = 'ScatterTRHbyq_HadISDH.'+version+'_'+TimeRes+'_'+Region+'_SEASONS'
# Set up variables
q_arr=0 #set once file read in
T_arr=0 #set once file read in
RH_arr=0 #set once file read in
#************************************************************************
# Subroutines
#************************************************************************
# READNETCDFTS
def ReadNetCDFTS(FileName,ReadInfo,TheData):
''' Open the NetCDF File
Get the data
FileName: string containing filepath/name
TheData: an empty 2D array big enough for 1 or 4 regions worth of data
ReadInfo: list of 1 or 4 strings of variable name/s for the globe, N Hemi, Tropics and S.Hemi '''
ncf=nc4.Dataset(FileName,'r')
# ncf.variables this lists the variable names
for loo in range(len(ReadInfo)):
print(loo)
var=ncf.variables[ReadInfo[loo]]
#pdb.set_trace()
TheData[loo,:]=np.copy(var[:])
# # Maybe I've done something wrong but its reading it transposed
# TheData=np.transpose(TheData)
ncf.close()
return TheData # ReadNetCDFTS
#************************************************************************
# MakeUpSteps
def MakeUpSteps(TheArray,stepsies=9):
''' Given a max and min, make up NICE step sizes for a 9 element colourbar '''
''' Currently works with a minimum range of 0.2 and a maximum of 3.0 '''
''' Can only deal with symmetric ranges '''
''' READS: TheArray - an array of data '''
''' stepsies (OPTIONAL) - number of colours in colourbar - default 9 is NICE '''
''' RETURNS: vmin - minimum threshold of range '''
''' vmax - maximum threshold of range '''
''' bounds - stepsies linear increments through the range from vmin to vmax '''
''' strcounds - strings of the bounds for labelling the colourbar '''
vmax=int(np.ceil(np.max(abs(TheArray))*10))/10.
vmin=-vmax
nsteps = stepsies
if (vmax <= 0.2):
vmax = 0.2
vmin = -0.2
elif (vmax <= 0.3):
vmax = 0.32
vmin = -0.32
elif (vmax <= 0.4):
vmax = 0.4
vmin = -0.4
elif (vmax <= 0.6):
vmax = 0.6
vmin = -0.6
elif (vmax <= 0.8):
vmax = 0.8
vmin = -0.8
elif (vmax <= 1.0):
vmax = 1.0
vmin = -1.0
elif (vmax <= 1.2):
vmax = 1.2
vmin = -1.2
elif (vmax <= 1.6):
vmax = 1.6
vmin = -1.6
elif (vmax <= 2.0):
vmax = 2.0
vmin = -2.0
elif (vmax <= 3.0):
vmax = 3.0
vmin = -3.0
# pdb.set_trace() # stop here and play
bounds=np.linspace(vmin,vmax,nsteps)
strbounds=["%4.1f" % i for i in bounds]
return vmax,vmin,strbounds,bounds
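# --- Illustrative sketch (added; not part of the original script) ---
# Hypothetical use of MakeUpSteps(): anomalies spanning roughly +/-1.4 should map
# to a symmetric +/-1.6 range with nine linear colourbar bounds.
def _demo_makeupsteps():
    fake_anoms = np.array([-1.4, -0.2, 0.3, 1.1])
    vmax, vmin, strbounds, bounds = MakeUpSteps(fake_anoms)
    return vmax, vmin, bounds        # expected: 1.6, -1.6, nine values from -1.6 to 1.6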
#************************************************************************
# PlotScatter
def PlotScatter(TheFileTq,TheFileTRH,TheYrStr,Thentims,Theq_arr,TheRH_arr,TheT_arr,TheReg,TheSeasons,ThePointees):
''' Plot Tq scatter with colours related to RH'''
''' Plot TRH scatter with colours related to q'''
''' Points are labelled with either the last two digits of the year (YY) or MONYY '''
''' Save as png and eps '''
''' TheFile - the filepath and filename for the image '''
''' TheYrStr - a string array of the last two digits for years NYrs long '''
''' Thentims - an integer for the number of points to be plotted '''
''' Theq_arr - the specific humidity data (can be monthly or yearly '''
''' TheRH_arr - the relative humidity data (can be monthly or yearly '''
''' TheT_arr - the temperature data (can be monthly or yearly '''
# Load colours and set up bounds
cmap=plt.get_cmap('BrBG') # BrownBlueGreen
cmaplist=[cmap(i) for i in range(cmap.N)]
for loo in range(int(cmap.N/2)-30,int(cmap.N/2)+30):
    cmaplist.remove(cmaplist[int(cmap.N/2)-30]) # remove the very pale colours in the middle
# #cmaplist.remove(cmaplist[(cmap.N/2)-10:(cmap.N/2)+10]) # remove the very pale colours in the middle
#
## remove the darkest and lightest (white and black) - and reverse
# for loo in range(40):
# cmaplist.remove(cmaplist[0])
## cmaplist.reverse()
## for loo in range(10):
## cmaplist.remove(cmaplist[0])
## cmaplist.reverse()
cmap=cmap.from_list('this_cmap',cmaplist,cmap.N)
# FIRST MAKE UP THE TqbyRH plot
# Call MakeUpSteps routine to get a NICE set of colourbar indices
vmax,vmin,strbounds,bounds=MakeUpSteps(TheRH_arr) # MakeUpSteps returns vmax first
norm=mpl_cm.colors.BoundaryNorm(bounds,cmap.N)
ytitlee='Specific Humidity Anomalies (g kg$^{-1}$)'
xtitlee='Temperature Anomalies ($^{o}$C)'
titleesR=['Globe 70$^{o}$S to 70$^{o}$N','N. Hemisphere 20$^{o}$N to 70$^{o}$N','Tropics 20$^{o}$S to 20$^{o}$N','S. Hemisphere 70$^{o}$S to 20$^{o}$S']
titleesS=['December-February','March-May','June-August','September-November']
# set up max and min of q and T for axes - keep same for all regions
qmax=np.ceil(np.max(abs(Theq_arr))/0.1)*0.1
qmin=-qmax
tmax=np.ceil(np.max(abs(TheT_arr))/0.1)*0.1
tmin=-tmax
# set up plot - are we working with one region or four?
if (TheReg != 'A'):
# Is it to be a seasonal (four plot) scenario?
if (TheSeasons):
fig,ax=plt.subplots(4,figsize=(8,8)) #6,18
plt.clf() # needs to be called after figure!!! (create the figure, then clear the plot space)
TheLetter=['a)','b)','c)','d)']
xstart=[0.1,0.48,0.1,0.48]
xwide=0.36
ystart=[0.54,0.54,0.08,0.08]
ytall=0.36
for pp in range(4):
ax[pp]=plt.axes([xstart[pp],ystart[pp],xwide,ytall]) # left, bottom, width, height
ax[pp].set_xlim([tmin,tmax])
ax[pp].set_ylim([qmin,qmax])
# make blank plot with zero lines on
ax[pp].plot(np.zeros(100),np.linspace(qmin,qmax,100),color='black',linewidth=2)
ax[pp].plot(np.linspace(tmin,tmax,100),np.zeros(100),color='black',linewidth=2)
# # plot 1:1 line dashed
# ax[pp].plot(np.linspace(-5,5,100),np.linspace(-5,5,100),color='black',linewidth=2,linestyle='dashed')
# plot black dots for the goods
#pdb.set_trace()
for vv in range(Thentims):
scats=ax[pp].scatter(TheT_arr[pp,vv],Theq_arr[pp,vv],c=TheRH_arr[pp,vv],marker=r"$ {} $".format(ThePointees[pp,vv]),s=200,cmap=cmap,norm=norm, edgecolors='none' ) # s=1
if (pp == 2) | (pp == 3):
ax[pp].set_xlabel(xtitlee,size=12)
if (pp == 0) | (pp == 2):
ax[pp].set_ylabel(ytitlee,size=12)
if (pp == 0) | (pp == 1):
ax[pp].xaxis.set_ticklabels([])
if (pp == 1) | (pp == 3):
ax[pp].yaxis.set_ticklabels([])
ax[pp].tick_params(axis='both', which='major', labelsize=12)
plt.figtext((xstart[pp]+0.02),ystart[pp]+ytall-0.05,TheLetter[pp],size=14)
ax[pp].set_title(titleesS[pp],size=14)
# Get correlation and slope of scatter and add to plot
#pcorr = ss.pearsonr(TheT_arr[pp,:],Theq_arr[pp,:]) # element 0 = pearson correlation coefficient, element 1 = two-tailed p-value
linvals = ss.linregress(TheT_arr[pp,:],Theq_arr[pp,:]) # 0 = slope, 1 = intercept, 2 = r-value, 3 = two-tailed p-value, 4 = sterr
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.05,'r = '+"{:3.2f}".format(linvals[2]),size=12)
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.07,'m = '+"{:3.2f}".format(linvals[0]),size=12)
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.09,'p = '+"{:1.2f}".format(linvals[3]),size=12)
# plot regression line dashed
#pdb.set_trace()
ax[pp].plot(np.linspace(tmin,tmax,100),np.linspace(linvals[0]*tmin,linvals[0]*tmax,100),color='black',linewidth=2,linestyle='dashed')
cbax=fig.add_axes([0.85,0.1,0.025,0.8])
cb=plt.colorbar(scats,cax=cbax,orientation='vertical',ticks=bounds) #, extend=extend
cb.ax.tick_params(labelsize=12)
plt.figtext(0.97,0.5,'RH Anomalies (%rh)',size=12,ha='center',rotation='vertical',va='center')
else:
# Single plot scenario
fig = plt.figure(1,figsize=(8,8))
plt.clf() # needs to be called after figure!!! (create the figure, then clear the plot space)
ax1=plt.axes([0.1,0.1,0.75,0.8]) # left, bottom, width, height
ax1.set_xlim([tmin,tmax])
ax1.set_ylim([qmin,qmax])
# make blank plot with zero lines on
ax1.plot(np.zeros(100),np.linspace(qmin,qmax,100),color='black',linewidth=2)
ax1.plot(np.linspace(tmin,tmax,100),np.zeros(100),color='black',linewidth=2)
# # plot 1:1 line dashed
# ax1.plot(np.linspace(-5,5,100),np.linspace(-5,5,100),color='black',linewidth=2,linestyle='dashed')
# plot black dots for the goods
for vv in range(Thentims):
#print(vv,TheT_arr[0,vv],Theq_arr[0,vv],TheRH_arr[0,vv],r"$ {} $".format(Pointees[vv]))
scats=ax1.scatter(TheT_arr[0,vv],Theq_arr[0,vv],c=TheRH_arr[0,vv],marker=r"$ {} $".format(ThePointees[vv]),s=250,cmap=cmap,norm=norm, edgecolors='none' ) # s=1
ax1.set_xlabel(xtitlee,size=14)
ax1.set_ylabel(ytitlee,size=14)
ax1.tick_params(axis='both', which='major', labelsize=14)
cbax=fig.add_axes([0.85,0.1,0.025,0.8])
cb=plt.colorbar(scats,cax=cbax,orientation='vertical',ticks=bounds) #, extend=extend
cb.ax.tick_params(labelsize=14)
plt.figtext(0.97,0.5,'RH Anomalies (%rh)',size=14,ha='center',rotation='vertical',va='center')
# add watermark and plot labels
# watermarkstring="/".join(os.getcwd().split('/')[4:])+'/'+os.path.basename( __file__ )+" "+dt.datetime.strftime(dt.datetime.now(), "%d-%b-%Y %H:%M")
# plt.figtext(0.01,0.01,watermarkstring,size=6)
# plt.figtext(0.02,0.96,TheLetter,size=18)
if (TheReg == 'G'):
PointTitle=0
if (TheReg == 'N'):
PointTitle=1
if (TheReg == 'T'):
PointTitle=2
if (TheReg == 'S'):
PointTitle=3
ax1.set_title(titleesR[PointTitle],size=18)
# Get correlation and slope of scatter and add to plot
#pcorr = ss.pearsonr(TheT_arr[0,:],Theq_arr[0,:]) # element 0 = pearson correlation coefficient, element 1 = two-tailed p-value
linvals = ss.linregress(TheT_arr[0,:],Theq_arr[0,:]) # 0 = slope, 1 = intercept, 2 = r-value, 3 = two-tailed p-value, 4 = sterr
plt.figtext(0.05,0.96,'r = '+"{:3.2f}".format(linvals[2]),size=12)
plt.figtext(0.05,0.9,'m = '+"{:3.2f}".format(linvals[0]),size=12)
plt.figtext(0.05,0.84,'p = '+"{:1.2f}".format(linvals[3]),size=12)
# plot regression line dashed
ax1.plot(np.linspace(tmin,tmax,100),np.linspace(linvals[0]*tmin,linvals[0]*tmax,100),color='black',linewidth=2,linestyle='dashed')
else:
# Four plot scenario
fig,ax=plt.subplots(4,figsize=(8,8)) #6,18
plt.clf() # needs to be called after figure!!! (create the figure, then clear the plot space)
TheLetter=['a)','b)','c)','d)']
xstart=[0.1,0.48,0.1,0.48]
xwide=0.36
ystart=[0.54,0.54,0.08,0.08]
ytall=0.36
for pp in range(4):
ax[pp]=plt.axes([xstart[pp],ystart[pp],xwide,ytall]) # left, bottom, width, height
ax[pp].set_xlim([tmin,tmax])
ax[pp].set_ylim([qmin,qmax])
# make blank plot with zero lines on
ax[pp].plot(np.zeros(100),np.linspace(qmin,qmax,100),color='black',linewidth=2)
ax[pp].plot(np.linspace(tmin,tmax,100),np.zeros(100),color='black',linewidth=2)
# # plot 1:1 line dashed
# ax[pp].plot(np.linspace(-5,5,100),np.linspace(-5,5,100),color='black',linewidth=2,linestyle='dashed')
# plot black dots for the goods
for vv in range(Thentims):
scats=ax[pp].scatter(TheT_arr[pp,vv],Theq_arr[pp,vv],c=TheRH_arr[pp,vv],marker=r"$ {} $".format(ThePointees[vv]),s=200,cmap=cmap,norm=norm, edgecolors='none' ) # s=1
if (pp == 2) | (pp == 3):
ax[pp].set_xlabel(xtitlee,size=12)
if (pp == 0) | (pp == 2):
ax[pp].set_ylabel(ytitlee,size=12)
if (pp == 0) | (pp == 1):
ax[pp].xaxis.set_ticklabels([])
if (pp == 1) | (pp == 3):
ax[pp].yaxis.set_ticklabels([])
ax[pp].tick_params(axis='both', which='major', labelsize=12)
plt.figtext((xstart[pp]+0.02),ystart[pp]+ytall-0.05,TheLetter[pp],size=14)
ax[pp].set_title(titleesR[pp],size=14)
# Get correlation and slope of scatter and add to plot
#pcorr = ss.pearsonr(TheT_arr[pp,:],Theq_arr[pp,:]) # element 0 = pearson correlation coefficient, element 1 = two-tailed p-value
linvals = ss.linregress(TheT_arr[pp,:],Theq_arr[pp,:]) # 0 = slope, 1 = intercept, 2 = r-value, 3 = two-tailed p-value, 4 = sterr
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.05,'r = '+"{:3.2f}".format(linvals[2]),size=12)
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.07,'m = '+"{:3.2f}".format(linvals[0]),size=12)
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.09,'p = '+"{:1.2f}".format(linvals[3]),size=12)
# plot regression line dashed
#pdb.set_trace()
ax[pp].plot(np.linspace(tmin,tmax,100),np.linspace(linvals[0]*tmin,linvals[0]*tmax,100),color='black',linewidth=2,linestyle='dashed')
cbax=fig.add_axes([0.85,0.1,0.025,0.8])
cb=plt.colorbar(scats,cax=cbax,orientation='vertical',ticks=bounds) #, extend=extend
cb.ax.tick_params(labelsize=12)
plt.figtext(0.97,0.5,'RH Anomalies (%rh)',size=12,ha='center',rotation='vertical',va='center')
# add watermark and plot labels
# watermarkstring="/".join(os.getcwd().split('/')[4:])+'/'+os.path.basename( __file__ )+" "+dt.datetime.strftime(dt.datetime.now(), "%d-%b-%Y %H:%M")
# plt.figtext(0.01,0.01,watermarkstring,size=6)
#plt.show()
plt.savefig(TheFileTq+".eps")
plt.savefig(TheFileTq+".png")
# raw_input("stop") # REALLY USEFUL TO INTERACT WITHIN SUBROUTINE ctrl C
# plt.ion()
# plt.show() can then zoom and save
#***********************************
# SECOND MAKE UP THE TRHbyq plot
# Call MakeUpSteps routine to get a NICE set of colourbar indices
vmax,vmin,strbounds,bounds=MakeUpSteps(Theq_arr) # MakeUpSteps returns vmax first
norm=mpl_cm.colors.BoundaryNorm(bounds,cmap.N)
ytitlee='Relative Humidity Anomalies (%rh)'
xtitlee='Temperature Anomalies ($^{o}$C)'
titlees=['Globe 70$^{o}$S to 70$^{o}$N','N. Hemisphere 20$^{o}$N to 70$^{o}$N','Tropics 20$^{o}$S to 20$^{o}$N','S. Hemisphere 70$^{o}$S to 20$^{o}$S']
# set up max and min of RH and T for axes - keep same for all regions
rhmax=np.ceil(np.max(abs(TheRH_arr))/0.1)*0.1
rhmin=-rhmax
tmax=np.ceil(np.max(abs(TheT_arr))/0.1)*0.1
tmin=-tmax
# set up plot - are we working with one region or four?
if (TheReg != 'A'):
if (TheSeasons):
# Four plot scenario
fig,ax=plt.subplots(4,figsize=(8,8)) #6,18
plt.clf() # needs to be called after figure!!! (create the figure, then clear the plot space)
TheLetter=['a)','b)','c)','d)']
xstart=[0.1,0.48,0.1,0.48]
xwide=0.36
ystart=[0.54,0.54,0.08,0.08]
ytall=0.36
for pp in range(4):
ax[pp]=plt.axes([xstart[pp],ystart[pp],xwide,ytall]) # left, bottom, width, height
ax[pp].set_xlim([tmin,tmax])
ax[pp].set_ylim([rhmin,rhmax])
# make blank plot with zero lines on
ax[pp].plot(np.zeros(100),np.linspace(rhmin,rhmax,100),color='black',linewidth=2)
ax[pp].plot(np.linspace(tmin,tmax,100),np.zeros(100),color='black',linewidth=2)
# # plot 1:1 line dashed
# ax[pp].plot(np.linspace(-5,5,100),np.linspace(-5,5,100),color='black',linewidth=2,linestyle='dashed')
# plot black dots for the goods
for vv in range(Thentims):
scats=ax[pp].scatter(TheT_arr[pp,vv],TheRH_arr[pp,vv],c=Theq_arr[pp,vv],marker=r"$ {} $".format(ThePointees[pp,vv]),s=200,cmap=cmap,norm=norm, edgecolors='none' ) # s=1
if (pp == 2) | (pp == 3):
ax[pp].set_xlabel(xtitlee,size=12)
if (pp == 0) | (pp == 2):
ax[pp].set_ylabel(ytitlee,size=12)
if (pp == 0) | (pp == 1):
ax[pp].xaxis.set_ticklabels([])
if (pp == 1) | (pp == 3):
ax[pp].yaxis.set_ticklabels([])
ax[pp].tick_params(axis='both', which='major', labelsize=12)
plt.figtext((xstart[pp]+0.02),ystart[pp]+ytall-0.05,TheLetter[pp],size=14)
ax[pp].set_title(titleesS[pp],size=14)
# Get correlation and slope of scatter and add to plot
#pcorr = ss.pearsonr(TheT_arr[pp,:],TheRH_arr[pp,:]) # element 0 = pearson correlation coefficient, element 1 = two-tailed p-value
linvals = ss.linregress(TheT_arr[pp,:],TheRH_arr[pp,:]) # 0 = slope, 1 = intercept, 2 = r-value, 3 = two-tailed p-value, 4 = sterr
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.05,'r = '+"{:3.2f}".format(linvals[2]),size=12)
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.07,'m = '+"{:3.2f}".format(linvals[0]),size=12)
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.09,'p = '+"{:1.2f}".format(linvals[3]),size=12)
# plot regression line dashed
ax[pp].plot(np.linspace(tmin,tmax,100),np.linspace(linvals[0]*tmin,linvals[0]*tmax,100),color='black',linewidth=2,linestyle='dashed')
cbax=fig.add_axes([0.85,0.1,0.025,0.8])
cb=plt.colorbar(scats,cax=cbax,orientation='vertical',ticks=bounds) #, extend=extend
cb.ax.tick_params(labelsize=12)
plt.figtext(0.97,0.5,'q Anomalies (g kg$^{-1}$)',size=12,ha='center',rotation='vertical',va='center')
# add watermark and plot labels
# watermarkstring="/".join(os.getcwd().split('/')[4:])+'/'+os.path.basename( __file__ )+" "+dt.datetime.strftime(dt.datetime.now(), "%d-%b-%Y %H:%M")
# plt.figtext(0.01,0.01,watermarkstring,size=6)
else:
# Single plot scenario
fig = plt.figure(1,figsize=(8,8))
plt.clf() # needs to be called after figure!!! (create the figure, then clear the plot space)
ax1=plt.axes([0.1,0.1,0.75,0.8]) # left, bottom, width, height
ax1.set_xlim([tmin,tmax])
ax1.set_ylim([rhmin,rhmax])
# make blank plot with zero lines on
ax1.plot(np.zeros(100),np.linspace(rhmin,rhmax,100),color='black',linewidth=2)
ax1.plot(np.linspace(tmin,tmax,100),np.zeros(100),color='black',linewidth=2)
# # plot 1:1 line dashed
# ax1.plot(np.linspace(-5,5,100),np.linspace(-5,5,100),color='black',linewidth=2,linestyle='dashed')
# plot YEAR LABELS for the goods
for vv in range(Thentims):
#print(vv,TheT_arr[0,vv],Theq_arr[0,vv],TheRH_arr[0,vv],r"$ {} $".format(Pointees[vv]))
scats=ax1.scatter(TheT_arr[0,vv],TheRH_arr[0,vv],c=Theq_arr[0,vv],marker=r"$ {} $".format(ThePointees[vv]),s=250,cmap=cmap,norm=norm, edgecolors='none' ) # s=1
ax1.set_xlabel(xtitlee,size=14)
ax1.set_ylabel(ytitlee,size=14)
ax1.tick_params(axis='both', which='major', labelsize=14)
cbax=fig.add_axes([0.85,0.1,0.025,0.8])
cb=plt.colorbar(scats,cax=cbax,orientation='vertical',ticks=bounds) #, extend=extend
cb.ax.tick_params(labelsize=14)
plt.figtext(0.97,0.5,'q Anomalies (g kg$^{-1}$)',size=14,ha='center',rotation='vertical',va='center')
# add watermark and plot labels
# watermarkstring="/".join(os.getcwd().split('/')[4:])+'/'+os.path.basename( __file__ )+" "+dt.datetime.strftime(dt.datetime.now(), "%d-%b-%Y %H:%M")
# plt.figtext(0.01,0.01,watermarkstring,size=6)
# plt.figtext(0.02,0.96,TheLetter,size=18)
if (TheReg == 'G'):
PointTitle=0
if (TheReg == 'N'):
PointTitle=1
if (TheReg == 'T'):
PointTitle=2
if (TheReg == 'S'):
PointTitle=3
ax1.set_title(titlees[PointTitle],size=18)
# Get correlation and slope of scatter and add to plot
#pcorr = ss.pearsonr(TheT_arr[0,:],TheRH_arr[0,:]) # element 0 = pearson correlation coefficient, element 1 = two-tailed p-value
linvals = ss.linregress(TheT_arr[0,:],TheRH_arr[0,:]) # 0 = slope, 1 = intercept, 2 = r-value, 3 = two-tailed p-value, 4 = sterr
plt.figtext(0.05,0.96,'r = '+"{:3.2f}".format(linvals[2]),size=12)
plt.figtext(0.05,0.9,'m = '+"{:3.2f}".format(linvals[0]),size=12)
plt.figtext(0.05,0.84,'p = '+"{:1.2f}".format(linvals[3]),size=12)
# plot regression line dashed
ax1.plot(np.linspace(tmin,tmax,100),np.linspace(linvals[0]*tmin,linvals[0]*tmax,100),color='black',linewidth=2,linestyle='dashed')
else:
# Four plot scenario
fig,ax=plt.subplots(4,figsize=(8,8)) #6,18
plt.clf() # needs to be called after figure!!! (create the figure, then clear the plot space)
TheLetter=['a)','b)','c)','d)']
xstart=[0.1,0.48,0.1,0.48]
xwide=0.36
ystart=[0.54,0.54,0.08,0.08]
ytall=0.36
for pp in range(4):
ax[pp]=plt.axes([xstart[pp],ystart[pp],xwide,ytall]) # left, bottom, width, height
ax[pp].set_xlim([tmin,tmax])
ax[pp].set_ylim([rhmin,rhmax])
# make blank plot with zero lines on
ax[pp].plot(np.zeros(100),np.linspace(rhmin,rhmax,100),color='black',linewidth=2)
ax[pp].plot(np.linspace(tmin,tmax,100),np.zeros(100),color='black',linewidth=2)
# # plot 1:1 line dashed
# ax[pp].plot(np.linspace(-5,5,100),np.linspace(-5,5,100),color='black',linewidth=2,linestyle='dashed')
# plot black dots for the goods
for vv in range(Thentims):
scats=ax[pp].scatter(TheT_arr[pp,vv],TheRH_arr[pp,vv],c=Theq_arr[pp,vv],marker=r"$ {} $".format(ThePointees[vv]),s=200,cmap=cmap,norm=norm, edgecolors='none' ) # s=1
if (pp == 2) | (pp == 3):
ax[pp].set_xlabel(xtitlee,size=12)
if (pp == 0) | (pp == 2):
ax[pp].set_ylabel(ytitlee,size=12)
if (pp == 0) | (pp == 1):
ax[pp].xaxis.set_ticklabels([])
if (pp == 1) | (pp == 3):
ax[pp].yaxis.set_ticklabels([])
ax[pp].tick_params(axis='both', which='major', labelsize=12)
plt.figtext((xstart[pp]+0.02),ystart[pp]+ytall-0.05,TheLetter[pp],size=14)
ax[pp].set_title(titlees[pp],size=14)
# Get correlation and slope of scatter and add to plot
#pcorr = ss.pearsonr(TheT_arr[pp,:],TheRH_arr[pp,:]) # element 0 = pearson correlation coefficient, element 1 = two-tailed p-value
linvals = ss.linregress(TheT_arr[pp,:],TheRH_arr[pp,:]) # 0 = slope, 1 = intercept, 2 = r-value, 3 = two-tailed p-value, 4 = sterr
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.05,'r = '+"{:3.2f}".format(linvals[2]),size=12)
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.07,'m = '+"{:3.2f}".format(linvals[0]),size=12)
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.09,'p = '+"{:1.2f}".format(linvals[3]),size=12)
# plot regression line dashed
ax[pp].plot(np.linspace(tmin,tmax,100),np.linspace(linvals[0]*tmin,linvals[0]*tmax,100),color='black',linewidth=2,linestyle='dashed')
cbax=fig.add_axes([0.85,0.1,0.025,0.8])
cb=plt.colorbar(scats,cax=cbax,orientation='vertical',ticks=bounds) #, extend=extend
cb.ax.tick_params(labelsize=12)
plt.figtext(0.97,0.5,'q Anomalies (g kg$^{-1}$)',size=12,ha='center',rotation='vertical',va='center')
# add watermark and plot labels
# watermarkstring="/".join(os.getcwd().split('/')[4:])+'/'+os.path.basename( __file__ )+" "+dt.datetime.strftime(dt.datetime.now(), "%d-%b-%Y %H:%M")
# plt.figtext(0.01,0.01,watermarkstring,size=6)
#plt.show()
plt.savefig(TheFileTRH+".eps")
plt.savefig(TheFileTRH+".png")
# raw_input("stop") # REALLY USEFUL TO INTERACT WITHIN SUBROUTINE ctrl C
# plt.ion()
# plt.show() can then zoom and save
return #PlotNiceDotsMap
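# --- Illustrative sketch (added; not part of the original script) ---
# Hypothetical call shape for PlotScatter() on a single region with annual data:
# the data arrays are (1, ntims) and the point labels double as plot markers.
# This assumes the single-region, non-seasonal code path.
def _demo_plotscatter_call():
    ntims_demo = 5
    t_demo = np.random.randn(1, ntims_demo)
    q_demo = 0.5 * t_demo + 0.1 * np.random.randn(1, ntims_demo)
    rh_demo = np.random.randn(1, ntims_demo)
    labels_demo = np.array(['90', '91', '92', '93', '94'])
    PlotScatter('demo_Tq', 'demo_TRH', labels_demo, ntims_demo,
                q_demo, rh_demo, t_demo, 'G', False, labels_demo)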
#************************************************************************
# MAIN PROGRAM
#************************************************************************
# Read in region data for each variable
if (Region == 'A'):
nReg=4
else:
nReg=1
tmpq_arr=np.empty((nReg,nmons))
tmpRH_arr=np.empty((nReg,nmons))
tmpT_arr=np.empty((nReg,nmons))
q_arr=np.empty((nReg,ntims))
RH_arr=np.empty((nReg,ntims))
T_arr=np.empty((nReg,ntims))
MyFile=INDIR+In_q
if (Region == 'A'):
ReadInfo=['glob_q_anoms','nhem_q_anoms','trop_q_anoms','shem_q_anoms']
elif (Region == 'G'):
ReadInfo=['glob_q_anoms']
elif (Region == 'N'):
ReadInfo=['nhem_q_anoms']
elif (Region == 'T'):
ReadInfo=['trop_q_anoms']
elif (Region == 'S'):
ReadInfo=['shem_q_anoms']
tmpq_arr=ReadNetCDFTS(MyFile,ReadInfo,tmpq_arr)
MyFile=INDIR+In_RH
if (Region == 'A'):
ReadInfo=['glob_RH_anoms','nhem_RH_anoms','trop_RH_anoms','shem_RH_anoms']
elif (Region == 'G'):
ReadInfo=['glob_RH_anoms']
elif (Region == 'N'):
ReadInfo=['nhem_RH_anoms']
elif (Region == 'T'):
ReadInfo=['trop_RH_anoms']
elif (Region == 'S'):
ReadInfo=['shem_RH_anoms']
tmpRH_arr=ReadNetCDFTS(MyFile,ReadInfo,tmpRH_arr)
MyFile=INDIR+In_T
if (Region == 'A'):
ReadInfo=['glob_T_anoms','nhem_T_anoms','trop_T_anoms','shem_T_anoms']
elif (Region == 'G'):
ReadInfo=['glob_T_anoms']
elif (Region == 'N'):
ReadInfo=['nhem_T_anoms']
elif (Region == 'T'):
ReadInfo=['trop_T_anoms']
elif (Region == 'S'):
ReadInfo=['shem_T_anoms']
tmpT_arr=ReadNetCDFTS(MyFile,ReadInfo,tmpT_arr)
#pdb.set_trace()
# If annual - convert monthly mean anomalies to annual mean anomalies
# THERE SHOULD BE NO MISSING DATA IN THESE!!!!
# However, there are because of April 2015 so we need to set up as masked array.
tmpq_arr = ma.masked_where(tmpq_arr < -1000,tmpq_arr) # mdi is -1e30 but floating point inaccuracy means it may not match?
tmpT_arr = ma.masked_where(tmpT_arr < -1000,tmpT_arr) # mdi is -1e30 but floating point inaccuracy means it may not match?
tmpRH_arr = ma.masked_where(tmpRH_arr < -1000,tmpRH_arr) # mdi is -1e30 but floating point inaccuracy means it may not match?
if (Seasons):
SeasonPointer = np.reshape(np.arange(nmons),(nyrs,12))
DJF = np.reshape(SeasonPointer[:,(0,1,11,)],nyrs*3)
MAM = np.reshape(SeasonPointer[:,(2,3,4,)],nyrs*3)
JJA = np.reshape(SeasonPointer[:,(5,6,7,)],nyrs*3)
SON = np.reshape(SeasonPointer[:,(8,9,10,)],nyrs*3)
for rr in range(nReg):
if (TimeRes == 'Y'):
Pointees=YrStr
if (Seasons):
# Need to sort the arrays out into seasonal groups of either annual or months
T_arrS = ma.empty((4,nyrs),dtype=float)
q_arrS = ma.empty((4,nyrs),dtype=float)
RH_arrS = ma.empty((4,nyrs),dtype=float)
#PointeesS = np.empty((4,nyrs),dtype=str)
#pdb.set_trace()
for yy in range(nyrs):
if (yy == 0):
TmpT = tmpT_arr[0,DJF]
T_arrS[0,0] = ma.mean(TmpT[0:2])
TmpTN = np.reshape(TmpT[2:-1],(nyrs-1,3))
Tmpq = tmpq_arr[0,DJF]
q_arrS[0,0] = ma.mean(Tmpq[0:2])
TmpqN = np.reshape(Tmpq[2:-1],(nyrs-1,3))
TmpRH = tmpRH_arr[0,DJF]
RH_arrS[0,0] = ma.mean(TmpRH[0:2])
TmpRHN = np.reshape(TmpRH[2:-1],(nyrs-1,3))
else:
    T_arrS[0,yy] = ma.mean(TmpTN[yy-1,:])
    q_arrS[0,yy] = ma.mean(TmpqN[yy-1,:])
    RH_arrS[0,yy] = ma.mean(TmpRHN[yy-1,:])
T_arrS[1,:] = ma.mean(np.reshape(tmpT_arr[0,MAM],(nyrs,3)),axis=1)
T_arrS[2,:] = ma.mean(np.reshape(tmpT_arr[0,JJA],(nyrs,3)),axis=1)
T_arrS[3,:] = ma.mean(
|
np.reshape(tmpT_arr[0,SON],(nyrs,3))
|
numpy.reshape
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
def _test_logical_slice_assign(test_case, placement, sbp):
input = random_tensor(2, 4, 4, requires_grad=True).oneflow
x_numpy = input.detach().cpu().numpy()
x = (input + 0).to_global(
placement=placement, sbp=sbp
) # add 0 to change to non-leaf tensor
x[:, :2] = 3
# forward
x_numpy[:, :2] = 3
test_case.assertTrue(x.sbp == sbp)
test_case.assertTrue(np.array_equal(x.numpy(), x_numpy))
# backward
x.sum().backward()
input_grad_np = np.ones((4, 4))
input_grad_np[:, :2] = 0
test_case.assertTrue(np.array_equal(input.grad.numpy(), input_grad_np))
def _test_graph_logical_slice_assign(test_case, placement, sbp):
x = random_tensor(2, 4, 4, requires_grad=True).oneflow
x_numpy = x.detach().cpu().numpy()
class LogicalSliceAssignWithGrad(flow.nn.Module):
def __init__(self):
super().__init__()
self.input_grad = flow.nn.Parameter(flow.zeros(4, 4))
def forward(self, input):
x = input + self.input_grad
x = x.to_global(placement, sbp)
x[:, :2] = 3
return x
logical_slice_assign_with_grad = LogicalSliceAssignWithGrad().to_global(
placement, [flow.sbp.broadcast,] * len(sbp)
)
of_sgd = flow.optim.SGD(
logical_slice_assign_with_grad.parameters(), lr=1.0, momentum=0.0
)
class LogicalSliceAssignTrainGraph(flow.nn.Graph):
def __init__(self):
super().__init__()
self.module = logical_slice_assign_with_grad
self.add_optimizer(of_sgd)
def build(self, x):
out = self.module(x)
z = out.sum()
z.backward()
return out
graph = LogicalSliceAssignTrainGraph()
input = x.to_global(placement=placement, sbp=sbp)
y = graph(input)
test_case.assertTrue(y.sbp == sbp)
# output
x_numpy[:, :2] = 3
test_case.assertTrue(np.array_equal(y.numpy(), x_numpy))
# input_grad
x_grad_np =
|
np.ones((4, 4))
|
numpy.ones
|
import os
import math
import numpy as np
import basis.robot_math as rm
import modeling.model_collection as mc
import modeling.collision_model as cm
import robot_sim._kinematics.jlchain as jl
import robot_sim.manipulators.ur3.ur3 as ur
import robot_sim.end_effectors.gripper.robotiq85.robotiq85 as rtq
# import robot_sim.end_effectors.gripper.robotiq85_gelsight.robotiq85_gelsight as rtq_gs
import robot_sim.end_effectors.gripper.robotiq85_gelsight.robotiq85_gelsight_pusher as rtq_gs
from panda3d.core import CollisionNode, CollisionBox, Point3
import robot_sim.robots.robot_interface as ri
class UR3Dual(ri.RobotInterface):
def __init__(self, pos=np.zeros(3), rotmat=np.eye(3), name='ur3dual', enable_cc=True):
super().__init__(pos=pos, rotmat=rotmat, name=name)
this_dir, this_filename = os.path.split(__file__)
# left side
self.lft_body = jl.JLChain(pos=pos, rotmat=rotmat, homeconf=np.zeros(13), name='lft_body_jl')
self.lft_body.jnts[0]['loc_pos'] = np.array([0.0, 0.0, 0.0])
self.lft_body.jnts[1]['loc_pos'] = np.array([-0.0, 0.0, 0.0])
self.lft_body.jnts[2]['loc_pos'] = np.array([0.0, 0.0, 0.0])
self.lft_body.jnts[3]['loc_pos'] = np.array([0.0, 0.0, 0.0])
self.lft_body.jnts[4]['loc_pos'] =
|
np.array([-0.0, 0.0, 0.0])
|
numpy.array
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
import argparse
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.base import BaseEstimator, ClassifierMixin
import sklearn.metrics
import pathlib
import csv
import numpy as np
class ThresholdEstimator(BaseEstimator, ClassifierMixin):
def __init__(self, threshold=0.5):
self.threshold = threshold
    def fit(self, X, y):
        # nothing to learn; return self to follow the scikit-learn estimator convention
        return self
def predict(self, X):
return X >= self.threshold
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--predictions", required=True, type=pathlib.Path)
args = parser.parse_args()
r = csv.DictReader(args.predictions.open(), delimiter="\t")
X = []
y = []
for data in r:
X.append([float(data["prediction"])])
y.append(float(data["label"]))
X = np.asarray(X)
y = np.asarray(y).transpose()
print(X.shape)
print(y.shape)
clf = ThresholdEstimator()
cv = GridSearchCV(clf, {"threshold":
|
np.linspace(0, 1, 100)
|
numpy.linspace
|
'''
Created on Aug 9, 2016
@author: <NAME> <<EMAIL>>
'''
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib.ticker import Locator
import six
from six.moves import range
def doublearrow(xs, ys, w=0.1, **kwargs):
""" Plots a double arrow between the two given coordinates """
ax = kwargs.pop('ax', None)
if ax is None:
ax = plt.gca()
# set parameters of the arrow
arrowparams = {
'head_width': 2*w,
'head_length': w,
'length_includes_head': True,
'shape': 'full',
'head_starts_at_zero': False
}
arrowparams.update(kwargs)
# plot two arrows to mimic double arrow
dx = xs[1] - xs[0]
dy = ys[1] - ys[0]
ax.arrow(xs[0], ys[0], dx, dy, **arrowparams)
ax.arrow(xs[1], ys[1], -dx, -dy, **arrowparams)
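# --- Illustrative sketch (added; not part of the original module) ---
# Hypothetical use of doublearrow(): mark the span between x=1 and x=3 at y=0.5.
def _demo_doublearrow():
    fig, ax = plt.subplots()
    ax.set_xlim(0, 4)
    ax.set_ylim(0, 1)
    doublearrow([1., 3.], [0.5, 0.5], w=0.05, ax=ax, color='k')
    return fig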
def log_slope_indicator(xmin=1., xmax=2., factor=None, ymax=None, exponent=1.,
label_x='', label_y='', space=15, loc='lower', ax=None,
debug=False, **kwargs):
"""
Function adding a triangle to axes `ax`. This is useful for indicating
slopes in log-log-plots. `xmin` and `xmax` denote the x-extend of the
triangle. The y-coordinates are calculated according to the formula
y = factor*x**exponent
If supplied, the texts `label_x` and `label_y` are put next to the
catheti. The parameter `loc` determines whether the catheti are
above or below the diagonal. Additionally, kwargs can be used to
set the style of the triangle
`loc` determines whether the triangle appears above (`loc='upper'`) or below
(`loc='lower'; default) the diagonal line.
"""
# prepare the axes and determine
if ax is None:
ax = plt.gca()
if loc == 'lower':
lower = (exponent > 0)
elif loc == 'upper':
lower = (exponent < 0)
else:
raise ValueError('`loc` must be either `lower` or `upper`.')
if ymax is not None:
factor = ymax/max(xmin**exponent, xmax**exponent)
if factor is None:
factor = 1.
# get triangle coordinates
y = factor*np.array((xmin, xmax), np.double)**exponent
if lower:
pts = np.array([[xmin, y[0]], [xmax, y[0]], [xmax, y[1]]])
else:
pts = np.array([[xmin, y[0]], [xmax, y[1]], [xmin, y[1]]])
if debug:
print('The coordinates of the log slope indicator are %s' % pts)
# add triangle to axis
if not('facecolor' in kwargs or 'fc' in kwargs):
kwargs['facecolor'] = 'none'
if not('edgecolor' in kwargs or 'ec' in kwargs):
kwargs['edgecolor'] = 'k'
p = Polygon(pts, closed=True, **kwargs)
ax.add_patch(p)
# add labels
xt = np.exp(0.5*(np.log(xmin) + np.log(xmax)))
# dx = (xmax/xmin)**0.1
yt = np.exp(np.log(y).mean())
# dy = (y[1]/y[0])**0.1
sgn = np.sign(exponent)
if lower:
ax.annotate(
label_x, xy=(xt, y[0]), xytext=(0, -sgn*space),
textcoords='offset points', size='x-small',
horizontalalignment='center',
verticalalignment='top'
)
ax.annotate(
label_y, xy=(xmax, yt), xytext=(space, 0),
textcoords='offset points', size='x-small',
horizontalalignment='right',
verticalalignment='center'
)
else:
ax.annotate(
label_x, xy=(xt, y[1]), xytext=(0, sgn*space),
textcoords='offset points', size='x-small',
horizontalalignment='center',
verticalalignment='bottom'
)
ax.annotate(
label_y, xy=(xmin, yt), xytext=(-space, 0),
textcoords='offset points', size='x-small',
horizontalalignment='left',
verticalalignment='center'
)
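# --- Illustrative sketch (added; not part of the original module) ---
# Hypothetical use of log_slope_indicator(): annotate a slope-2 power law on a
# log-log plot between x=1 and x=10.
def _demo_log_slope_indicator():
    x = np.logspace(0, 2, 50)
    fig, ax = plt.subplots()
    ax.loglog(x, x**2)
    log_slope_indicator(xmin=1., xmax=10., factor=0.5, exponent=2.,
                        label_x='1', label_y='2', ax=ax)
    return fig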
class MinorSymLogLocator(Locator):
"""
Dynamically find minor tick positions based on the positions of
major ticks for a symlog scaling.
"""
def __init__(self, linthresh):
"""
Ticks will be placed between the major ticks.
The placement is linear for x between -linthresh and linthresh,
otherwise its logarithmically
"""
self.linthresh = linthresh
def __call__(self):
'Return the locations of the ticks'
majorlocs = self.axis.get_majorticklocs()
# iterate through minor locs
minorlocs = []
# handle the lowest part
for i in range(1, len(majorlocs)):
majorstep = majorlocs[i] - majorlocs[i-1]
if abs(majorlocs[i-1] + majorstep/2) < self.linthresh:
ndivs = 10
else:
ndivs = 9
minorstep = majorstep / ndivs
locs = np.arange(majorlocs[i-1], majorlocs[i], minorstep)[1:]
minorlocs.extend(locs)
return self.raise_if_exceeds(np.array(minorlocs))
def tick_values(self, vmin, vmax):
raise NotImplementedError('Cannot get tick locations for a '
'%s type.' % type(self))
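# --- Illustrative sketch (added; not part of the original module) ---
# Hypothetical use of MinorSymLogLocator(): attach it to the y-axis of a symlog
# plot so minor ticks are spaced sensibly around the linear threshold
# (the kwarg name 'linthresh' assumes a recent matplotlib).
def _demo_minor_symlog_locator():
    fig, ax = plt.subplots()
    vals = np.linspace(-100, 100, 201)
    ax.plot(vals, vals)
    ax.set_yscale('symlog', linthresh=1.0)
    ax.yaxis.set_minor_locator(MinorSymLogLocator(1.0))
    return fig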
def render_table(data, col_width=3.0, row_height=0.625, font_size=14,
header_color='#40466e', row_colors=['#f1f1f2', 'w'],
edge_color='w', bbox=[0, 0, 1, 1], header_columns=0,
ax=None, **kwargs):
"""
Renders the table given in `data` in a matplotlib axes.
Code inspired by http://stackoverflow.com/a/39358722/932593
"""
if ax is None:
size = ((np.array(data.shape[::-1]) + np.array([0, 1])) *
|
np.array([col_width, row_height])
|
numpy.array
|
import os
import torch
import numpy as np
from torch.utils.data import Dataset
class TerraDataset(Dataset):
def __init__(self, data_root, clip_thres, test_flag):
self.samples = []
self.num_normal = 0
self.num_untvbl_obs = 0
self.num_tvbl_obs = 0
self.num_crash = 0
self.num_undefined = 0
self.data_root = data_root
self.clip_thres = clip_thres
self.test_flag = test_flag
self.read_data()
def __len__(self):
return len(self.samples)
def __getitem__(self, idx):
lidar_data = torch.from_numpy(self.samples[idx][0]).float()
obs_data = torch.from_numpy(self.samples[idx][1]).float()
system_data = torch.from_numpy(self.samples[idx][2]).float()
label_data = self.samples[idx][3]
return (lidar_data, obs_data, system_data, label_data)
def read_data(self):
'''
We construct a datapoint as (lidar_data, obs_data, system_data, label_data), where
lidar_data - high dimensional input x_h
(obs_data, system_data) - low dimensional input x_l, defined by equation (6) in the paper
label_data - ground truth label y
'''
left_enc_v_index = 40
right_enc_v_index = 41
label_index = -1
count = 0
np.random.seed(0)
map_float = lambda x: np.array(list(map(float, x)))
lidar = os.listdir(self.data_root)[0]
system = os.listdir(self.data_root)[1]
lidar_folder = os.path.join(self.data_root, lidar)
system_folder = os.path.join(self.data_root, system)
for flidar, fsystem in zip(os.listdir(lidar_folder), os.listdir(system_folder)):
flidar_path = os.path.join(lidar_folder, flidar)
fsystem_path = os.path.join(system_folder, fsystem)
fsystem_len = self.file_len(fsystem_path)
with open(flidar_path, 'r') as file_lid, open(fsystem_path, 'r') as file_sys:
for i in range(fsystem_len):
lid_line = file_lid.readline()
dist = lid_line.split(',')
dist = map_float(dist)[1:-1]
clip_dist = np.clip(dist, a_min=0, a_max=self.clip_thres)/self.clip_thres
obs_flag = self.detect_obstacles(dist)
sys_line = file_sys.readline()
sys_data = sys_line.split(',')
enc_left = float(sys_data[left_enc_v_index])
enc_right = float(sys_data[right_enc_v_index])
label = int(sys_data[label_index])
encoders = np.array([enc_left, enc_right])
# under-sampling normal cases and over-sampling anomalies by replicating
if label == 0:
if self.test_flag == 0:
if count % 7 == 0:
self.num_normal += 1
self.samples.append([clip_dist, obs_flag, encoders, label])
count += 1
else:
self.num_normal += 1
self.samples.append([clip_dist, obs_flag, encoders, label])
elif label == 1:
if self.test_flag == 0:
self.num_untvbl_obs += 1
self.samples.append([clip_dist, obs_flag, encoders, label])
for j in range(2):
self.num_untvbl_obs += 1
clip_dist_new, obs_flag_new, encoders_new = self.data_augmentation(
clip_dist, obs_flag, encoders)
self.samples.append([clip_dist_new, obs_flag_new, encoders_new, label])
else:
self.num_untvbl_obs += 1
self.samples.append([clip_dist, obs_flag, encoders, label])
elif label == 2:
if self.test_flag == 0:
self.num_tvbl_obs += 1
self.samples.append([clip_dist, obs_flag, encoders, label])
for j in range(1):
self.num_tvbl_obs += 1
clip_dist_new, obs_flag_new, encoders_new = self.data_augmentation(
clip_dist, obs_flag, encoders)
self.samples.append([clip_dist_new, obs_flag_new, encoders_new, label])
else:
self.num_tvbl_obs += 1
self.samples.append([clip_dist, obs_flag, encoders, label])
elif label == 3:
if self.test_flag == 0:
self.num_crash += 1
self.samples.append([clip_dist, obs_flag, encoders, label])
for j in range(1):
self.num_crash += 1
clip_dist_new, obs_flag_new, encoders_new = self.data_augmentation(
clip_dist, obs_flag, encoders)
self.samples.append([clip_dist_new, obs_flag_new, encoders_new, label])
else:
self.num_crash += 1
self.samples.append([clip_dist, obs_flag, encoders, label])
else:
pass
for j in range(7):
lid_line = file_lid.readline()
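def file_len(self, fname):
    # Hedged sketch of the helper called by read_data() above; the original
    # file presumably defines it elsewhere. It simply counts lines in a file.
    with open(fname, 'r') as f:
        return sum(1 for _ in f)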
def detect_obstacles(self, distance):
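# The scan has 1080 beams (see data_augmentation below). Beams 420-660 are
# taken as the forward-facing window and split into four 60-beam sectors;
# obs_flag holds each sector's mean range, clipped to 250 and normalised.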
obs_flag = np.zeros(4)  # float array; an int array would truncate the sector means to 0
local_distance = np.clip(distance, a_min=0, a_max=250)/250
obs_flag[0] = local_distance[420:480].mean()
obs_flag[1] = local_distance[480:540].mean()
obs_flag[2] = local_distance[540:600].mean()
obs_flag[3] = local_distance[600:660].mean()
return obs_flag
def data_augmentation(self, clip_dist, obs_flag, encoders):
'''
augment training data with additive Gaussian noise
'''
clip_dist_new = clip_dist + 0.1 * clip_dist * np.random.randn(1080)
clip_dist_new = np.clip(clip_dist_new, a_min=0, a_max=1)
obs_flag_new = obs_flag + 0.4 * obs_flag * np.random.randn(4)
obs_flag_new =
|
np.clip(obs_flag_new, a_min=0, a_max=1)
|
numpy.clip
|
import numpy as np
from numpy.core.numeric import zeros_like
from scipy.ndimage.measurements import center_of_mass
import cv2 as cv
import scipy.ndimage as ndi
from .. import *
def get_morphology(tcfcell:TCFcell):
# get cellmask slice
cellmask = tcfcell['mask']
def _itr_or(array):
# boolean array
z = array.shape[0]
if z == 1:
return np.squeeze(array,0)
if z == 2:
return np.logical_or(array[0,:,:],array[1,:,:],out=array[0,:,:])
else:
zhalf = z//2
return np.logical_or(_itr_or(array[:zhalf]),_itr_or(array[zhalf:]))
cellmask_slice = ndi.binary_fill_holes(_itr_or(cellmask)).astype(np.uint8)
cellmask_slice[cellmask_slice > 0] = 255
# find morphologies
countour, hierarchy = cv.findContours(cellmask_slice,cv.RETR_LIST, cv.CHAIN_APPROX_NONE)
cnt = countour[0]
center_rect,size_rect,angle_rect = cv.minAreaRect(cnt) # angle is in degree
tcfcell['centerR'] = center_rect
tcfcell['sizeR'] = size_rect
tcfcell['angleR'] = angle_rect
if len(cnt) > 5:
ellipse = cv.fitEllipse(cnt)
tcfcell['centerE'] = ellipse[0]
tcfcell['rotE'] = ellipse[2]
tcfcell['widthE'] = ellipse[1][0]
tcfcell['heightE'] = ellipse[1][1]
def get_ellipsoid(tcfcell:TCFcell):
cellmask = tcfcell['mask'].astype(np.uint8)
cellmask[cellmask > 0] = 255
# find contours
points = []
for z in range(cellmask.shape[0]):
cellmask_slice = cellmask[z,...]
contour, hierarchy = cv.findContours(cellmask_slice,cv.RETR_LIST, cv.CHAIN_APPROX_NONE)
if len(contour) == 0:
continue
else:
points_slice = np.empty((contour[0].shape[0],3),dtype=np.uint16)
points_slice[:,0] = z
points_slice[:,1] = contour[0][:,0,1] #y
points_slice[:,2] = contour[0][:,0,0] #x
points.append(points_slice)
points = np.concatenate(points)
center, evecs, radii = ellipsoid_fit(points)
tcfcell['center_Ellipsoid'] = tuple(center)
tcfcell['evecs_Ellipsoid'] = tuple(evecs)
tcfcell['radii_Ellipsoid'] = tuple(radii)
# https://github.com/aleksandrbazhin/ellipsoid_fit_python
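# The fit below solves, in the least-squares sense,
#   u1*(x^2+y^2-2z^2) + u2*(x^2+z^2-2y^2) + 2*u3*x*y + 2*u4*x*z + 2*u5*y*z
#     + 2*u6*x + 2*u7*y + 2*u8*z + u9  ~=  x^2 + y^2 + z^2
# for the nine parameters u via the normal equations u = (D D^T)^{-1} D d2;
# the row "1 - 0 * x" is simply a vector of ones of the right length.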
def ellipsoid_fit(X):
x = X[:, 0]
y = X[:, 1]
z = X[:, 2]
D = np.array([x * x + y * y - 2 * z * z,
x * x + z * z - 2 * y * y,
2 * x * y,
2 * x * z,
2 * y * z,
2 * x,
2 * y,
2 * z,
1 - 0 * x])
d2 = np.array(x * x + y * y + z * z).T # rhs for LLSQ
u = np.linalg.solve(D.dot(D.T), D.dot(d2))
a = np.array([u[0] + 1 * u[1] - 1])
b =
|
np.array([u[0] - 2 * u[1] - 1])
|
numpy.array
|
#!/usr/bin/env python
import sys
import os
file_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(file_dir+'/../neural_networks')
import numpy as np
import numpy.matlib
import pickle
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib.pyplot as plt
import matplotlib
import copy
import time
import nn_navigation_value_multi as nn_nav
# the same module is also referenced as nn_nav_multi further down (assumed alias)
nn_nav_multi = nn_nav
import nn_rl_multi as nn_rl
import pedData_processing_multi as pedData
import global_var as gb
# setting up global variables
COLLISION_COST = gb.COLLISION_COST
DIST_2_GOAL_THRES = gb.DIST_2_GOAL_THRES
GETTING_CLOSE_PENALTY = gb.GETTING_CLOSE_PENALTY
GETTING_CLOSE_RANGE = gb.GETTING_CLOSE_RANGE
EPS = gb.EPS
# terminal states
NON_TERMINAL = gb.NON_TERMINAL
COLLIDED = gb.COLLIDED
REACHED_GOAL = gb.REACHED_GOAL
# plotting colors
plt_colors = gb.plt_colors
GAMMA = gb.RL_gamma
DT_NORMAL = gb.RL_dt_normal
''' generate plots (or instructions for how to generate plots) '''
# need to first generate test cases using gen_results.py
# then generate trajs using rvo (roslaunch rvo_ros rvo_traj_gen_multi.launch)
# then generate trajs using neural nets on the same test cases
# (nn_navigation_value_multi.py)
# plot trajectories to a number of test cases at various training episodes
def plot_training_process(file_dir, format_str, num_agents):
plt.rcParams.update({'font.size': 30})
save_folder_dir = file_dir + "/../../pickle_files/multi/results/figures/"
# plot rvo trajs
# rvo_trajs_filename = file_dir + \
# "/../../pickle_files/multi/results/hard_rvo_trajs_raw.p"
# rvo_trajs = pickle.load(open(rvo_trajs_filename, "rb"))
# for i, traj in enumerate(rvo_trajs):
# nn_nav_multi.plot_traj_raw_multi(traj, '')
# plt.title('')
# file_name = 'rvo_case_'+str(i)+format_str
# plt.savefig(save_folder_dir+file_name,bbox_inches='tight')
# print 'saved', file_name
# plot neural network trajs
test_cases = pickle.load(open(file_dir + \
"/../../pickle_files/multi/results/%d_agents_hard_test_cases.p"%num_agents, "rb"))
iterations = [0, 50, 500, 800, 1000]
# load multiagent neural network
# load nn_rl
mode = 'no_constr'; passing_side = 'right'
for iteration in iterations:
# mode = 'rotate_constr'
filename = "%d_agents_policy_iter_%d.p"%(num_agents, iteration)
# filename=None
NN_value_net_multi = nn_nav.load_NN_navigation_value(file_dir, mode, passing_side, filename=filename)
nn_trajs_filename = file_dir + \
"/../../pickle_files/multi/results/%d_agents_hard_nn_trajs_iter_%d.p"%(num_agents,iteration)
for i, test_case in enumerate(test_cases):
traj, time_to_complete = \
NN_value_net_multi.generate_traj(test_case, figure_name='%d_agents_network'%num_agents)
pedData.plot_traj_raw_multi(traj, '', figure_name='training_process')
plt.title('')
file_name = '%d_agents_nn_iter_%d_case_%d'%(num_agents,iteration,i)+format_str
plt.savefig(save_folder_dir+file_name,bbox_inches='tight')
print('saved', file_name)
# load training score file and plot training score (value) as a function of episodes
def plot_convergence(file_dir, format_str, num_agents):
plt.rcParams.update({'font.size': 30})
save_folder_dir = file_dir + "/../../pickle_files/multi/results/figures/"
score_fname = file_dir+"/../../pickle_files/multi/no_constr" \
+"/RL_training_score.p"
scores = pickle.load(open(score_fname,"rb"))
stride = 5
fig = plt.figure('training score', figsize=(10,8))
plt.clf()
test_cases = nn_rl.preset_testCases()
episodes = stride * np.arange(len(scores))
num_cases = scores[0].shape[0] // 3  # integer division so it can be used for slicing below
scores_np = np.asarray(scores)
time_vec = scores_np[:,0:num_cases]
collision_vec = scores_np[:,num_cases:2*num_cases]
value_vec = scores_np[:,2*num_cases:3*num_cases]
color_counter = 0
for i in [6,1,7]:
test_case = test_cases[i]
dist_2_goal = np.linalg.norm(test_case[0, 0:2] - test_case[0, 2:4])
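# upper_bnd is the best attainable discounted value: heading straight to the
# goal at the preferred speed takes about dist_2_goal / DT_NORMAL steps, each
# discounted by GAMMA.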
upper_bnd = GAMMA ** (dist_2_goal / DT_NORMAL)
color = plt_colors[color_counter]
plt.plot(episodes, value_vec[:,i], c=color, linewidth=2)
# print upper_bnd
# plt.plot(episodes, upper_bnd * np.ones(episodes.shape), \
# c=color, ls='--', linewidth=2)
color_counter += 1
color_counter %= 7  # wrap around the colour palette
# plt.plot(episodes, )
plt.xlabel('episode')
plt.ylabel('value')
# plotting style (only show axis on bottom and left)
ax = plt.gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
# plt.xlim(0,16)
# plt.ylim(0.25,0.5)
plt.draw()
plt.pause(0.0001)
plt.savefig(save_folder_dir+"/convergence"+format_str,bbox_inches='tight')
# plot value function (test case)
# may need to comment out some lines in plot_ped_testCase()
def plot_value_function(file_dir, format_str):
plt.rcParams.update({'font.size': 36})
save_folder_dir = file_dir + "/../../pickle_files/multi/results/figures/"
# define test case
dist_to_goal = 2.5; pref_speed = 1.0; cur_speed= 1.0; cur_heading = np.pi/5.0;
other_vx = 0.0; other_vy = 1.0; rel_pos_x = 1.5; rel_pos_y = -0.8;
self_radius = 0.3; other_radius = 0.3;
vx = pref_speed * np.cos(cur_heading);
vy = pref_speed * np.sin(cur_heading);
dist_2_other = np.linalg.norm(np.array([rel_pos_x, rel_pos_y])) - \
self_radius - other_radius  # centre-to-centre distance minus both radii
x = [dist_to_goal, pref_speed, cur_speed, cur_heading, \
other_vx, other_vy, rel_pos_x, rel_pos_y, self_radius, \
other_radius, self_radius+other_radius, vx, vy, dist_2_other]
y = 0.5
# load 2 agent 'no rotate' neural network
mode = 'no_constr'; passing_side = 'right'
iteration = 1000
filename = "twoAgents_policy_iter_%d.p"%iteration
nn_navigation = nn_nav.load_NN_navigation_value(file_dir, mode, passing_side, filename)
nn_navigation.plot_ped_testCase(x, y, ' ', \
'test_case in no_constr')
plt.subplot(121); plt.title('')
plt.subplot(122); plt.title('')
fig = plt.gcf()
fig.tight_layout()
file_name = 'value_func_no_constr' + format_str
plt.savefig(save_folder_dir+file_name,bbox_inches='tight')
# load 2 agent 'rotate constr' neural network
mode = 'rotate_constr'; passing_side = 'right'
iteration = 500
filename = "twoAgents_policy_iter_%d.p"%iteration
nn_navigation = nn_nav.load_NN_navigation_value(file_dir, mode, passing_side, filename)
nn_navigation.plot_ped_testCase(x, y, ' ', \
'test_case rotate_constr')
plt.subplot(121); plt.title('')
plt.subplot(122); plt.title('')
fig = plt.gcf()
fig.tight_layout()
file_name = 'value_func_rotate_constr' + format_str
plt.savefig(save_folder_dir+file_name,bbox_inches='tight')
def plot_multi_agent_cases(file_dir, format_str):
plt.rcParams.update({'font.size': 28})
save_folder_dir = file_dir + "/../../pickle_files/multi/results/figures/"
# load multiagent neural network
# load nn_rl
# mode = 'no_constr'
mode = 'rotate_constr'; passing_side = 'right'
iteration = 1000
filename = "twoAgents_policy_iter_%d.p"%iteration
# filename=None
value_net = nn_nav.load_NN_navigation_value(file_dir, mode, passing_side, filename)
NN_navigation_multi = nn_nav_multi.NN_navigation_value_multi(value_net)
# six agent swap
test_cases = nn_nav_multi.preset_testCases()
traj_raw_multi, time_to_complete = \
NN_navigation_multi.generate_traj(test_cases[2], figure_name='method 1', method=1)
# raw_input()
plt.title('')
file_name = 'multi_traj_0' + format_str
plt.savefig(save_folder_dir+file_name,bbox_inches='tight')
traj_raw_multi, time_to_complete = \
NN_navigation_multi.generate_traj(test_cases[3], figure_name='method 1', method=1)
# raw_input()
plt.title('')
file_name = 'multi_traj_1' + format_str
plt.savefig(save_folder_dir+file_name,bbox_inches='tight')
# random test cases
for i in range(2, 10):
# np.random.seed(seed)
# seed+=1
# print 'seed', seed
# is_end_near_bnd = np.random.binomial(1, 0.5)
num_agents = 4
side_length = 3
test_case = nn_nav_multi.generate_rand_test_case_multi( num_agents, side_length,\
np.array([0.5, 1.2]), \
np.array([0.3, 0.5]), is_end_near_bnd=True)
traj_raw_multi, time_to_complete = \
NN_navigation_multi.generate_traj(test_case, figure_name='method 1', method=1)
plt.title('')
file_name = 'multi_traj_%d'%i + format_str
plt.savefig(save_folder_dir+file_name,bbox_inches='tight')
print('generated traj %d' %i)
input()  # pause so the generated figures can be inspected
pass
# may need to change color setting in global_var.py
# change all except the last one
def plot_static_case(file_dir, format_str):
plt.rcParams.update({'font.size': 34})
save_folder_dir = file_dir + "/../../pickle_files/multi/results/figures/"
# load multiagent neural network
# load nn_rl
# mode = 'no_constr'; passing_side = 'right'
mode = 'rotate_constr'; passing_side = 'right'
iteration = 1000
filename = "twoAgents_policy_iter_%d.p"%iteration
# filename=None
value_net = nn_nav.load_NN_navigation_value(file_dir, mode, passing_side, filename)
NN_navigation_multi = nn_nav_multi.NN_navigation_value_multi(value_net)
test_case = np.array([[-3.0, -3.0, 3.0, 3.0, 1.0, 0.3],\
[-2.0, -2.0, -2.0, -2.0, 1.0, 0.42],\
[-3.0, -0.0, -3.0, -0.0, 1.0, 0.4],\
[-1.5, 3.0, -1.5, 3.0, 1.0, 0.5],\
[0.0, -0.5, 0.0, -0.5, 1.0, 0.4],\
[0.5, 2.0, 0.5, 2.0, 1.0, 0.5],\
[0.5, -1.8, 0.5, -1.8, 1.0, 0.41],\
[3.0, 0.0, 3.0, 0.0, 1.0, 0.36],\
[2.0, -3.0, 2.0, -3.0, 1.0, 0.37]])
traj_raw_multi, time_to_complete = \
NN_navigation_multi.generate_traj(test_case, figure_name='method 2', method=1)
plt.title('')
plt.locator_params(axis='y',nbins=4)
plt.locator_params(axis='x',nbins=6)
file_name = 'multi_traj_static' + format_str
plt.savefig(save_folder_dir+file_name,bbox_inches='tight')
def plot_non_coop_case(file_dir, format_str):
plt.rcParams.update({'font.size': 34})
save_folder_dir = file_dir + "/../../pickle_files/multi/results/figures/"
# load multiagent neural network
# load nn_rl
mode = 'no_constr'; passing_side = 'right'
# mode = 'rotate_constr'; passing_side = 'right'
iteration = 1000
filename = "twoAgents_policy_iter_%d.p"%iteration
# filename=None
value_net = nn_nav.load_NN_navigation_value(file_dir, mode, passing_side, filename)
NN_navigation_multi = nn_nav_multi.NN_navigation_value_multi(value_net)
test_case = np.array([[-3.0, 0.0, 3.0, 0.0, 1.0, 0.5],\
[3.0, 0.0, -3.0, 0.0, 1.0, 0.5]])
traj_raw_multi, time_to_complete = \
NN_navigation_multi.generate_traj(test_case, figure_name='method 2', method=1)
plt.title('')
plt.locator_params(axis='y',nbins=5)
plt.locator_params(axis='x',nbins=5)
file_name = 'multi_traj_non_coop' + format_str
plt.savefig(save_folder_dir+file_name,bbox_inches='tight')
pass
def generate_trajs_for_comparison_cases(file_dir, format_str):
save_folder_dir = file_dir + "/../../pickle_files/multi/results/figures/"
# generate test cases
num_agents_vec = [2, 4 ,6, 8]
side_length_vec = [2.0, 2.5, 3.0, 3.5]
num_test_cases = 100
for i, num_agents in enumerate(num_agents_vec):
|
np.random.seed(1)
|
numpy.random.seed
|
import numpy as np
import numpy.testing as npt
import unittest
import wisdem.rotorse.rotor_aeropower as ra
import openmdao.api as om
import copy
import time
import os
ARCHIVE = os.path.dirname(os.path.abspath(__file__)) + os.path.sep + 'regulation.npz'
class TestRotorAero(unittest.TestCase):
def setUp(self):
self.inputs = {}
self.outputs = {}
self.discrete_inputs = {}
self.discrete_outputs = {}
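# Note for the expected-value expressions below: with tsr = 10 and Rtip = 70 m,
# the rotor speed that tracks the tip-speed ratio is Omega = V*tsr/Rtip [rad/s],
# i.e. V*tsr*60/(Rtip*2*pi) in rpm -- hence the recurring V_expect1*10*60/70./2./np.pi.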
def testRegulationTrajectory(self):
# Load in airfoil and blade shape inputs for NREL 5MW
npzfile = np.load(ARCHIVE)
self.inputs['airfoils_aoa'] = npzfile['aoa']
self.inputs['airfoils_Re'] = npzfile['Re']
self.inputs['airfoils_cl'] = npzfile['cl']
self.inputs['airfoils_cd'] = npzfile['cd']
self.inputs['airfoils_cm'] = npzfile['cm']
self.inputs['r'] = npzfile['r']
self.inputs['chord'] = npzfile['chord']
self.inputs['theta'] = npzfile['theta']
naero = self.inputs['r'].size
n_aoa_grid = self.inputs['airfoils_aoa'].size
n_Re_grid = self.inputs['airfoils_Re'].size
n_pc = 22
# parameters
self.inputs['control_Vin'] = 4.
self.inputs['control_Vout'] = 25.
self.inputs['control_ratedPower'] = 5e6
self.inputs['control_minOmega'] = 0.0
self.inputs['control_maxOmega'] = 100.0
self.inputs['control_maxTS'] = 90.
self.inputs['control_tsr'] = 10.
self.inputs['control_pitch'] = 0.0
self.discrete_inputs['drivetrainType'] = 'GEARED'
self.inputs['drivetrainEff'] = 0.95
self.inputs['Rhub'] = 1.
self.inputs['Rtip'] = 70.
self.inputs['hub_height'] = 100.
self.inputs['precone'] = 0.
self.inputs['tilt'] = 0.
self.inputs['yaw'] = 0.
self.inputs['precurve'] = np.zeros(naero)
self.inputs['precurveTip'] = 0.
self.inputs['presweep'] = np.zeros(naero)
self.inputs['presweepTip'] = 0.
self.discrete_inputs['nBlades'] = 3
self.inputs['rho'] = 1.225
self.inputs['mu'] = 1.81206e-5
self.inputs['shearExp'] = 0.25
self.discrete_inputs['nSector'] = 4
self.discrete_inputs['tiploss'] = True
self.discrete_inputs['hubloss'] = True
self.discrete_inputs['wakerotation'] = True
self.discrete_inputs['usecd'] = True
myobj = ra.RegulatedPowerCurve(naero=naero, n_aoa_grid=n_aoa_grid, n_Re_grid=n_Re_grid, n_pc=n_pc, n_pc_spline=n_pc,
regulation_reg_II5=True, regulation_reg_III=True)
myobj.naero = naero
# All reg 2: no maxTS, no max rpm, no power limit
self.inputs['control_maxOmega'] = 1e3
self.inputs['control_maxTS'] = 1e5
self.inputs['control_ratedPower'] = 1e16
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
V_expect0 = np.linspace(4, 25, n_pc)
V_expect1 = V_expect0.copy()
#V_expect1[7] = self.outputs['rated_V']
Omega_tsr = V_expect1*10*60/70./2./np.pi
npt.assert_equal(self.outputs['V'], V_expect1)
npt.assert_equal(self.outputs['V_spline'], V_expect0)
npt.assert_allclose(self.outputs['Omega'], Omega_tsr)
npt.assert_equal(self.outputs['pitch'], np.zeros( V_expect0.shape ) )
npt.assert_equal(self.outputs['Cp'], self.outputs['Cp_aero']*0.95)
npt.assert_allclose(self.outputs['Cp'], self.outputs['Cp'][0])
npt.assert_allclose(self.outputs['Cp_aero'], self.outputs['Cp_aero'][0])
myCp = self.outputs['P']/(0.5*1.225*V_expect1**3.*np.pi*70**2)
npt.assert_allclose(myCp, myCp[0])
self.assertGreater(myCp[0], 0.4)
self.assertGreater(0.5, myCp[0])
npt.assert_allclose(myCp, self.outputs['Cp'])
npt.assert_array_less(self.outputs['P'][:-1], self.outputs['P'][1:])
npt.assert_array_less(self.outputs['Q'][:-1], self.outputs['Q'][1:])
npt.assert_array_less(self.outputs['T'][:-1], self.outputs['T'][1:])
self.assertEqual(self.outputs['rated_V'], V_expect1[-1])
self.assertAlmostEqual(self.outputs['rated_Omega'], Omega_tsr[-1])
self.assertEqual(self.outputs['rated_pitch'], 0.0)
# Test no maxTS, max rpm, no power limit
self.inputs['control_maxOmega'] = 15.0
self.inputs['control_maxTS'] = 1e5
self.inputs['control_ratedPower'] = 1e16
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
V_expect0 = np.linspace(4, 25, n_pc)
V_expect1 = V_expect0.copy()
#V_expect1[7] = 15.*70*2*np.pi/(10.*60.)
Omega_tsr = V_expect1*10*60/70./2./np.pi
Omega_expect = np.minimum(Omega_tsr, 15.0)
npt.assert_allclose(self.outputs['V'], V_expect1)
npt.assert_equal(self.outputs['V_spline'], V_expect0)
npt.assert_allclose(self.outputs['Omega'], Omega_expect)
npt.assert_equal(self.outputs['pitch'][:7], 0.0 )
npt.assert_array_less(0.0, np.abs(self.outputs['pitch'][7:]))
npt.assert_equal(self.outputs['Cp'], self.outputs['Cp_aero']*0.95)
npt.assert_array_less(self.outputs['P'][:-1], self.outputs['P'][1:])
npt.assert_array_less(self.outputs['Q'][:-1], self.outputs['Q'][1:])
npt.assert_array_less(self.outputs['T'][:-1], self.outputs['T'][1:])
self.assertAlmostEqual(self.outputs['rated_V'], V_expect1[-1], 3)
self.assertAlmostEqual(self.outputs['rated_Omega'], 15.0)
self.assertGreater(self.outputs['rated_pitch'], 0.0)
myCp = self.outputs['P']/(0.5*1.225*V_expect1**3.*np.pi*70**2)
npt.assert_allclose(myCp[:7], myCp[0])
npt.assert_allclose(myCp[:7], self.outputs['Cp'][:7])
# Test maxTS, no max rpm, no power limit
self.inputs['control_maxOmega'] = 1e3
self.inputs['control_maxTS'] = 105.0
self.inputs['control_ratedPower'] = 1e16
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
V_expect0 = np.linspace(4, 25, n_pc)
V_expect1 = V_expect0.copy()
#V_expect1[7] = 105./10.
Omega_tsr = V_expect1*10*60/70./2./np.pi
Omega_expect = np.minimum(Omega_tsr, 105./70./2/np.pi*60)
npt.assert_allclose(self.outputs['V'], V_expect1)
npt.assert_equal(self.outputs['V_spline'], V_expect0)
npt.assert_allclose(self.outputs['Omega'], Omega_expect)
npt.assert_equal(self.outputs['pitch'][:7], 0.0 )
npt.assert_array_less(0.0, np.abs(self.outputs['pitch'][7:]))
npt.assert_equal(self.outputs['Cp'], self.outputs['Cp_aero']*0.95)
npt.assert_array_less(self.outputs['P'][:-1], self.outputs['P'][1:])
npt.assert_array_less(self.outputs['Q'][:-1], self.outputs['Q'][1:])
npt.assert_array_less(self.outputs['T'][:-1], self.outputs['T'][1:])
self.assertEqual(self.outputs['rated_V'], V_expect1[-1])
self.assertAlmostEqual(self.outputs['rated_Omega'], Omega_expect[-1])
self.assertGreater(self.outputs['rated_pitch'], 0.0)
myCp = self.outputs['P']/(0.5*1.225*V_expect1**3.*np.pi*70**2)
npt.assert_allclose(myCp[:7], myCp[0])
npt.assert_allclose(myCp[:7], self.outputs['Cp'][:7])
# Test no maxTS, no max rpm, power limit
self.inputs['control_maxOmega'] = 1e3
self.inputs['control_maxTS'] = 1e4
self.inputs['control_ratedPower'] = 5e6
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
V_expect0 = np.linspace(4, 25, n_pc)
V_expect1 = V_expect0.copy()
V_expect1[7] = self.outputs['rated_V']
Omega_tsr = V_expect1*10*60/70./2./np.pi
Omega_expect = np.minimum(Omega_tsr, self.outputs['rated_Omega'])
npt.assert_allclose(self.outputs['V'], V_expect1)
npt.assert_equal(self.outputs['V_spline'], V_expect0)
npt.assert_allclose(self.outputs['Omega'], Omega_expect)
npt.assert_equal(self.outputs['pitch'][:7], 0.0 )
npt.assert_array_less(0.0, np.abs(self.outputs['pitch'][8:]))
npt.assert_equal(self.outputs['Cp'], self.outputs['Cp_aero']*0.95)
npt.assert_array_less(self.outputs['P'][:7], self.outputs['P'][1:8])
npt.assert_allclose(self.outputs['P'][7:], 5e6, rtol=1e-4, atol=0)
#npt.assert_array_less(self.outputs['Q'], self.outputs['Q'][1:])
npt.assert_array_less(self.outputs['T'], self.outputs['T'][7]+1e-1)
#self.assertEqual(self.outputs['rated_V'], V_expect1[-1])
self.assertAlmostEqual(self.outputs['rated_Omega'], Omega_expect[-1])
self.assertEqual(self.outputs['rated_pitch'], 0.0)
myCp = self.outputs['P']/(0.5*1.225*V_expect1**3.*np.pi*70**2)
npt.assert_allclose(myCp[:7], myCp[0])
npt.assert_allclose(myCp[:7], self.outputs['Cp'][:7])
# Test min & max rpm, no power limit
self.inputs['control_minOmega'] = 7.0
self.inputs['control_maxOmega'] = 15.0
self.inputs['control_maxTS'] = 1e5
self.inputs['control_ratedPower'] = 1e16
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
V_expect0 = np.linspace(4, 25, n_pc)
V_expect1 = V_expect0.copy()
#V_expect1[7] = 15.*70*2*np.pi/(10.*60.)
Omega_tsr = V_expect1*10*60/70./2./np.pi
Omega_expect = np.maximum( np.minimum(Omega_tsr, 15.0), 7.0)
npt.assert_allclose(self.outputs['V'], V_expect1)
npt.assert_equal(self.outputs['V_spline'], V_expect0)
npt.assert_allclose(self.outputs['Omega'], Omega_expect)
npt.assert_array_less(0.0, np.abs(self.outputs['pitch'][Omega_expect != Omega_tsr]) )
npt.assert_equal(self.outputs['Cp'], self.outputs['Cp_aero']*0.95)
npt.assert_array_less(self.outputs['P'][:-1], self.outputs['P'][1:])
npt.assert_array_less(self.outputs['Q'][:-1], self.outputs['Q'][1:])
npt.assert_array_less(self.outputs['T'][:-1], self.outputs['T'][1:])
self.assertEqual(self.outputs['rated_V'], V_expect1[-1])
self.assertAlmostEqual(self.outputs['rated_Omega'], 15.0)
self.assertGreater(self.outputs['rated_pitch'], 0.0)
myCp = self.outputs['P']/(0.5*1.225*V_expect1**3.*np.pi*70**2)
npt.assert_allclose(myCp[Omega_expect == Omega_tsr], myCp[6])
npt.assert_allclose(myCp[Omega_expect == Omega_tsr], self.outputs['Cp'][Omega_expect == Omega_tsr])
# Test fixed pitch
self.inputs['control_minOmega'] = 0.0
self.inputs['control_maxOmega'] = 15.0
self.inputs['control_maxTS'] = 1e5
self.inputs['control_ratedPower'] = 1e16
self.inputs['control_pitch'] = 5.0
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
V_expect0 = np.linspace(4, 25, n_pc)
V_expect1 = V_expect0.copy()
#V_expect1[7] = 15.*70*2*np.pi/(10.*60.)
Omega_tsr = V_expect1*10*60/70./2./np.pi
Omega_expect = np.minimum(Omega_tsr, 15.0)
npt.assert_allclose(self.outputs['V'], V_expect1)
npt.assert_equal(self.outputs['V_spline'], V_expect0)
npt.assert_allclose(self.outputs['Omega'], Omega_expect)
npt.assert_equal(self.outputs['pitch'][:7], 5.0 )
npt.assert_array_less(0.0, np.abs(self.outputs['pitch'][7:]))
npt.assert_equal(self.outputs['Cp'], self.outputs['Cp_aero']*0.95)
npt.assert_array_less(self.outputs['P'][:-1], self.outputs['P'][1:])
npt.assert_array_less(self.outputs['Q'][:-1], self.outputs['Q'][1:])
npt.assert_array_less(self.outputs['T'][:-1], self.outputs['T'][1:])
self.assertAlmostEqual(self.outputs['rated_V'], V_expect1[-1], 3)
self.assertAlmostEqual(self.outputs['rated_Omega'], 15.0)
self.assertGreater(self.outputs['rated_pitch'], 5.0)
myCp = self.outputs['P']/(0.5*1.225*V_expect1**3.*np.pi*70**2)
npt.assert_allclose(myCp[:7], myCp[0])
npt.assert_allclose(myCp[:7], self.outputs['Cp'][:7])
def testRegulationTrajectoryNoRegion3(self):
# Load in airfoil and blade shape inputs for NREL 5MW
npzfile = np.load(ARCHIVE)
self.inputs['airfoils_aoa'] = npzfile['aoa']
self.inputs['airfoils_Re'] = npzfile['Re']
self.inputs['airfoils_cl'] = npzfile['cl']
self.inputs['airfoils_cd'] = npzfile['cd']
self.inputs['airfoils_cm'] = npzfile['cm']
self.inputs['r'] = npzfile['r']
self.inputs['chord'] = npzfile['chord']
self.inputs['theta'] = npzfile['theta']
naero = self.inputs['r'].size
n_aoa_grid = self.inputs['airfoils_aoa'].size
n_Re_grid = self.inputs['airfoils_Re'].size
n_pc = 22
# parameters
self.inputs['control_Vin'] = 4.
self.inputs['control_Vout'] = 25.
self.inputs['control_ratedPower'] = 5e6
self.inputs['control_minOmega'] = 0.0
self.inputs['control_maxOmega'] = 100.0
self.inputs['control_maxTS'] = 90.
self.inputs['control_tsr'] = 10.
self.inputs['control_pitch'] = 0.0
self.discrete_inputs['drivetrainType'] = 'GEARED'
self.inputs['drivetrainEff'] = 0.95
self.inputs['Rhub'] = 1.
self.inputs['Rtip'] = 70.
self.inputs['hub_height'] = 100.
self.inputs['precone'] = 0.
self.inputs['tilt'] = 0.
self.inputs['yaw'] = 0.
self.inputs['precurve'] =
|
np.zeros(naero)
|
numpy.zeros
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import random
from op_test import OpTest
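# Note on the LoD arguments below: gt_lod and neg_lod are offset vectors, e.g.
# gt_lod = [0, 2, 5] means sample 0 owns ground-truth boxes 0..2 and sample 1
# owns boxes 2..5, which is why batch_size = len(gt_lod) - 1.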
def gen_match_and_neg_indices(num_prior, gt_lod, neg_lod):
if len(gt_lod) != len(neg_lod):
raise AssertionError("The input arguments are illegal.")
batch_size = len(gt_lod) - 1
match_indices = -1 * np.ones((batch_size, num_prior)).astype('int32')
neg_indices = np.zeros((neg_lod[-1], 1)).astype('int32')
for n in range(batch_size):
gt_num = gt_lod[n + 1] - gt_lod[n]
ids = random.sample([i for i in range(num_prior)], gt_num)
match_indices[n, ids] = [i for i in range(gt_num)]
ret_ids = set([i for i in range(num_prior)]) - set(ids)
s = neg_lod[n]
e = neg_lod[n + 1]
l = e - s
neg_ids = random.sample(list(ret_ids), l)  # random.sample no longer accepts sets directly
neg_indices[s:e, :] =
|
np.array(neg_ids)
|
numpy.array
|
from abc import abstractmethod
import numpy as np
from scipy.interpolate import griddata
import matplotlib.pyplot as plt
import csv
# ********* Geometric *********
class Shape:
"""
An abstract class for geometric shapes defining some key methods required
"""
@abstractmethod
def get_perimeter(self, start, end, num_points):
"""
Create a list of points between the user defined start and end positions on the perimeter of the shape
:param start: Position at which the list of points should begin
:type start: float
:param end: Position at which the list of points should end
:type end: float
:param num_points: Number of points
:type num_points: int
:return: A list of points (x,y) evenly spaced on the perimeter of the shape between the start and end positions
:rtype: numpy.ndarray
"""
pass
@abstractmethod
def get_grid(self, spacing):
"""
Create a grid of points spaced uniformly across the shape
:param spacing: Spacing between points in the grid
:type spacing: float
:return: A list of points (x,y) uniformly space across the shape
:rtype: numpy.ndarray
"""
pass
@abstractmethod
def is_point_inside(self, point):
"""
Check whether or not a point is inside the shape
:param point: list/tuple of the coordinates (x, y) of a point
:type point: list
:return: A bool stating whether or not the point is within the shape
:rtype: bool
"""
pass
class Circle(Shape):
"""
A geometric class for a circle
Attributes
----------
centre : list
A list of coordinates (x, y) describing the centre of the circle
radius : float
The radius of the circle
"""
def __init__(self, centre, radius):
"""
Creates a circle
:param centre: The coordinates (x,y) of centre of the circle
:type centre: list
:param radius: The radius of the circle
:type radius: float
:rtype: Circle
"""
self._centre = centre
self._radius = radius
@property
def centre(self):
"""
A list of coordinates (x, y) describing the centre of the circle
:return: (x, y) of the centre of the circle
:rtype: list
"""
return self._centre
@property
def radius(self):
"""
The radius of the circle
:return: The radius
:rtype: float
"""
return self._radius
def get_circular_points(self, start_angle, end_angle, num_points, radius, decimal_places=None):
"""
Create a list of points between the user defined start and end angles (in degrees) on the perimeter of a new circle sharing
the centre point of this circle with a different radius
:param start_angle: Position at which the list of points should begin
:type start_angle: float
:param end_angle: Position at which the list of points should end
:type end_angle: float
:param num_points: Number of points
:type num_points: int
:param radius: Radius of the circle on which the points are placed
:type radius: float
:param decimal_places: Number of decimal places the coordinates are returned with - None: there is no rounding
:type decimal_places: int
:return: An array of points (x,y) evenly spaced on the perimeter of the new circle between the start and end angles
:rtype: numpy.ndarray
"""
points = np.zeros((num_points, 2), float)
full_angle = 180 - abs(abs(end_angle - start_angle) - 180)
if full_angle == 0:
full_angle = 360
delta_angle = full_angle / num_points
for i in range(num_points):
points[i][0] = self._centre[0] + np.cos(np.radians(90 + start_angle + delta_angle * i)) * radius
points[i][1] = self._centre[1] + np.sin(np.radians(90 + start_angle + delta_angle * i)) * radius
if decimal_places is not None:
return np.array(np.around(points, decimal_places))
else:
return np.array(points)
def get_perimeter(self, start_angle, end_angle, num_points, decimal_places=None):
"""
Create a list of points between the user defined start and end angles on the perimeter of the circle
:param start_angle: Position at which the list of points should begin
:type start_angle: float
:param end_angle: Position at which the list of points should end
:type end_angle: float
:param num_points: Number of points
:type num_points: int
:param decimal_places: Number of decimal places the coordinates are returned with - None: there is no rounding
:type decimal_places: int
:return: A list of points (x,y) evenly spaced on the perimeter of the shape between the start and end angles
:rtype: numpy.ndarray
"""
return np.array(self.get_circular_points(start_angle, end_angle, num_points, self._radius, decimal_places))
def get_grid(self, spacing, alpha=2):
"""
Create a grid of points spaced uniformly across the circle using the sunflower seed arrangement algorithm
:param spacing: Approximate spacing between points in the grid
:type spacing: float
:param alpha: Determines the evenness of the boundary - 0 is jagged, 2 is smooth. Above 2 is not recommended
:type alpha: float
:return: A list of points (x,y) uniformly spaced across the circle
:rtype: numpy.ndarray
"""
# Algorithm is found at the stack overflow thread linked below:
# https://stackoverflow.com/questions/28567166/uniformly-distribute-x-points-inside-a-circle
# Calculates the number of points (n) from the spacing
area = np.pi * self._radius**2
n = int(area / spacing**2)
points = np.zeros((n, 2), float)
b = int(alpha * np.sqrt(n)) # number of boundary points
golden_ratio = (np.sqrt(5) + 1) / 2
for point in range(1, n + 1):
if point > n - b:
r = 1
else:
r = np.sqrt(point - 1 / 2) / np.sqrt(n - (b + 1) / 2)
theta = 2 * np.pi * point / golden_ratio**2
points[point - 1][0] = self._centre[0] + r*np.cos(theta) * self._radius
points[point - 1][1] = self._centre[1] + r*np.sin(theta) * self._radius
return np.array(points)
def is_point_inside(self, point):
"""
Check whether or not a point is inside the circle
:param point: List/tuple of the coordinates (x, y) of a point
:type point: list
:return: A bool stating whether or not the point is within the circle
:rtype: bool
"""
# checks if the distance from the centre of the circle to the point, d, is less than or equal to the radius
d = np.sqrt((point[0] - self.centre[0])**2 + (point[1] - self.centre[1])**2)
return d <= self.radius
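# Hedged usage sketch (not part of the original module): a unit circle sampled
# on its perimeter and filled with a sunflower-pattern grid.
def _example_circle_usage():
    c = Circle(centre=[0.0, 0.0], radius=1.0)
    edge = c.get_perimeter(0, 180, num_points=8, decimal_places=3)
    grid = c.get_grid(spacing=0.2)
    return edge, grid, c.is_point_inside((0.5, 0.5))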
class Rectangle(Shape):
"""
A geometric class for a rectangle
Attributes
----------
coordinate : list
A list of coordinates (x, y) describing the centre or bottom left of the rectangle
width : float
The width of the rectangle
height : float
The height of the rectangle
coordinate_pos : str
Describes the position of the coordinate parameter - either "centre" or "bottom left"
"""
def __init__(self, coordinate, width, height, coordinate_pos="bottom left"):
"""
Creates a rectangle
:param coordinate: A list of coordinates (x, y) describing the centre or bottom left of the rectangle
:type coordinate: list
:param width: The width of the rectangle
:type width: float
:param height: The height of the rectangle
:type height: float
:param coordinate_pos: Description of the position of the coordinate - "centre" or "bottom left" of the rectangle
:type coordinate_pos: str
:rtype: Rectangle
"""
if coordinate_pos == 'centre':
self._xy = [coordinate[0] - width / 2, coordinate[1] - height / 2]
elif coordinate_pos == 'bottom left':
self._xy = coordinate
else:
print("coordinate_pos must be in \"centre\" or \"bottom left\"")
quit(1)
self._width = width
self._height = height
@property
def xy(self):
"""
A list of coordinates (x, y) describing the bottom left of the rectangle
:return: (x, y) of the bottom left of the rectangle
:rtype: list
"""
return self._xy
@property
def width(self):
"""
The width of the rectangle
:return: The width
:rtype: float
"""
return self._width
@property
def height(self):
"""
The height of the rectangle
:return: The height
:rtype: float
"""
return self._height
def get_perimeter(self, start_point, end_point, num_points):
pass
def get_grid(self, spacing):
"""
Create a grid of points spaced uniformly across the rectangle
:param spacing: Approximate spacing between points in the grid
:type spacing: float
:return: A list of points (x,y) uniformly spaced across the rectangle
:rtype: numpy.ndarray
"""
num_x = int(np.floor(self._width / spacing)) + 1
num_y = int(np.floor(self._height / spacing)) + 1
num_points = int(num_x * num_y)
points = np.zeros((num_points, 2), float)
for x in range(num_x):
for y in range(num_y):
points[y * num_x + x][0] = self._xy[0] + x * spacing
points[y * num_x + x][1] = self._xy[1] + y * spacing
return np.array(points)
def is_point_inside(self, point):
"""
Check whether or not a point is inside the rectangle
:param point: list/tuple of the coordinates (x, y) of a point
:type point: tuple
:return: A bool stating whether or not the point is within the rectangle
:rtype: bool
"""
# checks that the x and y distance between the point and the bottom_left of the rectangle is less than the
# width and height
dx = abs(point[0] - self._xy[0]) + abs(self._xy[0] + self._width - point[0])
dy = abs(point[1] - self._xy[1]) + abs(self._xy[1] + self._height - point[1])
return dy <= self._height and dx <= self._width
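# Hedged usage sketch (not part of the original module): a 2 m x 1 m rectangle
# anchored at its centre, sampled on a 0.25 m grid.
def _example_rectangle_usage():
    r = Rectangle([0.0, 0.0], width=2.0, height=1.0, coordinate_pos='centre')
    return r.get_grid(spacing=0.25), r.is_point_inside((0.9, 0.4))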
# ********* PyZones specific setup classes *********
class Zone(Circle):
"""
A sound zone to be used in setup of the soundfield's geometry
Attributes
----------
centre : list
A list of coordinates (x, y) describing the centre of the circle
radius : float
The radius of the circle
colour : list
A list of float values (r, g, b)
"""
def __init__(self, centre, radius, colour=None):
"""
Creates a sound zone
:param centre: A list of coordinates (x, y) describing the centre of the circle
:type centre: list
:param radius: The radius of the circle
:type radius: float
:param colour: A list of float values (r, g, b) - None results in black (0, 0, 0)
:type colour: list
:rtype: Zone
"""
if colour is None:
self._colour = [0, 0, 0]
else:
self._colour = colour
Circle.__init__(self, centre, radius)
@property
def colour(self):
"""
A list of float values (r, g, b)
:return: A list of float values (r, g, b)
:rtype: list
"""
return self._colour
class Soundfield(Rectangle):
"""
The soundfield being used in the simulation. Can be thought of as the room, however no room reflections are modelled
Attributes
----------
_zones : list
A list of the zones used in the simulation.
_fig : float
The figure from the matplotlib.pyplot
_axes : float
The axes from the matplotlib.pyplot
"""
def __init__(self, coordinate, width, height, coordinate_pos="bottom left"):
"""
Creates a soundfield to be used for simulations. This class is exclusively for the graphics and visualisations
:param coordinate: A list of coordinates (x, y) describing the centre or bottom left of the rectangle
:type coordinate: list
:param width: The width of the rectangle
:type width: float
:param height: The height of the rectangle
:type height: float
:param coordinate_pos: The position of the coordinate - "centre" or "bottom left" of the rectangle
:type coordinate_pos: str
:rtype: Soundfield
"""
Rectangle.__init__(self, coordinate, width, height, coordinate_pos=coordinate_pos)
self._zones = []
self._fig = plt.figure(figsize=(6, 6), dpi=300)
self._axes = self._fig.add_subplot(111)
self._axes.set_xlim([self.xy[0], self.xy[0] + width])
self._axes.set_ylim([self.xy[1], self.xy[1] + height])
self._cax = self._fig.add_axes([0.125, 0.94, 0.775, 0.04])
def add_zones(self, zones):
"""
Add the sound zone(s) to the soundfield such that they can be seen in the visualisations of the soundfield
:param zones: The zone(s) to be added to the soundfield
:type zones: list[Zone]
"""
if type(zones) is not list:
zones = [zones]
for zone in zones:
circle = plt.Circle(zone.centre, zone.radius, fill=False)
circle.set_edgecolor(zone.colour)
self._axes.add_patch(circle)
self._zones.append(zone)
def add_sound_objects(self, *args):
"""
Add the sound objects to the soundfield such that they can be seen in the visualisations of the soundfield
:param args: a single Microphone/Loudspeaker or a MicrophoneArray/LoudspeakerArray
"""
def add_ls(ls):
centre = ls.position
x = centre[0] - (ls.width / 2)
y = centre[1] - (ls.height / 2)
angle = 0
# change the orientation of the loudspeaker such that it's looking at a point (purely aesthetic)
if ls.look_at is not None:
x_dif = ls.look_at[0] - centre[0]
y_dif = ls.look_at[1] - centre[1]
if x_dif == 0:
angle = 0
elif y_dif == 0:
angle = np.pi / 2
elif x_dif > 0:
angle = np.arctan(y_dif / x_dif) - np.pi / 2
else:
angle = np.arctan(y_dif / x_dif) + np.pi / 2
new_x = (x - centre[0]) * np.cos(angle) - (y - centre[1]) * np.sin(angle) + centre[0]
new_y = (x - centre[0]) * np.sin(angle) + (y - centre[1]) * np.cos(angle) + centre[1]
x = new_x
y = new_y
rect = plt.Rectangle((x, y), ls.width, ls.height, angle=np.rad2deg(angle), fill=False)
rect.set_edgecolor(ls.colour)
self._axes.add_patch(rect)
def add_mic(mic):
circle = plt.Circle(mic.position, mic.radius, fill=False)
circle.set_edgecolor(mic.colour)
self._axes.add_patch(circle)
for s_object in args:
if isinstance(s_object, Loudspeaker):
add_ls(s_object)
elif isinstance(s_object, LoudspeakerArray):
for item in s_object:
add_ls(item)
elif isinstance(s_object, Microphone):
add_mic(s_object)
elif isinstance(s_object, MicrophoneArray):
for item in s_object:
add_mic(item)
else:
"Please input a Microphone/Loudspeaker or MicrophoneArray/LoudspeakerArray to add."
return
def clear_graphs(self):
"""
Clear the SoundObjects and Zones from the visualisations
"""
self._axes.clear()
def plot_geometry(self, graph_name):
"""
Plot the geometry of the soundfield with any Zones or SoundObjects added
:param graph_name: The name and file location of the graph
:type graph_name: str
"""
self._axes.plot()
self._fig.savefig(graph_name)
def visualise(self, sim, graph_name, frequency=500, sf_spacing=0.1, zone_spacing=0.05, zone_alpha=2, transfer_functions=None, grid=None):
"""
Create a visualisation of the Soundfield at the given frequency. The frequency chosen must have been present in
the simulation provided. Transfer functions and visualisation microphone positions can be provided to prevent them
being calculated more than once. Should the same frequency, loudspeakers and visualisation microphone positions
be kept the same, the returned transfer functions and microphone positions can be used again. The filter weights
used will be those most recently calculated in the simulation.
:param sim: Simulation for which the visualisation is made - contains the filter weights.
:type sim: Simulation
:param graph_name: The name and file location of the graph
:type graph_name: str
:param frequency: Frequency at which the visualisation should be made - must have been present in the Simulation
:type frequency: int
:param sf_spacing: The spacing between the microphones in the grid across the soundfield in metres
:type sf_spacing: float
:param zone_spacing: The spacing between the microphones in the grid in the zone in metres
:type zone_spacing: float
:param zone_alpha: Determines the evenness of the boundary - 0 is jagged, 2 is smooth. Above 2 is not recommended
:type zone_alpha: float
:param transfer_functions: ndarray of transfer functions shape (microphones, loudspeakers). None - calculated
:type transfer_functions: numpy.ndarray
:param grid: Grid of mic positions (x, y) matching up with the provided transfer function. None - calculated
:type grid: list
:return: The grid and transfer functions used in the visualisation to prevent their unnecessary recalculation.
:rtype: numpy.ndarray, list
"""
# create the grid of microphones for visualisation
if grid is None:
points = self.get_grid(sf_spacing)
for zone in self._zones:
points = np.concatenate((points, zone.get_grid(zone_spacing, alpha=zone_alpha)), 0)
else:
points = grid
# create the transfer functions for the vis mics
if transfer_functions is None:
vis_mics = MicrophoneArray([Microphone(position=points[i]) for i in range(len(points))])
tfs = sim.calculate_transfer_functions("microphones", mic_array=vis_mics, frequency=frequency)
else:
tfs = transfer_functions
# find the simulation frequency index of the visualisation frequency
nonzero_array = np.nonzero(sim.frequencies == frequency)
if len(nonzero_array[0]) == 0:
print("Please visualise a frequency for which filter weights have been calculated")
return
freq_index = nonzero_array[0][0]
# use the most recently calculated filter weights corresponding to the visualisation frequency to calc pressure
q_matrix = np.array(sim.ls_array.get_q(frequency_index=freq_index))
p = (tfs[0] @ q_matrix[:, None]).flatten()
p = np.abs(p)
p = convert_to_db(p)
# plot the pressure
step = 0.01
xi = np.arange(self._xy[0], self._xy[0] + self._width + step, step)
yi = np.arange(self._xy[1], self._xy[1] + self._height + step, step)
xi, yi = np.meshgrid(xi, yi)
zi = griddata(points, p, (xi, yi), method='cubic', fill_value=1)
self._axes.contourf(xi, yi, zi, np.arange(0, 1.01, 0.01))
colours = self._axes.pcolormesh(xi, yi, zi, vmin=0, vmax=90)
self._fig.colorbar(colours, cax=self._cax, orientation='horizontal')
# self._axes.pcolormesh(xi, yi, zi, vmin=0, vmax=90) # remove once colour bar added
# self._axes.plot(*zip(*points), marker=',', color='r', ls='') SHOWS MIC POSITIONS
self._fig.savefig(graph_name, dpi=300)
return tfs, points
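# Hedged usage sketch (not part of the original module): a 4 m x 4 m soundfield
# with one bright and one dark zone, written out as a geometry plot; the file
# name is arbitrary.
def _example_soundfield_geometry():
    sf = Soundfield([0.0, 0.0], width=4.0, height=4.0)
    bright = Zone(centre=[1.0, 2.0], radius=0.5, colour=[0, 0, 1])
    dark = Zone(centre=[3.0, 2.0], radius=0.5, colour=[1, 0, 0])
    sf.add_zones([bright, dark])
    sf.plot_geometry('geometry.png')
    return sf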
class SoundObject:
"""
A sound object to be used in simulations of sound zones
Attributes
----------
colour : list
A list of float values (r, g, b)
position : list
A list of coordinates (x, y) describing the centre of the sound object
"""
def __init__(self, position=None, colour=None):
"""
Creates a sound object for use in sound zone simulations
:param position: A list of coordinates (x, y) describing the position of the sound object
:type position: list
:param colour: A list of float values (r, g, b) - None results in black (0, 0, 0)
:type colour: list
"""
if position is None:
self._position = [0, 0]
else:
self._position = position
if colour is None:
self._colour = [0, 0, 0]
else:
self._colour = colour
@property
def colour(self):
"""
A list of float values (r, g, b)
:return: A list of float values (r, g, b)
:rtype: list
"""
return self._colour
@property
def position(self):
"""
A list of coordinates (x, y) describing the position of the sound object
:return: (x, y) of the bottom left of the rectangle
:rtype: list
"""
return self._position
@position.setter
def position(self, val):
"""
Set A list of coordinates (x, y) describing the position of the sound object
"""
self._position[0] = val[0]
self._position[1] = val[1]
class Microphone(SoundObject):
"""
A microphone to be used in simulations of sound zones. Inherits from the sound object class.
Attributes
----------------
_radius : static float
The radius of circles used to represent the microphones when rendered in the soundfield
Attributes
----------
colour : list
A list of float values (r, g, b)
position : list
A list of coordinates (x, y) describing the position of the microphone
zone : str
The zone in which this microphone is situated, "bright", "dark" or "either"
purpose : str
The purpose of the microphone, "setup", "evaluation" or "either
_pressure : list
The pressure for each frequency at the microphone most recently calculated and set
"""
_radius = 0.001
@property
def radius(self):
"""
The radius of circles used to represent the microphones when rendered in the soundfield
:return: The radius
:rtype: float
"""
return type(self)._radius
@radius.setter
def radius(self, val):
"""
Set the radius of circles used to represent the microphones when rendered in the soundfield
:param val: Value to be set as radius
:type val: float
"""
type(self)._radius = val  # update the class-level radius shared by all microphones
def __init__(self, zone="none", purpose="none", position=None, colour=None):
"""
Creates a microphone to be used in sound zone simulations
:param zone: The zone in which this microphone is situated, "bright", "dark" or "either"
:type zone: str
:param purpose: The purpose of the microphone, "setup", "evaluation" or "either"
:type purpose: str
:param position: A list of coordinates (x, y) describing the centre of the sound object
:type position: list
:param colour: A list of float values (r, g, b) - None results in black (0, 0, 0)
:type colour: list
"""
SoundObject.__init__(self, position, colour)
self._zone = zone
self._purpose = purpose
self._pressure = []
@property
def zone(self):
"""
The zone in which this microphone is situated, "bright" or "dark"
:return: The zone
:rtype: str
"""
return self._zone
@property
def purpose(self):
"""
The purpose of the microphone, "setup" or "evaluation"
:return: The purpose
:rtype: str
"""
return self._purpose
@property
def pressure(self):
"""
The pressure for each frequency at the microphone most recently calculated and set
:return: The pressure
:rtype: list
"""
return self._pressure
@pressure.setter
def pressure(self, list):
"""
The pressure for each frequency at the microphone most recently calculated and set
:param list: List of pressures at each frequency
:type list: list
"""
self._pressure = list
class Loudspeaker(SoundObject):
"""
A loudspeaker to be used as a source in simulations of sound zones. Inherits from the sound object class.
Attributes
-----------
_width : static float
The width of rectangles used to represent loudspeakers when rendered in the soundfield
_height : static float
The height of rectangles used to represent loudspeakers when rendered in the soundfield
Attributes
----------
colour : list
A list of float values (r, g, b)
position : list
A list of coordinates (x, y) describing the position of the loudspeaker
look_at : list
A list of coordinates (x, y) describing the position the loudspeaker faces
q : list
The filter weight at each frequency most recently calculated and set
"""
_width = 0.08
_height = 0.1
@property
def width(self):
"""
The width of rectangles used to represent loudspeakers when rendered in the soundfield
:return: The width
:rtype: float
"""
return type(self)._width
@property
def height(self):
"""
The height of rectangles used to represent loudspeakers when rendered in the soundfield
:return: The height
:rtype: float
"""
return type(self)._height
def __init__(self, position=None, colour=None, look_at=None):
"""
Creates a Loudspeaker to be used as a source in sound zone simulations
:param position: A list of coordinates (x, y) describing the position of the loudspeaker
:type position: list
:param colour: A list of float values (r, g, b)
:type colour: list
:param look_at: A list of coordinates (x, y) describing the position the loudspeaker faces
:type look_at: list
"""
SoundObject.__init__(self, position, colour)
self._look_at = look_at
self.q = []
@property
def look_at(self):
"""
A list of coordinates (x, y) describing the position the loudspeaker faces
:return: a list of coordinates (x, y)
:rtype: list
"""
return self._look_at
class SoundObjectArray(list):
"""
A container class for sound objects
"""
def __init__(self, *args):
"""
Creates an array of sound objects
"""
list.__init__(self, *args)
def position_objects(self, positions):
"""
Position the sound objects
:param positions: A list of positions the same length as the number of objects
:type positions: numpy.ndarray
"""
for i in range(len(positions)):
self[i].position = positions[i]
def get_object_positions(self):
"""
Returns a list of the positions of the sound objects
:return: list of positions
:rtype: list
"""
return [self[i].position for i in range(len(self))]
def __add__(self, other):
return type(self)(list.__add__(self, other))
def __iadd__(self, other):
return type(self)(list.__add__(self, other))
class LoudspeakerArray(SoundObjectArray):
"""
A container class for loudspeakers
"""
def initialise_q(self, num_frequencies):
"""
Initialise the filter weights of the loudspeakers in the loudspeaker array to one.
:param num_frequencies: The number of frequencies in the simulation.
:type num_frequencies: int
"""
for ls in self:
ls.q = np.ones(num_frequencies, complex)
def set_q(self, new_q, frequency_index):
"""
Set the filter weight values for the loudspeaker array at the frequency index
:param new_q: The list of filter weights
:type new_q: list
:param frequency_index: The index at which the relevant frequency is stored in the simulation's frequency vector
:type frequency_index: int
"""
for i in range(len(new_q)):
self[i].q[frequency_index] = new_q[i]
def get_q(self, frequency_index):
"""
Get the filter weight values for the loudspeaker array at the frequency index
:param frequency_index: The index at which the relevant frequency is stored in the simulation's frequency vector
:type frequency_index: int
:return: The list of filter weights calculated for the relevant frequency
:rtype: list
"""
if frequency_index < 0:
return [ls.q for ls in self]
else:
return [ls.q[frequency_index] for ls in self]
class MicrophoneArray(SoundObjectArray):
"""
A container class for microphones
"""
def initialise_pressures(self, num_frequencies):
"""
Initialise the pressures of the microphones in the microphone array to zero.
:param num_frequencies: The number of frequencies in the simulation.
:type num_frequencies: int
"""
for mic in self:
mic.pressure = np.zeros(num_frequencies, complex)
def set_pressures(self, new_pressures, frequency_index):
"""
Set the pressures values for the microphone array at the frequency index
:param new_pressures: The list of pressures
:type new_pressures: list
:param frequency_index: The index at which the relevant frequency is stored in the simulation's frequency vector
:type frequency_index: int
"""
for i in range(len(new_pressures)):
self[i].pressure[frequency_index] = new_pressures[i]
def get_pressures(self, frequency_index):
"""
Returns the pressure values for the microphone array at the frequency index
:param frequency_index: The index at which the relevant frequency is stored in the simulation's frequency vector
:type frequency_index: int
:return: The list of pressures for the relevant frequency
:rtype: list
"""
if frequency_index < 0:
return [mic.pressure for mic in self]
else:
return [mic.pressure[frequency_index] for mic in self]
def get_subset(self, zone="either", purpose="either"):
"""
Returns a subset of microphones from the array with the required properties, "bright", "dark" or "either" zone
and "setup", "evaluation" or "either" purpose.
:param zone: The zone in which the subset of microphones should be positioned - "bright", "dark" or "either"
:type zone: str
:param purpose: The purpose of the subset of microphones - "setup", "evaluation" or "either"
:type purpose: str
:return: A microphone array containing microphones of the specified requirements
:rtype: MicrophoneArray
"""
mics = MicrophoneArray()
for i in range(len(self)):
if (self[i].zone in (zone, "either") or zone == "either") and \
(self[i].purpose in (purpose, "either") or purpose == "either"):
mics.append(self[i])
return mics
# ********* Simulation and Evaluation *********
class Simulation:
"""
Where all of the calculations for the simulation happen. Using the input of the different array and zone
geometries calculates filter weights across a range of frequencies for different methods of sound zone optimisation
- Brightness Control, Acoustic Contrast Control, Planarity Control and Pressure Matching.
Attributes
----------
frequencies : numpy.ndarray
vector of frequencies
_omega : numpy.ndarray
vector of angular frequencies
_k : numpy.array
vector of wave numbers
_c : float
the speed of sound
_rho : float
the density of air
_target_spl : float
The target SPL in dB in the bright zone
_target_pa : float
The target pressure in the bright zone
ls_array : LoudspeakerArray
The loudspeaker array used in the simulation
mic_array : MicrophoneArray
The microphone array used in the simulation
_tf_array_ls : numpy.ndarray
The transfer functions with the loudspeakers in the first axis
_tf_array_mic : numpy.ndarray
The transfer functions with the microphones in the first axis
_q_ref : float
The reference filter weight for a loudspeaker to realise the target SPL. Used in effort calculations
_current_method : str
A string of the current method, i.e the most recently calculated filter weights and metrics refer to this method
steering_vectors : list
A list of steering vectors to contain the setup and evaluation steering vectors for the bright zone
_planarity_constants : tuple
A tuple containing three constants for Planarity Control. (start_angle, end_angle, transition_angle)
_pm_angle : float
The angle of incidence for the Pressure Matching method.
"""
def __init__(self, frequencies, ls_array, mic_array, c=343, rho=1.21, target_spl=76,
planarity_constants=(360, 360, 5), pm_angle=0):
"""
Creates a Simulation object to create transfer functions and group important constants for
simulation calculations
:param frequencies: The list of frequencies to run the simulations over
:type frequencies: numpy.ndarray
:param ls_array: The loudspeaker array used
:type ls_array: LoudspeakerArray
:param mic_array: The microphone array used
:type mic_array: MicrophoneArray
:param c: The speed of the sound
:type c: float
:param rho: The density of air
:type rho: float
:param target_spl: The target SPL in the bright zone
:type target_spl:
:param planarity_constants: A tuple of floats containing the start_angle, end_angle and transition zone for the
angular window in planarity control
:type planarity_constants: tuple
:param pm_angle: The angle of incidence for pressure matching
:type pm_angle: float
"""
if isinstance(frequencies, int):
frequencies = [frequencies]
self.frequencies = np.array(frequencies)
self._omega = [frequencies[i] * 2 * np.pi for i in range(len(frequencies))]
self._k = [self._omega[i] / c for i in range(len(frequencies))]
self._c = c
self._rho = rho
self._target_spl = target_spl
self._target_pa = 0.00002 * 10 ** (target_spl/20)
self.ls_array = ls_array
self.ls_array.initialise_q(len(frequencies))
self.mic_array = mic_array
self.mic_array.initialise_pressures(len(frequencies))
self._tf_array_ls = np.array(self.calculate_transfer_functions("loudspeakers"))
self._tf_array_mic = np.transpose(np.array(self._tf_array_ls), (0, 2, 1))
bright_mics, ga = self.get_tf_subset("loudspeakers", zone="bright")
avg_pressure = 0
for i in range(len(frequencies)):
ref_source = ga[i][0]
avg_pressure += np.sqrt((ref_source.conj().T @ ref_source) / len(bright_mics))
avg_pressure /= len(frequencies)
self._q_ref = self._target_pa / avg_pressure
self._current_method = None
self.steering_vectors = [None, None]
self._planarity_constants = planarity_constants
self._pm_angle = pm_angle
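# Worked example (a sketch, not part of the original code): with the default
# target_spl of 76 dB re 20 uPa, the target pressure computed above is
# _target_pa = 0.00002 * 10 ** (76 / 20) ~= 0.126 Pa. avg_pressure is the
# RMS bright-zone pressure produced by the first loudspeaker driven with unit
# source strength, averaged over frequency, so driving that loudspeaker with
# q = _q_ref = _target_pa / avg_pressure would reach the target SPL in the
# bright zone on average.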
@property
def omega(self):
"""
vector of angular frequencies
:return: vector of angular frequencies
:rtype: numpy.ndarray
"""
return self._omega
@property
def c(self):
"""
The speed of sound
:return: the speed of sound
:rtype: float
"""
return self._c
@property
def rho(self):
"""
The density of air
:return: The density of air
:rtype: float
"""
return self._rho
@property
def k(self):
"""
vector of wave numbers
:return: vector of wave numbers
:rtype: numpy.ndarray
"""
return self._k
@property
def target_spl(self):
"""
The target SPL in the bright zone
:return: The target SPL in the bright zone
:rtype: float
"""
return self._target_spl
def calculate_transfer_functions(self, orientation, mic_array=None, frequency=None):
"""
Calculate transfer functions with the given orientation. If no mic_array or frequencies are provided the
transfer functions returned are those of the mic_array and frequency initialised when creating the simulation.
Be advised that the transfer functions for the simulation are already made when you create a simulation object.
This should only need to be used for external calculations, which is why the optional parameters are provided.
:param orientation: String to choose the orientation of transfer functions - "microphones" or "loudspeakers"
:type orientation: str
:param mic_array: The MicrophoneArray to be used. Defaults to None - the simulations microphone array
:type mic_array: MicrophoneArray
:param frequency: The frequencies across which the transfer functions are calculated
:type frequency: numpy.ndarray
:return: An array of shape (freq, mic, ls) or (freq, ls, mic) depending on orientation
:rtype: numpy.ndarray
"""
if mic_array is None:
mic_array = self.mic_array
if frequency is None:
k = self._k
elif type(frequency) is int:
k = [(2 * np.pi * frequency) / self.c]
else:
k = (2 * np.pi * frequency) / self.c
mic_num = len(mic_array)
ls_num = len(self.ls_array)
r = np.zeros((ls_num, mic_num), float)
tf_array = np.zeros((len(k), ls_num, mic_num), complex)
for m in range(mic_num):
for n in range(ls_num):
# distance from each source to each microphone
r[n][m] = np.sqrt((mic_array[m].position[0] - self.ls_array[n].position[0]) ** 2 + \
(mic_array[m].position[1] - self.ls_array[n].position[1]) ** 2)
for i in range(len(k)):
if r[n][m] == 0:
tf_array[i][n][m] = 1
else:
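# free-field point-source (monopole) Green's function: p(r) = e^(-j*k*r) / (4*pi*r)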
tf_array[i][n][m] = (1 / (4 * np.pi * r[n][m])) * np.exp(-1j * k[i] * r[n][m])
if orientation == "microphones":
return np.transpose(np.array(tf_array), (0, 2, 1))
elif orientation == "loudspeakers":
return
|
np.array(tf_array)
|
numpy.array
|
import math
import numpy as np
import sklearn
from sklearn.kernel_ridge import KernelRidge
from sklearn.svm import SVC
from sklearn.metrics import cohen_kappa_score
def ridge_regression(K1, K2, y1, y2, alpha, c):
n_val, n_train = K2.shape
clf = KernelRidge(kernel = "precomputed", alpha = alpha)
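# centred one-hot targets: each row holds (1 - 1/c) at the true class and -1/c
# elsewhere, so the rows sum to zero (a common trick for multi-class kernel ridge)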
one_hot_label = np.eye(c)[y1] - 1.0 / c
clf.fit(K1, one_hot_label)
z = clf.predict(K2).argmax(axis = 1)
return 1.0 * np.sum(z == y2) / n_val
def svm(K1, K2, y1, y2, C, c):
n_val, n_train = K2.shape
clf = SVC(kernel = "precomputed", C = C, cache_size = 100000)
clf.fit(K1, y1)
z = clf.predict(K2)
return 1.0 * np.sum(z == y2) / n_val
def gen_bound(K, y):
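# appears to compute a Rademacher-complexity-style bound for one-hot / multi-output
# targets y: sum over outputs of the RKHS norm sqrt(y_j^T K^-1 y_j) scaled by
# sqrt(trace(K)) / n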
alpha = np.linalg.solve(K, y)
C = alpha.T.dot(K).dot(alpha)
return np.sum(np.sqrt(np.diag(C))) * np.sqrt(np.trace(K)) / K.shape[0]
def normalize(K):
L = np.diag(K)
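# cosine-normalise the kernel, K_ij / sqrt(K_ii * K_jj); the np.clip presumably
# guards against division by zero for zero-diagonal entries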
return K / np.clip(np.sqrt(
|
np.outer(L, L)
|
numpy.outer
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 13:47:32 2018
@author: JHodges
"""
#import matplotlib
#matplotlib.use('agg')
import matplotlib.pyplot as plt
import util_common as uc
import parse_elevation as pe
import parse_modis_file as pm
import parse_asos_file as pa
import remapSwathData as rsd
#from parse_asos_file import ASOSMeasurementList, ASOSStation
import pyhdf.SD as phdf
#import matplotlib.pyplot as plt
import datetime as dt
import numpy as np
import scipy.interpolate as scpi
import psutil
import gc
import time
import sys
import glob
import pickle
import scipy as sp
class GriddedMeasurement(object):
''' This class contains a measurement on a latitude and longitude grid.
Fields:
dateTime: Time stamp of the measurement as a datetime.datetime
latitude: Latitude of each gridded point as a numpy.ndarray
longitude: Longitude of each gridded point as a numpy.ndarray
data: Measurement of each gridded point as a numpy.ndarray
label: Label to use when plotting a contour of this measurement
dataName: Name associated with this measurement
clim: Contour limits to use when plotting a contour of this
measurement
Functions:
mats2pts: This will reshape the latitude and longitude matrices
into arrays and return an Nx2 numpy array with [lat,lon]
remap: This will remap data, latitude, and longitude to a new grid
specified by new_lat and new_lon matrices
computeMemory: This will calculate the memory usage by this object
strTime: This will return a string containing the time stamp
associated with this measurement as a string
'''
__slots__ = ['dateTime','latitude','longitude','data','label','dataName','clim'] # 'remapped'
def __init__(self,dateTime,lat,lon,data,label):
self.dateTime = dateTime
self.latitude = lat
self.longitude = lon
self.data = data
self.label = label
self.clim = None
def __str__(self):
''' This function prints summary information of the object when a
string is requested.
'''
if self.dateTime is not None:
dts = time.mktime(self.dateTime.timetuple())+self.dateTime.microsecond/1E6
dts = time.strftime('%Y%j%H%M%S',time.localtime(dts))
else:
dts = 'Not Considered'
string = "Gridded Measurement\n"
string = string + "\tType:\t%s\n"%(self.dataName)
string = string + "\ttime:\t%s (yyyydddhhmmssss)\n"%(dts)
string = string + "\tgrid:\t%.0f,%.0f (Latitude,Longitude)\n"%(self.data.shape[0],self.data.shape[1])
string = string + "\tmemory:\t%.4f MB"%(self.computeMemory())
return string
def __repr__(self):
''' This function prints summary information of the object when a
string is requested.
'''
return self.__str__()
def mats2pts(self,lat,lon):
''' This will reshape the latitude and longitude matrices into arrays
and return an Nx2 numpy array with [lat,lon]
'''
lat = np.reshape(lat,(lat.shape[0]*lat.shape[1]))
lon = np.reshape(lon,(lon.shape[0]*lon.shape[1]))
pts = np.zeros((len(lat),2))
pts[:,0] = lat
pts[:,1] = lon
return pts
def removeNans(self,points,values):
inds = np.where(~np.isnan(points[:,0]) & ~np.isnan(points[:,1]) & ~np.isnan(values))
newPoints = points[inds]
newValues = values[inds]
return newPoints, newValues
def remap(self,new_lat,new_lon,ds=10,method='linear'):
''' This will remap data, latitude, and longitude to a new grid
specified by new_lat and new_lon matrices.
NOTE: ds defines how much to downsample the original grid prior to
remapping (scpi.griddata can use too much memory).
NOTE: method defines what method to use in the resampling process. If
method='linear' bilinear interpolation will be used. If
method='nearest' nearest neighbor value will be used.
'''
oldpts = self.mats2pts(self.latitude,self.longitude)
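# flatten the data matrix to 1-D so each value lines up with its (lat, lon)
# point in oldpts before interpolating onto the new grid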
values =
|
np.reshape(self.data,(self.data.shape[0]*self.data.shape[1],))
|
numpy.reshape
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.notation.munsell` module.
"""
from __future__ import division, unicode_literals
import numpy as np
import sys
if sys.version_info[:2] <= (2, 6):
import unittest2 as unittest
else:
import unittest
from colour.notation.munsell import (
parse_munsell_colour,
is_grey_munsell_colour,
normalize_munsell_specification)
from colour.notation.munsell import (
munsell_colour_to_munsell_specification,
munsell_specification_to_munsell_colour)
from colour.notation.munsell import (
xyY_from_renotation,
is_specification_in_renotation)
from colour.notation.munsell import bounding_hues_from_renotation
from colour.notation.munsell import hue_to_hue_angle, hue_angle_to_hue
from colour.notation.munsell import hue_to_ASTM_hue
from colour.notation.munsell import (
interpolation_method_from_renotation_ovoid,
xy_from_renotation_ovoid)
from colour.notation.munsell import LCHab_to_munsell_specification
from colour.notation.munsell import maximum_chroma_from_renotation
from colour.notation.munsell import munsell_specification_to_xy
from colour.notation.munsell import (
munsell_specification_to_xyY,
xyY_to_munsell_specification)
from colour.notation import (
munsell_value_priest1920,
munsell_value_munsell1933,
munsell_value_moon1943,
munsell_value_saunderson1944,
munsell_value_ladd1955,
munsell_value_mccamy1987,
munsell_value_ASTM_D1535_08)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013 - 2014 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = ['MUNSELL_SPECIFICATIONS',
'MUNSELL_GREYS_SPECIFICATIONS',
'MUNSELL_EVEN_SPECIFICATIONS',
'MUNSELL_BOUNDING_HUES',
'MUNSELL_HUE_TO_ANGLE',
'MUNSELL_HUE_TO_ASTM_HUE',
'MUNSELL_INTERPOLATION_METHODS',
'MUNSELL_XY_FROM_RENOTATION_OVOID',
'MUNSELL_SPECIFICATIONS_TO_XY',
'MUNSELL_COLOURS_TO_XYY',
'MUNSELL_GREYS_TO_XYY',
'XYY_TO_MUNSELL_SPECIFICATIONS',
'XYY_TO_MUNSELL_GREYS_SPECIFICATIONS',
'NON_CONVERGING_XYY',
'TestMunsellValuePriest1920',
'TestMunsellValueMunsell1933',
'TestMunsellValueMoon1943',
'TestMunsellValueSaunderson1944',
'TestMunsellValueLadd1955',
'TestMunsellValueMcCamy1992',
'TestMunsellValueASTM_D1535_08',
'TestMunsellSpecification_to_xyY',
'TestMunsellColour_to_xyY',
'TestxyY_to_munsell_specification',
'TestxyY_to_munsell_colour',
'TestParseMunsellColour',
'TestIsGreyMunsellColour',
'TestNormalizeMunsellSpecification',
'TestMunsellColourToMunsellSpecification',
'TestMunsellSpecificationToMunsellColour',
'Test_xyY_fromRenotation',
'TestIsSpecificationInRenotation',
'TestBoundingHuesFromRenotation',
'TestHueToHueAngle',
'TestHueAngleToHue',
'TestHueTo_ASTM_hue',
'TestInterpolationMethodFromRenotationOvoid',
'Test_xy_fromRenotationOvoid',
'TestLCHabToMunsellSpecification',
'TestMaximumChromaFromRenotation',
'TestMunsellSpecification_to_xy']
# TODO: Investigate if tests can be simplified by using a common valid set of
# specifications.
MUNSELL_SPECIFICATIONS = (
(2.5, 7.9653798470827155, 11.928546308350969, 4),
(2.5, 6.197794822090879, 6.923610826208884, 4),
(2.5, 5.311956978256753, 2.0, 4),
(5.613007062442384, 8.402756538070792, 18.56590894044391, 4),
(5.845640071004907, 8.062638664520136, 5.782325614552295, 4),
(5.780794121059599, 3.174804081025836, 3.3492086825591487, 4),
(5.483684299639117, 3.8994120994080133, 5.761459062506715, 4),
(5.809580308813496, 5.816975143899512, 6.662613753958899, 4),
(5.209252955662903, 2.9770364483569107, 5.141472643810014, 4),
(7.706105853911573, 2.789942201654241, 11.396648897274897, 4),
(7.5675942867463615, 9.569378264154928, 16.714918860774414, 4),
(8.117640564564343, 2.7489429651492028, 3.1653563832640272, 4),
(7.8731203012311255, 2.6438472620092806, 13.241107969297714, 4),
(8.04983322214289, 2.4630649870973422, 7.501924679081063, 4),
(8.355307569391062, 2.703242274198649, 11.925441344336392, 4),
(8.342795760577609, 1.0627446691234035, 6.298818145909256, 4),
(7.5947244020062845, 1.5750745121803325, 4.626613135331287, 4),
(8.19517786608579, 8.732504313513864, 23.571122010181508, 4),
(7.754763634912469, 8.437206137825585, 21.00944901061068, 4),
(9.010231962978862, 6.1312711883866395, 6.803370568930175, 4),
(9.041566851651622, 6.4540531985593965, 17.010037203566448, 4),
(9.915652169827913, 8.56438797679146, 11.13108215988432, 4),
(10.0, 8.651470349341308, 27.322046186799103, 4),
(9.961336111598143, 8.039682739223524, 13.20009863344056, 4),
(9.887406551063181, 8.321342653987184, 2.0660963235598375, 4),
(10.0, 3.400787121787084, 2.5700932200974145, 4),
(10.0, 3.063915609453643, 13.514066607169514, 4),
(10.0, 5.461465491798149, 12.753899774963989, 4),
(10.0, 5.90081409486059, 15.244598276849418, 4),
(10.0, 5.4222087054147545, 27.929001019877095, 4),
(9.757039645743053, 5.653647411872443, 3.4112871270786895, 4),
(10.0, 5.790357134071424, 24.86360130658431, 4),
(9.862075817629322, 4.487864213671867, 7.67196809500038, 4),
(3.2140937198013564, 9.345163595199718, 3.4367939376082868, 3),
(3.484005759599379, 9.572118958552942, 14.905079424139613, 3),
(3.1967035260607033, 9.059573376604588, 24.78003138905329, 3),
(2.5, 9.479129956842218, 27.736581704977635, 3),
(2.7908763449337677, 8.166099921946278, 20.868304564027603, 3),
(3.221499566897477, 5.507741920664265, 5.467726257137659, 3),
(2.622512070432247, 5.989380652373817, 19.364472252973304, 3),
(3.2873061024849806, 5.439892524933965, 19.855724192587914, 3),
(5.727612405003367, 3.013295327457818, 10.746642552166502, 3),
(5.347955701149093, 3.003537709503816, 18.900471815194905, 3),
(5.7385751713204325, 3.987559993529851, 4.223160837759656, 3),
(5.720824103581511, 1.804037523043165, 4.878068159363519, 3),
(5.316780024484356, 1.0305080135789524, 8.043957606541364, 3),
(5.7623230008312385, 1.6541934959363132, 9.507411716255689, 3),
(5.985579505387595, 2.2109765673980277, 14.803434527189347, 3),
(5.461619603420755, 2.805568235937479, 6.6471547360970025, 3),
(7.838277926195208, 2.8050500161595604, 6.238528025218592, 3),
(8.2830613968175, 2.716343821673611, 10.350825174769154, 3),
(7.603155032355272, 6.1394212951580345, 29.139541165198704, 3),
(8.324115039527976, 6.971801555303874, 23.515778973195257, 3),
(8.44424273124686, 6.657492305333222, 2.4130843113046656, 3),
(8.309061774521076, 6.371190719454564, 17.507252134514488, 3),
(8.14037117068092, 2.6868573867536836, 14.649933295354042, 3),
(8.484903553213694, 2.2057045177976002, 11.879562262633948, 3),
(8.454109029623016, 2.3630506284708144, 4.606317173304252, 3),
(8.305262429168986, 5.460535517182709, 3.9045072719017924, 3),
(8.189730004579287, 5.069933398792441, 28.126992759236863, 3),
(7.54028778107475, 5.779995612547662, 6.635319193935916, 3),
(7.9629991342362985, 5.233597701388516, 20.293354805626866, 3),
(8.432959559038371, 5.797128354507666, 26.469970873757067, 3),
(10.0, 9.005161484782885, 6.0469956581432704, 3),
(9.771353946056914, 9.383759836829901, 20.82975271547889, 3),
(9.376380796522223, 9.46044820450894, 13.348522394106682, 3),
(9.912704179532229, 4.057804958576875, 25.778231770351923, 3),
(10.0, 4.853695964045051, 13.712247643370837, 3),
(10.0, 4.221211292509457, 28.587923360931033, 3),
(9.287535146732925, 4.404206868704275, 6.997389565284625, 3),
(10.0, 5.717897422867529, 30.932435068478792, 3),
(10.0, 5.121046242854478, 7.946854746461393, 3),
(10.0, 5.631186501571907, 26.172410297895773, 3),
(2.5, 6.822278767379375, 12.643410557057086, 2),
(2.5, 3.3435596434006034, 19.167537762557394, 2),
(3.284581774573411, 3.7457477655465423, 10.316761862277126, 2),
(3.0814075494281132, 3.302789020993419, 4.031683724514751, 2),
(2.5, 9.595267222759654, 9.136435041220121, 2),
(2.5899169115530087, 9.55055785508054, 8.263133397233354, 2),
(2.5342634625499727, 9.494299074607266, 14.863663104253218, 2),
(5.275920564662094, 9.02282018751374, 12.879135949769728, 2),
(5.522856449128964, 9.387711396347438, 17.412586595686815, 2),
(5.885914939777947, 9.191119089368966, 17.388086814072437, 2),
(5.4717401116974616, 9.868862187868638, 11.646848538821667, 2),
(5.956560321967156, 4.186123335197883, 4.31169020481439, 2),
(5.6279111948942635, 4.547202429787774, 16.56681914443115, 2),
(5.8534547245334565, 4.592599799739227, 18.83506980508535, 2),
(5.144720369630256, 5.318575486426688, 18.979172966805407, 2),
(5.2907074463880175, 6.000990946276877, 13.598520998056053, 2),
(5.415844403197766, 6.398031110922737, 15.178617464461626, 2),
(8.204144852288245, 5.902107978077237, 4.020177691372295, 2),
(9.366069953403018, 3.3728653869498273, 15.422766182794579, 2),
(10.0, 3.949081763597084, 9.192387616705815, 2),
(10.0, 3.187455579956449, 15.954247893607032, 2),
(9.260586271537607, 3.4545177339210404, 10.59517579170162, 2),
(9.571675864670619, 3.149737124891618, 17.398847531397934, 2),
(3.2387393821759787, 4.827650915864795, 3.7435106940988625, 1),
(2.5, 4.30220435408426, 7.399343614420917, 1),
(2.5, 4.329470943798639, 8.860840417367838, 1),
(2.5, 7.620094327678255, 10.887265616829124, 1),
(2.5, 7.1449996531857725, 10.10233537418591, 1),
(2.6104349455855846, 7.700939489093993, 4.236171515065992, 1),
(2.5, 8.524455647347406, 5.3613636980274295, 1),
(3.1731014606584806, 8.133658146416419, 15.199536235308303, 1),
(2.5, 7.129372162253073, 5.4202608625739925, 1),
(2.5, 7.70850985024877, 9.619364938403443, 1),
(3.252581509053177, 7.081532543557421, 6.2224060204343745, 1),
(2.5, 7.67557944940156, 12.261808397585057, 1),
(2.5, 3.4825807865537914, 7.768505546917617, 1),
(2.5, 3.020783157962588, 6.998840911724095, 1),
(3.020562119690717, 3.1223174909201346, 5.203087539105082, 1),
(5.2190911687613255, 3.0070655951585925, 13.573887550967275, 1),
(5.5962506280473505, 2.15728255216339, 5.165106850365733, 1),
(5.078574838897358, 2.9637552645895053, 6.8599427244043705, 1),
(5.1756171558445825, 2.772951703906637, 4.56080038214103, 1),
(5.497353020782844, 5.410551418942688, 2.0, 1),
(5.841773513544001, 5.686667624085427, 13.28936566781855, 1),
(5.580549185463668, 6.964187735662777, 16.1803201492634, 1),
(5.287772726922527, 6.865396694853934, 14.098946461580404, 1),
(8.358221285614269, 4.591594256415192, 17.271563597297103, 1),
(7.87724479635977, 4.744438140664897, 5.598934346859475, 1),
(8.323336953587479, 4.566800376285041, 7.0881523668119195, 1),
(7.845486096299681, 4.586270737017715, 16.23379517928239, 1),
(8.382569502344943, 4.562211644069123, 13.97512411087629, 1),
(7.855593749782354, 3.238350356301548, 5.360435825061775, 1),
(7.655501153733914, 3.903923881082662, 9.769593047963392, 1),
(7.653019158008493, 6.348396270933699, 11.704589766625281, 1),
(10.0, 2.7176353295329094, 5.415846167802247, 1),
(9.196648156004963, 8.15078293499349, 5.069223366759241, 1),
(10.0, 6.040694822625091, 7.76280231640685, 1),
(10.0, 6.719017792521678, 18.37437538640251, 1),
(2.8739501345809, 3.5100389001084373, 4.494521106912674, 10),
(2.979763831715893, 8.5642374861117, 6.426710793964199, 10),
(2.5, 8.924876646785982, 2.491252841450378, 10),
(2.5, 8.121352187119456, 8.82337986403619, 10),
(2.5, 4.643160393937538, 18.83933997786449, 10),
(2.5, 4.925443059836121, 5.417711811598947, 10),
(2.5, 8.509385882792433, 8.04535672534691, 10),
(2.5, 2.709647356385667, 16.195810159806815, 10),
(5.6678871626197305, 1.8444622064585485, 18.226010811743183, 10),
(5.759673840199206, 1.960972599684376, 30.42873152741525, 10),
(5.783634661463273, 1.5360819708237339, 21.480194214511137, 10),
(5.118173248862928, 1.5400563354602976, 41.86847335857883, 10),
(5.757349724389667, 1.6383453350505301, 13.609604267804956, 10),
(5.279304061296045, 4.900840641360432, 22.876127528048663, 10),
(5.715709801059808, 4.570357108788123, 30.360213488022158, 10),
(5.947947304520848, 4.273422536180247, 4.8966439066197935, 10),
(5.09899322481724, 4.947505227279317, 26.26875042475258, 10),
(5.53222949762985, 4.629910893964432, 7.756449262721482, 10),
(5.923584541768192, 4.593239396795306, 19.567605030849386, 10),
(5.950156387030171, 2.42463499343633, 4.953666946161412, 10),
(5.614158136535322, 2.4812727587161407, 20.644953904366893, 10),
(5.435908140730638, 2.7884847594702746, 21.585064332200393, 10),
(5.539908561343329, 2.9864344023506266, 44.90369903995316, 10),
(5.3792514320991325, 2.137036038265424, 25.88907455882873, 10),
(5.632909830682246, 5.9349482115124506, 21.384042506861697, 10),
(5.20332651493292, 5.825367195549048, 15.514467427422431, 10),
(5.927793692134072, 5.448079050348612, 3.7766395197414253, 10),
(5.817322396187511, 5.292185862716667, 11.31804158090752, 10),
(7.949960633591607, 2.873765731226449, 25.621368902089333, 10),
(8.382592436810759, 2.461570417216745, 40.54127195292601, 10),
(7.96379736332257, 2.200134671312228, 36.70731870996695, 10),
(8.373924456610474, 2.3066883154384743, 8.623846064990166, 10),
(8.151990686473388, 2.2622251239305577, 42.229127196458144, 10),
(8.25502764532606, 9.609182815192318, 7.080986046028279, 10),
(8.488384085232076, 8.098523111957578, 9.779628072315807, 10),
(8.438357068876163, 2.6893452283620705, 26.873452492074044, 10),
(8.309434906530441, 2.4623229011742396, 48.49966399344499, 10),
(7.7115794149655015, 2.724728645017314, 5.729859843354196, 10),
(7.6273740879401934, 2.2251923932068416, 26.724973070776922, 10),
(7.693923337226084, 2.6579274123978887, 48.407897505690485, 10),
(10.0, 6.197418391023862, 10.97195381591066, 10),
(9.113097274740381, 6.270996638245157, 2.7564645951736484, 10),
(10.0, 9.235232580795238, 6.003388325186025, 10),
(10.0, 5.050367446997329, 19.170756721559698, 10),
(9.380110088755156, 5.5356649305105154, 18.817507743754415, 10),
(9.001795946577033, 7.786061808916703, 4.453854563212078, 10),
(10.0, 7.692030956316567, 3.653159723688856, 10),
(9.046182896421445, 3.0439259875156295, 22.300946806849847, 10),
(9.459420796383784, 3.0372188559586917, 10.552556949414955, 10),
(10.0, 3.3229506269252425, 31.2476220198124, 10),
(10.0, 3.1004893435032645, 29.2734347311525, 10),
(2.5, 7.990213836555715, 8.375074375178261, 9),
(2.5, 7.298301069157875, 9.502846862649331, 9),
(2.8619005171223564, 7.275426002317967, 7.466126134628901, 9),
(3.0874221941355513, 8.485000561300847, 2.493857829360787, 9),
(2.5, 3.690667859366627, 19.77471678075617, 9),
(3.220553507003754, 3.281507210559706, 37.05938066272616, 9),
(2.5, 3.8989428412499203, 39.166418500944374, 9),
(2.7654037016841957, 3.1169069187360945, 29.726535569137937, 9),
(2.5, 3.703448940191029, 12.087654687250128, 9),
(2.5, 3.433194385943258, 3.382852759577178, 9),
(2.836612137080781, 3.9924265837199604, 2.0, 9),
(2.8888545547050946, 3.2474346036095905, 14.618307037832857, 9),
(5.164399331990519, 6.2346627424063925, 9.111465383743912, 9),
(5.500356903003388, 6.736841239972426, 13.154128131968298, 9),
(5.535810057742433, 6.970342536034459, 8.892716664134475, 9),
(5.590040966343994, 3.5668609688847175, 22.75661278689855, 9),
(5.282620261743346, 3.2367340323019573, 18.732823688754383, 9),
(5.172895640160181, 3.0043051231231956, 6.2292543458148515, 9),
(5.259721854731981, 3.3004333429874864, 35.890872110681414, 9),
(5.5536463415959245, 3.4948508349893386, 10.076683709549055, 9),
(5.730003972159145, 2.488034141173207, 15.985698390269977, 9),
(5.782381516990652, 2.4812045413951833, 28.774618518379302, 9),
(5.069379781665461, 6.741533325352479, 2.2194841714206595, 9),
(5.1346796709796605, 6.103139133682482, 27.726398643923417, 9),
(5.383260687864624, 5.56099784134289, 18.302295934127923, 9),
(5.869792088464701, 5.233311379347905, 32.55343216796663, 9),
(5.462451143540612, 5.746471808899983, 30.948864634440213, 9),
(5.357445269639698, 5.68852667194441, 5.261434469006405, 9),
(5.626373453003034, 5.771003693827525, 25.170846666445236, 9),
(8.284200895164993, 2.466049819474928, 17.238899804160177, 9),
(8.318102784124019, 2.2658035726687236, 22.596147383535918, 9),
(7.851936866242713, 7.45229335345878, 20.962374407911458, 9),
(8.146081336032703, 7.714405906472637, 13.533962918469337, 9),
(8.09720864316275, 7.247339841946607, 17.33899155052454, 9),
(7.830256291991797, 6.872416994269415, 10.706822163825924, 9),
(7.80065897068848, 6.330678323824742, 6.211375680877805, 9),
(8.044863647118635, 6.808226317611471, 15.557155261544228, 9),
(8.461774802909071, 4.745965252820717, 36.03729693977732, 9),
(7.612382882207284, 4.372367470892327, 14.168690780706225, 9),
(8.169633927695997, 4.48833473800357, 27.23584610386441, 9),
(9.602031136015775, 5.527970638413552, 20.5806356758181, 9),
(9.663686030178818, 5.516978463101205, 29.047658472982956, 9),
(9.75292854736471, 5.461162553197844, 34.11493160528129, 9),
(10.0, 5.650424904167431, 4.216215730437086, 9),
(10.0, 5.73654367766597, 34.72852675583916, 9),
(10.0, 5.4360854849263855, 14.779627294882367, 9),
(10.0, 5.79544155795279, 2.0, 9),
(9.49705091394873, 5.914105479148815, 10.80885478009873, 9),
(9.826635163465532, 1.9759992882300867, 7.06711443184985, 9),
(9.382502350301259, 4.709738717837755, 19.999476877446362, 9),
(9.115530591819274, 4.986025386567032, 5.883436488694818, 9),
(10.0, 4.813033015882831, 24.745870232952445, 9),
(9.378359588580793, 4.574376802251692, 26.295787257422923, 9),
(10.0, 2.1709322459501545, 21.57257635660235, 9),
(10.0, 2.5713046143569223, 26.039872491235577, 9),
(2.5, 2.6357605512712707, 4.712138166253982, 8),
(2.8874578666829285, 2.0337681540970594, 13.994896052145748, 8),
(3.435419560439465, 2.2299190864211247, 6.718989113532732, 8),
(2.9925336062737173, 1.928933557645075, 7.198014339866309, 8),
(2.5, 1.3726890604845965, 14.156726710024465, 8),
(2.6104579288975813, 1.2137704813997643, 3.3458156268951917, 8),
(5.1670653045538115, 7.761502840367845, 2.1409481568506346, 8),
(5.054434114346951, 7.011456904063963, 6.442157332603133, 8),
(5.803735682450612, 8.51299345440391, 10.443841773523394, 8),
(5.044877539779968, 6.342036003669621, 18.424428701407553, 8),
(5.484832402621484, 6.739510598555563, 5.474777491295647, 8),
(5.162300427200289, 6.57672216934989, 24.999056248525125, 8),
(5.877256360743413, 6.789776791182118, 15.450444143259661, 8),
(8.197449080109873, 2.2092984979309276, 2.0, 8),
(7.997237265754237, 2.060313094466323, 11.655829335806517, 8),
(7.973192560907184, 8.67128307488709, 4.272886886879181, 8),
(7.836498646186221, 8.168701526186094, 13.596658717999025, 8),
(7.782186965908517, 9.202193528464585, 13.902105524067945, 8),
(9.531795266771761, 5.037755377967032, 2.0, 8),
(10.0, 5.41661210331397, 11.055624912778937, 8),
(9.312270837393163, 7.466203120412419, 11.185222099189973, 8),
(10.0, 7.097905887270363, 13.895455902446677, 8),
(9.925669940032272, 4.692192166283825, 7.2040789887667955, 8),
(9.416740882402403, 4.697368796121149, 8.720116348180492, 8),
(10.0, 4.338509514756336, 16.469698910991372, 8),
(10.0, 6.402201264456283, 6.599237233947309, 8),
(10.0, 5.182208073338139, 4.550269784467781, 8),
(9.970332530519679, 5.903209540812212, 10.837022722087644, 8),
(2.962707587174585, 9.2513521634857, 9.999116931630539, 7),
(3.1672052728994915, 9.141134617154027, 7.383624729892915, 7),
(2.5, 5.049858089466979, 17.881593853007615, 7),
(2.7415018638966284, 5.680976628228491, 18.00290873780138, 7),
(2.5, 5.8481154189353175, 10.232668996271492, 7),
(2.877902226185231, 5.567414385297515, 3.5582034231201787, 7),
(2.5, 5.8534450733346, 27.77999592691697, 7),
(5.412821771284458, 2.5214549204115335, 7.258040020605607, 7),
(5.83754747605084, 2.530273181625722, 11.998261380615471, 7),
(5.9693975439749885, 4.3487706338488, 14.397906420283302, 7),
(5.004079000563381, 4.657273345320005, 22.736677614468775, 7),
(5.168438425945292, 4.24641271720769, 4.844860547907693, 7),
(5.863284315202094, 4.359153796629064, 23.489710023246513, 7),
(5.756333389411959, 8.952011225713635, 7.301135618422141, 7),
(5.108337403014788, 8.31154202432518, 11.359771531491097, 7),
(8.314898437378535, 9.185953513281046, 4.238233636005843, 7),
(8.201460399608226, 4.230965415446139, 11.589840844520428, 7),
(7.595604919273442, 4.88445113865134, 6.798265747221928, 7),
(8.378186361828917, 9.484819582257831, 8.022357890675561, 7),
(8.028236284464779, 9.757701617444052, 11.574198271062086, 7),
(8.229270762113973, 8.691786353579515, 6.350022396927342, 7),
(10.0, 3.3059509658558612, 3.1152259635487924, 7),
(9.756267998308681, 3.1863606517354883, 14.803384721914584, 7),
(10.0, 3.5046891678155427, 13.90160960971739, 7),
(10.0, 8.784136629159212, 6.218490965882184, 7),
(10.0, 8.37434528326138, 13.887493044276624, 7),
(10.0, 4.6140458786417, 14.68907159946693, 7),
(10.0, 8.03303730091703, 13.518172354943417, 7),
(2.7455640547144746, 1.6521001852026693, 5.569110673549164, 6),
(3.1452880891491906, 5.155515834056653, 8.595832717291, 6),
(2.5, 4.389047661368727, 4.950679151608691, 6),
(2.5, 4.394863837189541, 4.383231249423155, 6),
(2.5, 1.5580252510526358, 3.307282274836235, 6),
(5.045583268005572, 8.635334543903529, 9.59194524860244, 6),
(5.594284526041456, 8.6320252698003, 10.197201238166286, 6),
(5.988802467213943, 8.132531816914582, 12.30595195616923, 6),
(5.425850947396252, 5.185445600639579, 8.046156862703112, 6),
(5.369364240119585, 5.088077743168478, 7.340573827339962, 6),
(5.702045821590509, 5.271793984998375, 10.325652051724541, 6),
(5.411096326958829, 5.545898372969883, 5.292034843095026, 6),
(8.242968536635763, 9.082400742895011, 4.90020586532881, 6),
(8.050426422258862, 9.780537958506372, 18.978339720751418, 6),
(8.238754570485817, 8.602489911338367, 5.94133011037865, 6),
(8.39568424389748, 4.506736427736353, 9.461515968715135, 6),
(10.0, 5.138757136469953, 12.704963485646498, 6),
(10.0, 5.159912610631281, 15.6753707607594, 6),
(10.0, 5.549472965121217, 3.506573388368494, 6),
(10.0, 5.795090421330749, 14.063922879568509, 6),
(10.0, 6.983123234599715, 3.128443413944953, 6),
(10.0, 6.680204754366847, 11.632405914314647, 6),
(9.050263182466011, 6.721800647918977, 17.08367694275979, 6),
(10.0, 6.0634616201345715, 4.736966947326921, 6),
(9.409402543801862, 6.94420363069249, 6.28766021168659, 6),
(9.633394604006961, 7.505827554006868, 4.623044001702525, 6),
(9.020770192275748, 7.3138794160617016, 13.422245014577644, 6),
(9.26317609686154, 7.357994930871833, 15.233295182477667, 6),
(3.332782026387723, 7.225679089752617, 16.113419977677538, 5),
(2.5, 5.428663116358418, 6.5436496028361315, 5),
(2.5, 2.829072524106358, 2.0, 5),
(2.8285591842433737, 8.730390823623916, 21.473258817290873, 5),
(2.5, 8.17012010036135, 12.020108658634838, 5),
(2.5, 8.74354045618398, 14.42790441415372, 5),
(2.5, 4.638913962811717, 8.380243803410817, 5),
(3.363079416671538, 4.670651645625486, 2.7755096642090313, 5),
(5.339079962653624, 8.064094823108675, 16.611574939424255, 5),
(5.347356764781598, 8.43641762101464, 15.41216519823205, 5),
(5.368950609634622, 7.371653807185894, 7.038165919924306, 5),
(5.929552854535908, 6.895926920816455, 7.57281344704806, 5),
(5.72794655950891, 6.581660847859535, 10.668172633934036, 5),
(5.641782139668679, 6.458019104693064, 9.549016885745186, 5),
(5.344359642058747, 2.871097758194079, 5.430489560972486, 5),
(7.749909297802317, 4.328832721055091, 4.268933751175051, 5),
(8.145409228909998, 4.865021714408405, 7.545633529064384, 5),
(7.907253670159305, 5.688395096546548, 10.770986229289623, 5),
(7.592508492261312, 5.098997604455221, 4.933568344499713, 5),
(7.674872690410821, 5.441049019888879, 3.5502452884794837, 5),
(7.991979987062054, 6.616295483614106, 3.2837012487472252, 5),
(9.345599185286883, 7.224736586735167, 17.48852175788182, 5),
(9.659595218511388, 7.899577776723924, 3.3572177484844636, 5))
MUNSELL_GREYS_SPECIFICATIONS = np.linspace(0, 10, 25)
MUNSELL_EVEN_SPECIFICATIONS = (
(2.5, 5.0, 12.0, 4),
(2.5, 5.0, 32.0, 4),
(2.5, 5.0, 22.0, 4),
(2.5, 5.0, 32.0, 4),
(2.5, 6.0, 18.0, 4),
(2.5, 6.0, 32.0, 4),
(2.5, 6.0, 6.0, 4),
(2.5, 5.0, 42.0, 4),
(2.5, 5.0, 26.0, 4),
(2.5, 5.0, 48.0, 4),
(2.5, 2.0, 14.0, 4),
(2.5, 2.0, 14.0, 4),
(2.5, 0.0, 14.0, 4),
(2.5, 0.0, 2.0, 4),
(5.0, 1.0, 46.0, 4),
(5.0, 1.0, 38.0, 4),
(5.0, 1.0, 12.0, 4),
(5.0, 1.0, 10.0, 4),
(5.0, 4.0, 16.0, 4),
(5.0, 2.0, 44.0, 4),
(5.0, 7.0, 2.0, 4),
(5.0, 7.0, 8.0, 4),
(5.0, 7.0, 32.0, 4),
(7.5, 2.0, 28.0, 4),
(7.5, 2.0, 12.0, 4),
(7.5, 2.0, 34.0, 4),
(7.5, 4.0, 24.0, 4),
(7.5, 4.0, 10.0, 4),
(7.5, 4.0, 18.0, 4),
(7.5, 9.0, 44.0, 4),
(7.5, 5.0, 12.0, 4),
(7.5, 5.0, 40.0, 4),
(7.5, 5.0, 30.0, 4),
(7.5, 5.0, 12.0, 4),
(10.0, 3.0, 38.0, 4),
(10.0, 3.0, 16.0, 4),
(10.0, 3.0, 32.0, 4),
(10.0, 3.0, 44.0, 4),
(10.0, 3.0, 42.0, 4),
(10.0, 3.0, 34.0, 4),
(10.0, 3.0, 18.0, 4),
(10.0, 7.0, 10.0, 4),
(10.0, 7.0, 40.0, 4),
(10.0, 7.0, 12.0, 4),
(10.0, 6.0, 42.0, 4),
(10.0, 6.0, 6.0, 4),
(10.0, 4.0, 40.0, 4),
(2.5, 7.0, 28.0, 3),
(2.5, 7.0, 26.0, 3),
(2.5, 9.0, 44.0, 3),
(2.5, 9.0, 26.0, 3),
(2.5, 0.0, 32.0, 3),
(2.5, 0.0, 26.0, 3),
(2.5, 8.0, 30.0, 3),
(2.5, 8.0, 30.0, 3),
(2.5, 8.0, 6.0, 3),
(2.5, 6.0, 32.0, 3),
(2.5, 6.0, 12.0, 3),
(5.0, 7.0, 28.0, 3),
(5.0, 7.0, 26.0, 3),
(5.0, 7.0, 46.0, 3),
(5.0, 7.0, 10.0, 3),
(5.0, 6.0, 10.0, 3),
(5.0, 6.0, 44.0, 3),
(5.0, 1.0, 2.0, 3),
(5.0, 9.0, 34.0, 3),
(5.0, 9.0, 30.0, 3),
(7.5, 3.0, 12.0, 3),
(7.5, 7.0, 26.0, 3),
(7.5, 7.0, 18.0, 3),
(7.5, 7.0, 42.0, 3),
(7.5, 7.0, 20.0, 3),
(7.5, 7.0, 16.0, 3),
(7.5, 3.0, 36.0, 3),
(7.5, 3.0, 38.0, 3),
(7.5, 3.0, 14.0, 3),
(7.5, 2.0, 30.0, 3),
(7.5, 2.0, 12.0, 3),
(7.5, 2.0, 8.0, 3),
(7.5, 2.0, 6.0, 3),
(7.5, 6.0, 34.0, 3),
(7.5, 6.0, 12.0, 3),
(10.0, 4.0, 14.0, 3),
(10.0, 4.0, 40.0, 3),
(10.0, 5.0, 2.0, 3),
(10.0, 5.0, 26.0, 3),
(10.0, 6.0, 40.0, 3),
(10.0, 6.0, 46.0, 3),
(10.0, 6.0, 18.0, 3),
(10.0, 6.0, 38.0, 3),
(10.0, 3.0, 16.0, 3),
(10.0, 3.0, 32.0, 3),
(10.0, 3.0, 26.0, 3),
(10.0, 3.0, 22.0, 3),
(10.0, 8.0, 2.0, 3),
(10.0, 8.0, 10.0, 3),
(10.0, 8.0, 12.0, 3),
(10.0, 8.0, 18.0, 3),
(10.0, 8.0, 44.0, 3),
(2.5, 8.0, 2.0, 2),
(2.5, 8.0, 42.0, 2),
(2.5, 7.0, 34.0, 2),
(2.5, 4.0, 36.0, 2),
(2.5, 4.0, 34.0, 2),
(2.5, 4.0, 22.0, 2),
(2.5, 0.0, 42.0, 2),
(2.5, 0.0, 32.0, 2),
(2.5, 1.0, 28.0, 2),
(2.5, 1.0, 2.0, 2),
(2.5, 1.0, 24.0, 2),
(2.5, 1.0, 12.0, 2),
(5.0, 5.0, 22.0, 2),
(5.0, 5.0, 46.0, 2),
(5.0, 5.0, 24.0, 2),
(5.0, 1.0, 48.0, 2),
(5.0, 1.0, 12.0, 2),
(5.0, 1.0, 16.0, 2),
(5.0, 1.0, 2.0, 2),
(5.0, 1.0, 18.0, 2),
(5.0, 8.0, 28.0, 2),
(5.0, 8.0, 32.0, 2),
(5.0, 8.0, 24.0, 2),
(5.0, 8.0, 38.0, 2),
(5.0, 2.0, 24.0, 2),
(5.0, 2.0, 4.0, 2),
(5.0, 2.0, 32.0, 2),
(5.0, 2.0, 38.0, 2),
(5.0, 9.0, 36.0, 2),
(5.0, 9.0, 34.0, 2),
(5.0, 9.0, 4.0, 2),
(7.5, 7.0, 28.0, 2),
(7.5, 7.0, 10.0, 2),
(7.5, 7.0, 48.0, 2),
(7.5, 9.0, 48.0, 2),
(7.5, 9.0, 48.0, 2),
(7.5, 9.0, 30.0, 2),
(7.5, 5.0, 42.0, 2),
(7.5, 5.0, 46.0, 2),
(7.5, 6.0, 26.0, 2),
(7.5, 6.0, 28.0, 2),
(7.5, 6.0, 22.0, 2),
(7.5, 6.0, 10.0, 2),
(7.5, 6.0, 32.0, 2),
(7.5, 6.0, 32.0, 2),
(10.0, 7.0, 10.0, 2),
(10.0, 7.0, 30.0, 2),
(10.0, 7.0, 30.0, 2),
(10.0, 7.0, 14.0, 2),
(10.0, 7.0, 10.0, 2),
(10.0, 7.0, 12.0, 2),
(10.0, 8.0, 12.0, 2),
(10.0, 8.0, 28.0, 2),
(10.0, 8.0, 42.0, 2),
(10.0, 8.0, 4.0, 2),
(10.0, 8.0, 10.0, 2),
(10.0, 8.0, 22.0, 2),
(10.0, 9.0, 6.0, 2),
(10.0, 9.0, 38.0, 2),
(2.5, 2.0, 18.0, 1),
(2.5, 2.0, 24.0, 1),
(2.5, 9.0, 18.0, 1),
(2.5, 9.0, 28.0, 1),
(2.5, 9.0, 20.0, 1),
(2.5, 4.0, 14.0, 1),
(2.5, 4.0, 36.0, 1),
(2.5, 4.0, 26.0, 1),
(2.5, 3.0, 22.0, 1),
(2.5, 3.0, 42.0, 1),
(2.5, 3.0, 32.0, 1),
(2.5, 3.0, 16.0, 1),
(2.5, 3.0, 38.0, 1),
(2.5, 9.0, 2.0, 1),
(2.5, 9.0, 8.0, 1),
(2.5, 9.0, 26.0, 1),
(2.5, 9.0, 42.0, 1),
(5.0, 2.0, 2.0, 1),
(5.0, 2.0, 14.0, 1),
(5.0, 2.0, 8.0, 1),
(5.0, 1.0, 20.0, 1),
(5.0, 1.0, 32.0, 1),
(5.0, 3.0, 48.0, 1),
(5.0, 0.0, 42.0, 1),
(7.5, 3.0, 2.0, 1),
(7.5, 3.0, 36.0, 1),
(7.5, 5.0, 32.0, 1),
(7.5, 5.0, 20.0, 1),
(7.5, 5.0, 34.0, 1),
(7.5, 0.0, 6.0, 1),
(7.5, 0.0, 12.0, 1),
(7.5, 8.0, 48.0, 1),
(7.5, 8.0, 32.0, 1),
(7.5, 8.0, 4.0, 1),
(10.0, 5.0, 22.0, 1),
(10.0, 5.0, 18.0, 1),
(10.0, 5.0, 46.0, 1),
(10.0, 5.0, 12.0, 1),
(10.0, 5.0, 30.0, 1),
(10.0, 7.0, 36.0, 1),
(10.0, 7.0, 30.0, 1),
(10.0, 7.0, 20.0, 1),
(10.0, 7.0, 38.0, 1),
(10.0, 7.0, 20.0, 1),
(10.0, 1.0, 18.0, 1),
(10.0, 1.0, 10.0, 1),
(10.0, 1.0, 18.0, 1),
(10.0, 1.0, 20.0, 1),
(10.0, 0.0, 12.0, 1),
(10.0, 0.0, 46.0, 1),
(10.0, 0.0, 38.0, 1),
(2.5, 7.0, 40.0, 10),
(2.5, 7.0, 22.0, 10),
(2.5, 4.0, 12.0, 10),
(2.5, 4.0, 32.0, 10),
(2.5, 4.0, 36.0, 10),
(2.5, 0.0, 20.0, 10),
(2.5, 0.0, 30.0, 10),
(2.5, 3.0, 40.0, 10),
(2.5, 3.0, 10.0, 10),
(2.5, 8.0, 42.0, 10),
(2.5, 8.0, 4.0, 10),
(2.5, 8.0, 44.0, 10),
(2.5, 8.0, 32.0, 10),
(2.5, 8.0, 24.0, 10),
(5.0, 9.0, 42.0, 10),
(5.0, 9.0, 18.0, 10),
(5.0, 9.0, 2.0, 10),
(5.0, 7.0, 46.0, 10),
(5.0, 7.0, 42.0, 10),
(5.0, 7.0, 34.0, 10),
(5.0, 0.0, 46.0, 10),
(5.0, 0.0, 8.0, 10),
(5.0, 5.0, 28.0, 10),
(5.0, 1.0, 4.0, 10),
(5.0, 1.0, 10.0, 10),
(5.0, 1.0, 26.0, 10),
(7.5, 3.0, 26.0, 10),
(7.5, 3.0, 42.0, 10),
(7.5, 3.0, 36.0, 10),
(7.5, 0.0, 16.0, 10),
(7.5, 0.0, 40.0, 10),
(7.5, 2.0, 4.0, 10),
(7.5, 2.0, 14.0, 10),
(7.5, 2.0, 46.0, 10),
(7.5, 8.0, 38.0, 10),
(7.5, 8.0, 6.0, 10),
(7.5, 8.0, 24.0, 10),
(7.5, 8.0, 20.0, 10),
(7.5, 0.0, 48.0, 10),
(7.5, 0.0, 20.0, 10),
(7.5, 0.0, 46.0, 10),
(7.5, 0.0, 38.0, 10),
(10.0, 2.0, 32.0, 10),
(10.0, 2.0, 10.0, 10),
(10.0, 2.0, 30.0, 10),
(10.0, 8.0, 14.0, 10),
(10.0, 8.0, 24.0, 10),
(10.0, 8.0, 44.0, 10),
(10.0, 9.0, 28.0, 10),
(10.0, 9.0, 36.0, 10),
(10.0, 9.0, 12.0, 10),
(10.0, 6.0, 20.0, 10),
(10.0, 6.0, 46.0, 10),
(10.0, 6.0, 20.0, 10),
(10.0, 6.0, 28.0, 10),
(10.0, 6.0, 16.0, 10),
(10.0, 6.0, 44.0, 10),
(10.0, 6.0, 28.0, 10),
(2.5, 6.0, 6.0, 9),
(2.5, 5.0, 24.0, 9),
(2.5, 5.0, 6.0, 9),
(2.5, 2.0, 42.0, 9),
(2.5, 2.0, 24.0, 9),
(2.5, 2.0, 36.0, 9),
(2.5, 2.0, 42.0, 9),
(2.5, 2.0, 16.0, 9),
(2.5, 2.0, 22.0, 9),
(2.5, 2.0, 26.0, 9),
(2.5, 2.0, 36.0, 9),
(2.5, 8.0, 30.0, 9),
(2.5, 8.0, 6.0, 9),
(5.0, 9.0, 6.0, 9),
(5.0, 9.0, 22.0, 9),
(5.0, 9.0, 42.0, 9),
(5.0, 1.0, 10.0, 9),
(5.0, 2.0, 32.0, 9),
(5.0, 2.0, 28.0, 9),
(5.0, 0.0, 34.0, 9),
(5.0, 0.0, 22.0, 9),
(5.0, 4.0, 2.0, 9),
(5.0, 4.0, 2.0, 9),
(5.0, 4.0, 4.0, 9),
(7.5, 5.0, 6.0, 9),
(7.5, 5.0, 28.0, 9),
(7.5, 3.0, 2.0, 9),
(7.5, 3.0, 34.0, 9),
(7.5, 3.0, 8.0, 9),
(7.5, 7.0, 46.0, 9),
(7.5, 9.0, 34.0, 9),
(7.5, 9.0, 44.0, 9),
(7.5, 4.0, 10.0, 9),
(7.5, 4.0, 10.0, 9),
(10.0, 4.0, 16.0, 9),
(10.0, 4.0, 4.0, 9),
(10.0, 1.0, 44.0, 9),
(10.0, 1.0, 16.0, 9),
(10.0, 1.0, 30.0, 9),
(10.0, 1.0, 44.0, 9),
(10.0, 3.0, 4.0, 9),
(10.0, 3.0, 46.0, 9),
(10.0, 0.0, 14.0, 9),
(2.5, 1.0, 4.0, 8),
(2.5, 1.0, 18.0, 8),
(2.5, 1.0, 8.0, 8),
(2.5, 3.0, 32.0, 8),
(2.5, 3.0, 28.0, 8),
(2.5, 3.0, 46.0, 8),
(2.5, 3.0, 12.0, 8),
(2.5, 3.0, 18.0, 8),
(2.5, 0.0, 34.0, 8),
(2.5, 0.0, 22.0, 8),
(2.5, 2.0, 22.0, 8),
(2.5, 2.0, 14.0, 8),
(2.5, 2.0, 42.0, 8),
(2.5, 0.0, 16.0, 8),
(5.0, 4.0, 24.0, 8),
(5.0, 4.0, 26.0, 8),
(5.0, 0.0, 26.0, 8),
(5.0, 2.0, 44.0, 8),
(5.0, 2.0, 38.0, 8),
(5.0, 2.0, 48.0, 8),
(5.0, 2.0, 26.0, 8),
(5.0, 2.0, 6.0, 8),
(5.0, 4.0, 12.0, 8),
(5.0, 9.0, 36.0, 8),
(5.0, 9.0, 48.0, 8),
(5.0, 9.0, 16.0, 8),
(5.0, 9.0, 6.0, 8),
(7.5, 0.0, 34.0, 8),
(7.5, 6.0, 10.0, 8),
(7.5, 6.0, 12.0, 8),
(7.5, 6.0, 10.0, 8),
(7.5, 9.0, 26.0, 8),
(7.5, 9.0, 6.0, 8),
(7.5, 9.0, 40.0, 8),
(7.5, 9.0, 18.0, 8),
(7.5, 4.0, 20.0, 8),
(7.5, 4.0, 42.0, 8),
(7.5, 4.0, 16.0, 8),
(10.0, 5.0, 24.0, 8),
(10.0, 5.0, 44.0, 8),
(10.0, 5.0, 20.0, 8),
(10.0, 5.0, 22.0, 8),
(10.0, 3.0, 16.0, 8),
(10.0, 3.0, 14.0, 8),
(10.0, 3.0, 6.0, 8),
(10.0, 2.0, 24.0, 8),
(10.0, 2.0, 36.0, 8),
(10.0, 2.0, 46.0, 8),
(10.0, 0.0, 42.0, 8),
(10.0, 0.0, 26.0, 8),
(10.0, 0.0, 42.0, 8),
(10.0, 0.0, 10.0, 8),
(10.0, 9.0, 12.0, 8),
(10.0, 9.0, 8.0, 8),
(2.5, 4.0, 42.0, 7),
(2.5, 4.0, 14.0, 7),
(2.5, 4.0, 46.0, 7),
(2.5, 4.0, 18.0, 7),
(2.5, 2.0, 2.0, 7),
(2.5, 2.0, 38.0, 7),
(2.5, 2.0, 14.0, 7),
(2.5, 8.0, 26.0, 7),
(2.5, 7.0, 12.0, 7),
(2.5, 7.0, 46.0, 7),
(2.5, 1.0, 44.0, 7),
(5.0, 8.0, 26.0, 7),
(5.0, 0.0, 46.0, 7),
(5.0, 9.0, 44.0, 7),
(5.0, 9.0, 16.0, 7),
(5.0, 9.0, 40.0, 7),
(5.0, 5.0, 14.0, 7),
(5.0, 7.0, 6.0, 7),
(5.0, 7.0, 30.0, 7),
(7.5, 1.0, 16.0, 7),
(7.5, 1.0, 18.0, 7),
(7.5, 1.0, 30.0, 7),
(7.5, 1.0, 4.0, 7),
(7.5, 1.0, 10.0, 7),
(7.5, 1.0, 40.0, 7),
(7.5, 1.0, 18.0, 7),
(7.5, 3.0, 14.0, 7),
(7.5, 3.0, 48.0, 7),
(7.5, 3.0, 48.0, 7),
(7.5, 3.0, 6.0, 7),
(7.5, 0.0, 36.0, 7),
(7.5, 0.0, 42.0, 7),
(7.5, 0.0, 22.0, 7),
(10.0, 9.0, 8.0, 7),
(10.0, 9.0, 8.0, 7),
(10.0, 7.0, 36.0, 7),
(10.0, 7.0, 46.0, 7),
(10.0, 7.0, 20.0, 7),
(10.0, 7.0, 14.0, 7),
(10.0, 4.0, 32.0, 7),
(10.0, 4.0, 16.0, 7),
(10.0, 1.0, 24.0, 7),
(10.0, 1.0, 40.0, 7),
(10.0, 1.0, 18.0, 7),
(10.0, 1.0, 24.0, 7),
(10.0, 7.0, 30.0, 7),
(10.0, 7.0, 30.0, 7),
(2.5, 5.0, 24.0, 6),
(2.5, 5.0, 42.0, 6),
(2.5, 8.0, 34.0, 6),
(2.5, 8.0, 34.0, 6),
(2.5, 8.0, 48.0, 6),
(2.5, 8.0, 26.0, 6),
(2.5, 3.0, 28.0, 6),
(5.0, 4.0, 44.0, 6),
(5.0, 4.0, 46.0, 6),
(5.0, 6.0, 28.0, 6),
(5.0, 6.0, 46.0, 6),
(5.0, 2.0, 10.0, 6),
(5.0, 2.0, 4.0, 6),
(5.0, 2.0, 34.0, 6),
(5.0, 4.0, 46.0, 6),
(5.0, 4.0, 20.0, 6),
(7.5, 7.0, 2.0, 6),
(7.5, 6.0, 46.0, 6),
(7.5, 6.0, 48.0, 6),
(7.5, 6.0, 36.0, 6),
(7.5, 6.0, 42.0, 6),
(10.0, 7.0, 8.0, 6),
(10.0, 8.0, 18.0, 6),
(10.0, 8.0, 42.0, 6),
(10.0, 8.0, 32.0, 6),
(10.0, 2.0, 22.0, 6),
(10.0, 2.0, 28.0, 6),
(10.0, 2.0, 6.0, 6),
(10.0, 2.0, 36.0, 6),
(10.0, 2.0, 48.0, 6),
(10.0, 2.0, 28.0, 6),
(10.0, 2.0, 36.0, 6),
(10.0, 2.0, 6.0, 6),
(10.0, 9.0, 24.0, 6),
(2.5, 6.0, 42.0, 5),
(2.5, 6.0, 8.0, 5),
(2.5, 7.0, 20.0, 5),
(2.5, 7.0, 4.0, 5),
(2.5, 7.0, 16.0, 5),
(2.5, 5.0, 34.0, 5),
(2.5, 5.0, 22.0, 5),
(2.5, 2.0, 26.0, 5),
(2.5, 2.0, 38.0, 5),
(2.5, 2.0, 30.0, 5),
(5.0, 9.0, 42.0, 5),
(5.0, 9.0, 20.0, 5),
(5.0, 9.0, 32.0, 5),
(5.0, 6.0, 46.0, 5),
(5.0, 6.0, 14.0, 5),
(5.0, 0.0, 38.0, 5),
(5.0, 0.0, 16.0, 5),
(5.0, 4.0, 16.0, 5),
(5.0, 4.0, 42.0, 5),
(5.0, 4.0, 8.0, 5),
(5.0, 9.0, 2.0, 5),
(5.0, 9.0, 26.0, 5),
(7.5, 4.0, 20.0, 5),
(7.5, 4.0, 6.0, 5),
(7.5, 9.0, 6.0, 5),
(7.5, 9.0, 48.0, 5),
(7.5, 9.0, 46.0, 5),
(7.5, 9.0, 36.0, 5),
(7.5, 6.0, 40.0, 5),
(7.5, 3.0, 28.0, 5),
(7.5, 3.0, 24.0, 5),
(7.5, 7.0, 14.0, 5),
(7.5, 7.0, 26.0, 5),
(7.5, 7.0, 48.0, 5),
(7.5, 7.0, 16.0, 5),
(10.0, 4.0, 42.0, 5),
(10.0, 9.0, 42.0, 5),
(10.0, 9.0, 6.0, 5),
(10.0, 9.0, 12.0, 5),
(10.0, 0.0, 16.0, 5),
(10.0, 0.0, 14.0, 5),
(10.0, 8.0, 28.0, 5),
(10.0, 8.0, 12.0, 5),
(10.0, 8.0, 34.0, 5),
(10.0, 6.0, 22.0, 5),
(10.0, 6.0, 44.0, 5),
(10.0, 6.0, 32.0, 5))
MUNSELL_BOUNDING_HUES = (
((2.5, 4), (2.5, 4)),
((2.5, 4), (2.5, 4)),
((2.5, 4), (2.5, 4)),
((5.0, 4), (7.5, 4)),
((5.0, 4), (7.5, 4)),
((5.0, 4), (7.5, 4)),
((5.0, 4), (7.5, 4)),
((5.0, 4), (7.5, 4)),
((5.0, 4), (7.5, 4)),
((7.5, 4), (10, 4)),
((7.5, 4), (10, 4)),
((7.5, 4), (10, 4)),
((7.5, 4), (10, 4)),
((7.5, 4), (10, 4)),
((7.5, 4), (10, 4)),
((7.5, 4), (10, 4)),
((7.5, 4), (10, 4)),
((7.5, 4), (10, 4)),
((7.5, 4), (10, 4)),
((7.5, 4), (10, 4)),
((7.5, 4), (10, 4)),
((7.5, 4), (10, 4)),
((10.0, 4), (10.0, 4)),
((7.5, 4), (10, 4)),
((7.5, 4), (10, 4)),
((10.0, 4), (10.0, 4)),
((10.0, 4), (10.0, 4)),
((10.0, 4), (10.0, 4)),
((10.0, 4), (10.0, 4)),
((10.0, 4), (10.0, 4)),
((7.5, 4), (10, 4)),
((10.0, 4), (10.0, 4)),
((7.5, 4), (10, 4)),
((2.5, 3), (5.0, 3)),
((2.5, 3), (5.0, 3)),
((2.5, 3), (5.0, 3)),
((2.5, 3), (2.5, 3)),
((2.5, 3), (5.0, 3)),
((2.5, 3), (5.0, 3)),
((2.5, 3), (5.0, 3)),
((2.5, 3), (5.0, 3)),
((5.0, 3), (7.5, 3)),
((5.0, 3), (7.5, 3)),
((5.0, 3), (7.5, 3)),
((5.0, 3), (7.5, 3)),
((5.0, 3), (7.5, 3)),
((5.0, 3), (7.5, 3)),
((5.0, 3), (7.5, 3)),
((5.0, 3), (7.5, 3)),
((7.5, 3), (10, 3)),
((7.5, 3), (10, 3)),
((7.5, 3), (10, 3)),
((7.5, 3), (10, 3)),
((7.5, 3), (10, 3)),
((7.5, 3), (10, 3)),
((7.5, 3), (10, 3)),
((7.5, 3), (10, 3)),
((7.5, 3), (10, 3)),
((7.5, 3), (10, 3)),
((7.5, 3), (10, 3)),
((7.5, 3), (10, 3)),
((7.5, 3), (10, 3)),
((7.5, 3), (10, 3)),
((10.0, 3), (10.0, 3)),
((7.5, 3), (10, 3)),
((7.5, 3), (10, 3)),
((7.5, 3), (10, 3)),
((10.0, 3), (10.0, 3)),
((10.0, 3), (10.0, 3)),
((7.5, 3), (10, 3)),
((10.0, 3), (10.0, 3)),
((10.0, 3), (10.0, 3)),
((10.0, 3), (10.0, 3)),
((2.5, 2), (2.5, 2)),
((2.5, 2), (2.5, 2)),
((2.5, 2), (5.0, 2)),
((2.5, 2), (5.0, 2)),
((2.5, 2), (2.5, 2)),
((2.5, 2), (5.0, 2)),
((2.5, 2), (5.0, 2)),
((5.0, 2), (7.5, 2)),
((5.0, 2), (7.5, 2)),
((5.0, 2), (7.5, 2)),
((5.0, 2), (7.5, 2)),
((5.0, 2), (7.5, 2)),
((5.0, 2), (7.5, 2)),
((5.0, 2), (7.5, 2)),
((5.0, 2), (7.5, 2)),
((5.0, 2), (7.5, 2)),
((5.0, 2), (7.5, 2)),
((7.5, 2), (10, 2)),
((7.5, 2), (10, 2)),
((10.0, 2), (10.0, 2)),
((10.0, 2), (10.0, 2)),
((7.5, 2), (10, 2)),
((7.5, 2), (10, 2)),
((2.5, 1), (5.0, 1)),
((2.5, 1), (2.5, 1)),
((2.5, 1), (2.5, 1)),
((2.5, 1), (2.5, 1)),
((2.5, 1), (2.5, 1)),
((2.5, 1), (5.0, 1)),
((2.5, 1), (2.5, 1)),
((2.5, 1), (5.0, 1)),
((2.5, 1), (2.5, 1)),
((2.5, 1), (2.5, 1)),
((2.5, 1), (5.0, 1)),
((2.5, 1), (2.5, 1)),
((2.5, 1), (2.5, 1)),
((2.5, 1), (2.5, 1)),
((2.5, 1), (5.0, 1)),
((5.0, 1), (7.5, 1)),
((5.0, 1), (7.5, 1)),
((5.0, 1), (7.5, 1)),
((5.0, 1), (7.5, 1)),
((5.0, 1), (7.5, 1)),
((5.0, 1), (7.5, 1)),
((5.0, 1), (7.5, 1)),
((5.0, 1), (7.5, 1)),
((7.5, 1), (10, 1)),
((7.5, 1), (10, 1)),
((7.5, 1), (10, 1)),
((7.5, 1), (10, 1)),
((7.5, 1), (10, 1)),
((7.5, 1), (10, 1)),
((7.5, 1), (10, 1)),
((7.5, 1), (10, 1)),
((10.0, 1), (10.0, 1)),
((7.5, 1), (10, 1)),
((10.0, 1), (10.0, 1)),
((10.0, 1), (10.0, 1)),
((2.5, 10), (5.0, 10)),
((2.5, 10), (5.0, 10)),
((2.5, 10), (2.5, 10)),
((2.5, 10), (2.5, 10)),
((2.5, 10), (2.5, 10)),
((2.5, 10), (2.5, 10)),
((2.5, 10), (2.5, 10)),
((2.5, 10), (2.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((5.0, 10), (7.5, 10)),
((7.5, 10), (10, 10)),
((7.5, 10), (10, 10)),
((7.5, 10), (10, 10)),
((7.5, 10), (10, 10)),
((7.5, 10), (10, 10)),
((7.5, 10), (10, 10)),
((7.5, 10), (10, 10)),
((7.5, 10), (10, 10)),
((7.5, 10), (10, 10)),
((7.5, 10), (10, 10)),
((7.5, 10), (10, 10)),
((7.5, 10), (10, 10)),
((10.0, 10), (10.0, 10)),
((7.5, 10), (10, 10)),
((10.0, 10), (10.0, 10)),
((10.0, 10), (10.0, 10)),
((7.5, 10), (10, 10)),
((7.5, 10), (10, 10)),
((10.0, 10), (10.0, 10)),
((7.5, 10), (10, 10)),
((7.5, 10), (10, 10)),
((10.0, 10), (10.0, 10)),
((10.0, 10), (10.0, 10)),
((2.5, 9), (2.5, 9)),
((2.5, 9), (2.5, 9)),
((2.5, 9), (5.0, 9)),
((2.5, 9), (5.0, 9)),
((2.5, 9), (2.5, 9)),
((2.5, 9), (5.0, 9)),
((2.5, 9), (2.5, 9)),
((2.5, 9), (5.0, 9)),
((2.5, 9), (2.5, 9)),
((2.5, 9), (2.5, 9)),
((2.5, 9), (5.0, 9)),
((2.5, 9), (5.0, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((5.0, 9), (7.5, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((10.0, 9), (10.0, 9)),
((10.0, 9), (10.0, 9)),
((10.0, 9), (10.0, 9)),
((10.0, 9), (10.0, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((7.5, 9), (10, 9)),
((10.0, 9), (10.0, 9)),
((7.5, 9), (10, 9)),
((10.0, 9), (10.0, 9)),
((10.0, 9), (10.0, 9)),
((2.5, 8), (2.5, 8)),
((2.5, 8), (5.0, 8)),
((2.5, 8), (5.0, 8)),
((2.5, 8), (5.0, 8)),
((2.5, 8), (2.5, 8)),
((2.5, 8), (5.0, 8)),
((5.0, 8), (7.5, 8)),
((5.0, 8), (7.5, 8)),
((5.0, 8), (7.5, 8)),
((5.0, 8), (7.5, 8)),
((5.0, 8), (7.5, 8)),
((5.0, 8), (7.5, 8)),
((5.0, 8), (7.5, 8)),
((7.5, 8), (10, 8)),
((7.5, 8), (10, 8)),
((7.5, 8), (10, 8)),
((7.5, 8), (10, 8)),
((7.5, 8), (10, 8)),
((7.5, 8), (10, 8)),
((10.0, 8), (10.0, 8)),
((7.5, 8), (10, 8)),
((10.0, 8), (10.0, 8)),
((7.5, 8), (10, 8)),
((7.5, 8), (10, 8)),
((10.0, 8), (10.0, 8)),
((10.0, 8), (10.0, 8)),
((10.0, 8), (10.0, 8)),
((7.5, 8), (10, 8)),
((2.5, 7), (5.0, 7)),
((2.5, 7), (5.0, 7)),
((2.5, 7), (2.5, 7)),
((2.5, 7), (5.0, 7)),
((2.5, 7), (2.5, 7)),
((2.5, 7), (5.0, 7)),
((2.5, 7), (2.5, 7)),
((5.0, 7), (7.5, 7)),
((5.0, 7), (7.5, 7)),
((5.0, 7), (7.5, 7)),
((5.0, 7), (7.5, 7)),
((5.0, 7), (7.5, 7)),
((5.0, 7), (7.5, 7)),
((5.0, 7), (7.5, 7)),
((5.0, 7), (7.5, 7)),
((7.5, 7), (10, 7)),
((7.5, 7), (10, 7)),
((7.5, 7), (10, 7)),
((7.5, 7), (10, 7)),
((7.5, 7), (10, 7)),
((7.5, 7), (10, 7)),
((10.0, 7), (10.0, 7)),
((7.5, 7), (10, 7)),
((10.0, 7), (10.0, 7)),
((10.0, 7), (10.0, 7)),
((10.0, 7), (10.0, 7)),
((10.0, 7), (10.0, 7)),
((10.0, 7), (10.0, 7)),
((2.5, 6), (5.0, 6)),
((2.5, 6), (5.0, 6)),
((2.5, 6), (2.5, 6)),
((2.5, 6), (2.5, 6)),
((2.5, 6), (2.5, 6)),
((5.0, 6), (7.5, 6)),
((5.0, 6), (7.5, 6)),
((5.0, 6), (7.5, 6)),
((5.0, 6), (7.5, 6)),
((5.0, 6), (7.5, 6)),
((5.0, 6), (7.5, 6)),
((5.0, 6), (7.5, 6)),
((7.5, 6), (10, 6)),
((7.5, 6), (10, 6)),
((7.5, 6), (10, 6)),
((7.5, 6), (10, 6)),
((10.0, 6), (10.0, 6)),
((10.0, 6), (10.0, 6)),
((10.0, 6), (10.0, 6)),
((10.0, 6), (10.0, 6)),
((10.0, 6), (10.0, 6)),
((10.0, 6), (10.0, 6)),
((7.5, 6), (10, 6)),
((10.0, 6), (10.0, 6)),
((7.5, 6), (10, 6)),
((7.5, 6), (10, 6)),
((7.5, 6), (10, 6)),
((7.5, 6), (10, 6)),
((2.5, 5), (5.0, 5)),
((2.5, 5), (2.5, 5)),
((2.5, 5), (2.5, 5)),
((2.5, 5), (5.0, 5)),
((2.5, 5), (2.5, 5)),
((2.5, 5), (2.5, 5)),
((2.5, 5), (2.5, 5)),
((2.5, 5), (5.0, 5)),
((5.0, 5), (7.5, 5)),
((5.0, 5), (7.5, 5)),
((5.0, 5), (7.5, 5)),
((5.0, 5), (7.5, 5)),
((5.0, 5), (7.5, 5)),
((5.0, 5), (7.5, 5)),
((5.0, 5), (7.5, 5)),
((7.5, 5), (10, 5)),
((7.5, 5), (10, 5)),
((7.5, 5), (10, 5)),
((7.5, 5), (10, 5)),
((7.5, 5), (10, 5)),
((7.5, 5), (10, 5)),
((7.5, 5), (10, 5)),
((7.5, 5), (10, 5)))
MUNSELL_HUE_TO_ANGLE = (
(2.5, 1, 208.75),
(2.5, 2, 153.75),
(2.5, 3, 118.75),
(2.5, 4, 63.75),
(2.5, 5, 39.375),
(2.5, 6, 16.875),
(2.5, 7, 348.75),
(2.5, 8, 300.0),
(2.5, 9, 251.25),
(2.5, 10, 236.25),
(5.0, 1, 225.0),
(5.0, 2, 160.0),
(5.0, 3, 135.0),
(5.0, 4, 70.0),
(5.0, 5, 45.0),
(5.0, 6, 22.5),
(5.0, 7, 0.0),
(5.0, 8, 315.0),
(5.0, 9, 255.0),
(5.0, 10, 240.0),
(7.5, 1, 228.75),
(7.5, 2, 176.25),
(7.5, 3, 141.25),
(7.5, 4, 86.25),
(7.5, 5, 51.25),
(7.5, 6, 28.125),
(7.5, 7, 5.625),
(7.5, 8, 326.25),
(7.5, 9, 270.0),
(7.5, 10, 243.75),
(10.0, 1, 232.5),
(10.0, 2, 192.5),
(10.0, 3, 147.5),
(10.0, 4, 102.5),
(10.0, 5, 57.5),
(10.0, 6, 33.75),
(10.0, 7, 11.25),
(10.0, 8, 337.5),
(10.0, 9, 285.0),
(10.0, 10, 247.5))
MUNSELL_HUE_TO_ASTM_HUE = (
(2.5, 0, 72.5),
(2.5, 1, 62.5),
(2.5, 2, 52.5),
(2.5, 3, 42.5),
(2.5, 4, 32.5),
(2.5, 5, 22.5),
(2.5, 6, 12.5),
(2.5, 7, 2.5),
(2.5, 8, 92.5),
(2.5, 9, 82.5),
(2.5, 10, 72.5),
(5.0, 0, 75.0),
(5.0, 1, 65.0),
(5.0, 2, 55.0),
(5.0, 3, 45.0),
(5.0, 4, 35.0),
(5.0, 5, 25.0),
(5.0, 6, 15.0),
(5.0, 7, 5.0),
(5.0, 8, 95.0),
(5.0, 9, 85.0),
(5.0, 10, 75.0),
(7.5, 0, 77.5),
(7.5, 1, 67.5),
(7.5, 2, 57.5),
(7.5, 3, 47.5),
(7.5, 4, 37.5),
(7.5, 5, 27.5),
(7.5, 6, 17.5),
(7.5, 7, 7.5),
(7.5, 8, 97.5),
(7.5, 9, 87.5),
(7.5, 10, 77.5),
(10.0, 0, 80.0),
(10.0, 1, 70.0),
(10.0, 2, 60.0),
(10.0, 3, 50.0),
(10.0, 4, 40.0),
(10.0, 5, 30.0),
(10.0, 6, 20.0),
(10.0, 7, 10.0),
(10.0, 8, 100.0),
(10.0, 9, 90.0),
(10.0, 10, 80.0))
MUNSELL_INTERPOLATION_METHODS = (
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Linear',
'Linear',
None,
None,
'Linear',
'Linear',
'Linear',
'Linear',
'Radial',
'Linear',
'Radial',
'Radial',
'Radial',
'Linear',
'Linear',
'Linear',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Linear',
'Radial',
'Radial',
'Radial',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
None,
None,
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
None,
None,
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Radial',
'Radial',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Radial',
'Linear',
'Linear',
'Radial',
None,
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
None,
None,
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
None,
None,
None,
'Linear',
'Linear',
'Radial',
'Radial',
'Radial',
None,
None,
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Linear',
'Linear',
'Radial',
'Linear',
'Linear',
'Linear',
None,
None,
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
None,
None,
'Radial',
'Linear',
'Linear',
'Radial',
'Radial',
'Radial',
'Radial',
None,
None,
None,
None,
'Linear',
'Linear',
'Linear',
'Radial',
'Radial',
'Radial',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Radial',
'Linear',
'Radial',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Radial',
'Radial',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
None,
None,
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
None,
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
None,
None,
'Linear',
'Linear',
'Linear',
None,
'Linear',
'Linear',
None,
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
None,
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
None,
None,
None,
None,
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
None,
'Linear',
'Linear',
'Linear',
'Radial',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
None,
None,
None,
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Linear',
'Linear',
'Linear',
'Linear',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Linear',
'Radial',
'Linear',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Radial',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Radial',
'Linear',
'Linear',
'Linear',
'Linear',
'Radial',
'Linear',
'Radial',
'Radial',
'Linear',
'Radial',
'Linear',
'Radial',
'Radial',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Linear',
'Radial',
'Radial',
None,
None,
'Radial',
'Radial',
'Radial',
'Radial',
'Linear',
'Radial',
'Radial',
'Radial',
'Linear',
'Linear',
'Linear',
'Radial',
'Radial',
'Radial',
'Linear',
'Linear',
'Linear',
'Linear',
'Radial',
'Linear',
'Radial',
'Radial',
None,
None,
'Linear',
'Radial',
'Linear',
'Radial',
'Radial',
'Radial')
MUNSELL_XY_FROM_RENOTATION_OVOID = (
(0.4333, 0.5602),
None,
None,
None,
None,
None,
(0.3799, 0.447),
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
(0.3284, 0.3559),
(0.3722, 0.4669),
None,
None,
(0.274, 0.879),
None,
None,
(0.3395, 0.5913),
(0.303, 0.809),
None,
(0.345, 0.5949),
None,
None,
(0.345, 0.5949),
None,
(0.202, 0.807),
None,
None,
None,
None,
(0.168, 0.88),
(0.3123, 0.4732),
None,
(0.3092, 0.5095),
None,
(0.3128, 0.4175),
None,
(0.149, 0.681),
(0.1689, 0.6549),
None,
(0.206, 0.619),
None,
None,
(0.163, 0.67),
(0.163, 0.67),
(0.2952, 0.3851),
(0.069, 0.764),
(0.2574, 0.4814),
(0.123, 0.546),
(0.1397, 0.5312),
None,
(0.2554, 0.4087),
(0.2466, 0.4181),
None,
(0.2833, 0.3564),
None,
None,
(0.1516, 0.4505),
(0.1303, 0.4858),
(0.1841, 0.4448),
None,
(0.1688, 0.457),
(0.1982, 0.433),
None,
None,
(0.1262, 0.4667),
None,
(0.1022, 0.4759),
(0.1842, 0.4244),
(0.22, 0.3983),
(0.034, 0.546),
(0.2171, 0.4138),
(0.1398, 0.4168),
None,
(0.291, 0.331),
(0.069, 0.4542),
None,
None,
(0.1551, 0.4208),
None,
(0.0925, 0.4275),
None,
None,
(0.0333, 0.4444),
(0.2957, 0.3293),
(0.243, 0.371),
(0.2282, 0.3811),
(0.1866, 0.4086),
None,
(0.294, 0.3268),
None,
None,
None,
None,
(0.0636, 0.3788),
None,
None,
None,
(0.26, 0.3289),
None,
None,
(0.0781, 0.3211),
None,
(0.067, 0.32),
None,
None,
None,
(0.25, 0.3141),
None,
None,
None,
(0.122, 0.351),
None,
None,
(0.2234, 0.315),
None,
None,
None,
None,
(0.2768, 0.3287),
None,
(0.2094, 0.3165),
None,
None,
None,
None,
None,
None,
(0.068, 0.283),
None,
(0.092, 0.29),
(0.1961, 0.311),
None,
None,
(0.2035, 0.2956),
None,
None,
(0.1671, 0.2832),
(0.2035, 0.2956),
(0.1841, 0.2892),
(0.1937, 0.2978),
None,
None,
(0.2686, 0.313),
(0.212, 0.3025),
(0.118, 0.273),
(0.2501, 0.3118),
None,
None,
None,
None,
None,
None,
(0.1027, 0.2057),
None,
None,
None,
None,
None,
(0.065, 0.17),
None,
(0.2909, 0.3125),
(0.228, 0.296),
None,
None,
(0.2559, 0.2874),
None,
(0.1245, 0.1827),
None,
None,
None,
None,
(0.2616, 0.2857),
None,
None,
(0.098, 0.146),
None,
None,
None,
None,
None,
(0.2688, 0.2956),
(0.096, 0.126),
(0.1203, 0.1505),
None,
(0.1666, 0.1964),
None,
None,
None,
(0.128, 0.162),
None,
(0.128, 0.162),
None,
(0.084, 0.094),
None,
None,
None,
None,
None,
None,
None,
(0.1634, 0.1698),
None,
None,
None,
None,
None,
(0.1576, 0.16),
None,
(0.2758, 0.2879),
None,
None,
None,
None,
None,
(0.2991, 0.3057),
None,
None,
None,
None,
None,
(0.109, 0.079),
(0.2012, 0.1867),
(0.1285, 0.087),
(0.095, 0.027),
(0.1642, 0.0655),
(0.157, 0.034),
(0.159, 0.044),
None,
None,
(0.242, 0.2148),
(0.1762, 0.0955),
(0.161, 0.016),
None,
(0.2702, 0.2648),
None,
None,
None,
None,
None,
None,
(0.1918, 0.0379),
(0.22, 0.133),
(0.1925, 0.042),
(0.24, 0.196),
None,
None,
None,
None,
None,
(0.214, 0.143),
None,
(0.214, 0.143),
(0.194, 0.101),
(0.2265, 0.1671),
None,
(0.194, 0.101),
(0.2842, 0.255),
(0.2372, 0.1223),
(0.2806, 0.2444),
(0.218, 0.022),
(0.2277, 0.0621),
(0.22, 0.031),
(0.218, 0.022),
(0.2372, 0.098),
(0.2298, 0.0696),
(0.226, 0.0555),
(0.22, 0.031),
None,
(0.2881, 0.2671),
(0.296, 0.271),
None,
None,
(0.2701, 0.1178),
(0.254, 0.039),
(0.2559, 0.0525),
None,
None,
(0.3022, 0.2825),
(0.3022, 0.2825),
(0.2958, 0.2565),
(0.3093, 0.2555),
(0.3018, 0.1253),
(0.3088, 0.274),
(0.291, 0.06),
(0.3037, 0.1981),
None,
None,
None,
(0.3056, 0.206),
(0.3056, 0.206),
(0.337, 0.1756),
(0.321, 0.2686),
None,
(0.3078, 0.0839),
None,
None,
(0.3214, 0.2517),
None,
None,
(0.329, 0.2095),
(0.337, 0.08),
(0.3342, 0.1551),
None,
(0.414, 0.102),
None,
(0.3754, 0.1898),
(0.3929, 0.1506),
None,
None,
(0.383, 0.096),
(0.3711, 0.1449),
None,
None,
(0.473, 0.172),
(0.482, 0.162),
None,
None,
None,
None,
None,
(0.3708, 0.238),
(0.4104, 0.2361),
None,
None,
(0.4, 0.263),
(0.3431, 0.2988),
None,
(0.396, 0.286),
(0.4125, 0.2784),
(0.396, 0.286),
None,
(0.3512, 0.3052),
None,
None,
(0.513, 0.2101),
None,
(0.4799, 0.2329),
(0.57, 0.24),
None,
(0.5396, 0.2535),
(0.554, 0.246),
(0.5628, 0.2241),
(0.538, 0.2369),
(0.4218, 0.2864),
None,
None,
None,
None,
None,
None,
None,
(0.414, 0.302),
(0.376, 0.31),
None,
(0.5369, 0.281),
None,
(0.5898, 0.2622),
(0.3614, 0.3033),
None,
(0.5734, 0.2083),
None,
(0.4435, 0.3119),
None,
None,
None,
None,
None,
None,
None,
(0.5341, 0.3158),
(0.3805, 0.3244),
None,
None,
None,
None,
(0.466, 0.2888),
(0.6111, 0.229),
None,
None,
(0.6492, 0.3012),
None,
None,
(0.4738, 0.3316),
None,
None,
None,
(0.416, 0.35),
(0.416, 0.35),
None,
None,
(0.592, 0.374),
(0.5234, 0.37),
None,
(0.6409, 0.3533),
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
(0.602, 0.405),
None,
None,
None,
None,
None,
(0.684, 0.415),
(0.4674, 0.3738),
None,
None,
None,
(0.3437, 0.3397),
None,
None,
None,
None,
(0.4399, 0.4164),
(0.5179, 0.467),
None,
None,
None,
None,
(0.545, 0.458),
None,
None,
None,
None,
(0.545, 0.458),
(0.532, 0.478),
None,
(0.4517, 0.4421),
(0.516, 0.492),
(0.3761, 0.38),
(0.5049, 0.4843),
None,
None,
None,
None,
None,
None,
(0.483, 0.5092),
None,
None,
(0.4905, 0.5038),
None,
None,
None,
None,
(0.4745, 0.481),
(0.3378, 0.3504),
None,
None,
(0.4331, 0.4688),
(0.3811, 0.4123),
None,
None,
None,
None,
None,
None,
(0.4652, 0.5128),
None,
None,
(0.4728, 0.5215),
None,
None,
(0.3761, 0.4155),
(0.4271, 0.492),
None,
None,
None,
(0.4341, 0.502),
None,
None,
None,
None)
MUNSELL_SPECIFICATIONS_TO_XY = (
((2.5, 8.0, 11.928546308350969, 4),
(0.41492483295053395, 0.5123568112702328)),
((2.5, 6.0, 6.923610826208884, 4),
(0.38945937205126197, 0.46616492464383436)),
((3.0588107073010358, 6.0, 14.50196667770457, 4),
(0.42971427832243203, 0.5665812104155267)),
((2.5, 5.0, 2.0, 4), (0.3352, 0.3636)),
((5.613007062442384, 8.0, 18.56590894044391, 4),
(0.39909107942315, 0.5888839244592698)),
((5.845640071004907, 8.0, 5.782325614552295, 4),
(0.3476544320849577, 0.415451228906664)),
((5.780794121059599, 3.0, 3.3492086825591487, 4),
(0.3397413408138117, 0.4168925891450931)),
((5.483684299639117, 4.0, 5.761459062506715, 4),
(0.3624285934444264, 0.4768182836607108)),
((5.809580308813496, 6.0, 6.662613753958899, 4),
(0.35692510294957597, 0.4554043675955618)),
((5.209252955662903, 3.0, 5.141472643810014, 4),
(0.36331762907025356, 0.4808425142327246)),
((7.706105853911573, 3.0, 11.396648897274897, 4),
(0.3131610795605752, 0.693666965393307)),
((8.117640564564343, 3.0, 3.1653563832640272, 4),
(0.3191717580108967, 0.3988687571291857)),
((7.8731203012311255, 3.0, 13.241107969297714, 4),
(0.2945431122633574, 0.7584911897992045)),
((8.04983322214289, 2.0, 7.501924679081063, 4),
(0.3074690943666493, 0.6094306766573039)),
((8.355307569391062, 3.0, 11.925441344336392, 4),
(0.2925864520380243, 0.7013043294887223)),
((8.342795760577609, 1.0, 6.298818145909256, 4),
(0.2563315496261501, 0.7193319941337727)),
((7.5947244020062845, 2.0, 4.626613135331287, 4),
(0.32400993792893495, 0.47353910662213033)),
((8.19517786608579, 9.0, 23.571122010181508, 4),
(0.3393737554129363, 0.6530759138839242)),
((7.754763634912469, 8.0, 21.00944901061068, 4),
(0.3524522658493676, 0.6357582251092715)),
((9.010231962978862, 6.0, 6.803370568930175, 4),
(0.3215881688765898, 0.4404084623233154)),
((9.041566851651622, 6.0, 17.010037203566448, 4),
(0.3077087669482304, 0.6419582684061792)),
((9.915652169827913, 9.0, 11.13108215988432, 4),
(0.3156394420758696, 0.47190649633818943)),
((10.0, 9.0, 27.322046186799103, 4),
(0.27105079215940403, 0.6981866004283883)),
((9.961336111598143, 8.0, 13.20009863344056, 4),
(0.3110050190589885, 0.512285067166623)),
((9.887406551063181, 8.0, 2.0660963235598375, 4),
(0.3124556963039024, 0.3470190461737042)),
((10.0, 3.0, 2.5700932200974145, 4),
(0.30780233686482955, 0.37333504024765457)),
((10.0, 3.0, 13.514066607169514, 4),
(0.23432557407109803, 0.7247335078491779)),
((10.0, 5.0, 12.753899774963989, 4),
(0.29015511114768366, 0.5923266098579271)),
((10.0, 6.0, 15.244598276849418, 4),
(0.29059930775417764, 0.6049052757954609)),
((10.0, 5.0, 27.929001019877095, 4),
(0.1548874872515363, 0.9471895260068659)),
((9.757039645743053, 6.0, 3.4112871270786895, 4),
(0.31331400537253434, 0.37344423041130265)),
((10.0, 6.0, 24.86360130658431, 4),
(0.23238659150720198, 0.7984084248251019)),
((9.862075817629322, 4.0, 7.67196809500038, 4),
(0.3036210954390051, 0.5020744140554851)),
((3.2140937198013564, 9.0, 3.4367939376082868, 3),
(0.30085241356011383, 0.3526678514951116)),
((3.1967035260607033, 9.0, 24.78003138905329, 3),
(0.2002744068603639, 0.577845271609722)),
((2.5, 9.0, 27.736581704977635, 3),
(0.19384392806515655, 0.6398389804597316)),
((2.7908763449337677, 8.0, 20.868304564027603, 3),
(0.22423490392017478, 0.5571230146385515)),
((3.221499566897477, 6.0, 5.467726257137659, 3),
(0.2873858207778558, 0.3848413427300203)),
((2.622512070432247, 6.0, 19.364472252973304, 3),
(0.19637966981582017, 0.5896085735184569)),
((3.2873061024849806, 5.0, 19.855724192587914, 3),
(0.15107222663217493, 0.6036632631469063)),
((5.727612405003367, 3.0, 10.746642552166502, 3),
(0.17920676997174168, 0.4665320350934196)),
((5.347955701149093, 3.0, 18.900471815194905, 3),
(0.07543521561825134, 0.5598764933852344)),
((5.7385751713204325, 4.0, 4.223160837759656, 3),
(0.27341911934324464, 0.3703759727313385)),
((5.720824103581511, 2.0, 4.878068159363519, 3),
(0.2467519504501329, 0.396042965876929)),
((5.316780024484356, 1.0, 8.043957606541364, 3),
(0.05387242044052574, 0.5623155400405534)),
((5.7623230008312385, 2.0, 9.507411716255689, 3),
(0.16257886565663734, 0.4748117643951303)),
((5.569555328848931, 2.0, 17.594491934810442, 3),
(-0.005438870615513399, 0.5989078963083921)),
((5.985579505387595, 2.0, 14.803434527189347, 3),
(0.05088199721408976, 0.5508235640922985)),
((5.461619603420755, 3.0, 6.6471547360970025, 3),
(0.2368393501455289, 0.4149076913619224)),
((8.479050960745253, 3.0, 19.932170607445244, 3),
(0.055960429731629305, 0.4808828600574318)),
((7.838277926195208, 3.0, 6.238528025218592, 3),
(0.23006932476384195, 0.3896583956403046)),
((8.2830613968175, 3.0, 10.350825174769154, 3),
(0.17154862793173067, 0.42348961815670966)),
((7.603155032355272, 6.0, 29.139541165198704, 3),
(0.07552585363483849, 0.5167802829864803)),
((8.324115039527976, 7.0, 23.515778973195257, 3),
(0.1410408872001719, 0.4624726907273586)),
((8.44424273124686, 7.0, 2.4130843113046656, 3),
(0.2935043828079583, 0.3347759364340525)),
((8.309061774521076, 6.0, 17.507252134514488, 3),
(0.16639630860705243, 0.44095922688344563)),
((7.698664797242625, 3.0, 18.828802660207376, 3),
(0.06976476518639417, 0.495578148736382)),
((8.14037117068092, 3.0, 14.649933295354042, 3),
(0.11587116950368091, 0.45887391411549944)),
((8.484903553213694, 2.0, 11.879562262633948, 3),
(0.10118404307076279, 0.4519866737136369)),
((8.454109029623016, 2.0, 4.606317173304252, 3),
(0.23983680211973246, 0.37222196630379506)),
((8.305262429168986, 5.0, 3.9045072719017924, 3),
(0.27629481645975607, 0.3507630813736354)),
((8.189730004579287, 5.0, 28.126992759236863, 3),
(0.05705292734056282, 0.5054724651556731)),
((7.54028778107475, 6.0, 6.635319193935916, 3),
(0.2612474304996075, 0.3719851145582656)),
((7.9629991342362985, 5.0, 20.293354805626866, 3),
(0.11711998926043284, 0.47473573591944374)),
((8.432959559038371, 6.0, 26.469970873757067, 3),
(0.0950374077805397, 0.48534709230317313)),
((10.0, 9.0, 6.0469956581432704, 3),
(0.2699968780049759, 0.35154672720525215)),
((9.771353946056914, 9.0, 20.82975271547889, 3),
(0.1703271759874031, 0.4176338874276043)),
((9.376380796522223, 9.0, 13.348522394106682, 3),
(0.22483106117061774, 0.3904997531995366)),
((9.912704179532229, 4.0, 25.778231770351923, 3),
(0.041682531090741895, 0.45638108279644746)),
((10.0, 5.0, 13.712247643370837, 3),
(0.16970415882749393, 0.4075044010703485)),
((10.0, 4.0, 28.587923360931033, 3),
(0.018590574793017255, 0.4614698084023276)),
((9.287535146732925, 4.0, 6.997389565284625, 3),
(0.22779618119117986, 0.3782363477013876)),
((10.0, 6.0, 30.932435068478792, 3),
(0.059472954520648456, 0.4677973052054364)),
((10.0, 5.0, 7.946854746461393, 3),
(0.23028991231427853, 0.372620011437199)),
((10.0, 6.0, 26.172410297895773, 3),
(0.09297071254878268, 0.45251723089368734)),
((2.5, 7.0, 12.643410557057086, 2),
(0.20473101026501478, 0.3654658906154655)),
((2.5, 3.0, 19.167537762557394, 2),
(0.05510943657077363, 0.3689588995456623)),
((3.284581774573411, 4.0, 10.316761862277126, 2),
(0.16617263015780218, 0.34847110450044866)),
((3.0814075494281132, 3.0, 4.031683724514751, 2),
(0.24102409919891465, 0.3343695993283068)),
((2.5342634625499727, 9.0, 14.863663104253218, 2),
(0.20177805681461625, 0.3722383864015372)),
((5.275920564662094, 9.0, 12.879135949769728, 2),
(0.20397359836905862, 0.342464374531538)),
((5.522856449128964, 9.0, 17.412586595686815, 2),
(0.16690331344259282, 0.3431822968019505)),
((5.885914939777947, 9.0, 17.388086814072437, 2),
(0.16584353646682692, 0.3393430032707385)),
((5.956560321967156, 4.0, 4.31169020481439, 2),
(0.24129709871769092, 0.3181985042671494)),
((5.6279111948942635, 5.0, 16.56681914443115, 2),
(0.11616623038399936, 0.3161181105644866)),
((5.8534547245334565, 5.0, 18.83506980508535, 2),
(0.09538175760437877, 0.3095289993435369)),
((5.445581146699364, 5.0, 25.690737024023207, 2),
(0.05808395140333093, 0.3095707855049915)),
((5.144720369630256, 5.0, 18.979172966805407, 2),
(0.09701692047501353, 0.32135192146478)),
((5.2907074463880175, 6.0, 13.598520998056053, 2),
(0.16894640868927147, 0.3308774206782637)),
((5.415844403197766, 6.0, 15.178617464461626, 2),
(0.15478320533987108, 0.32921474300090464)),
((8.204144852288245, 6.0, 4.020177691372295, 2),
(0.259276270369745, 0.3144147576406639)),
((9.366069953403018, 3.0, 15.422766182794579, 2),
(0.06961435752241517, 0.22151452459065538)),
((10.0, 4.0, 9.192387616705815, 2),
(0.1593065733661186, 0.2652494804914122)),
((10.0, 3.0, 15.954247893607032, 2),
(0.06533856558730797, 0.20620817208408798)),
((9.260586271537607, 3.0, 10.59517579170162, 2),
(0.1194483796728095, 0.2491723584774583)),
((9.571675864670619, 3.0, 17.398847531397934, 2),
(0.056120220665006354, 0.20987427459193636)),
((3.2387393821759787, 5.0, 3.7435106940988625, 1),
(0.2529831434272229, 0.29484575887521186)),
((2.5, 4.0, 7.399343614420917, 1),
(0.18304020679575475, 0.25792603874732756)),
((2.5, 4.0, 8.860840417367838, 1),
(0.1619064862820606, 0.24508285645237338)),
((2.5, 8.0, 10.887265616829124, 1),
(0.1982153399209648, 0.2800403945667933)),
((2.5, 7.0, 10.10233537418591, 1),
(0.19839199656426879, 0.2769729728229426)),
((2.6104349455855846, 8.0, 4.236171515065992, 1),
(0.26435154821318096, 0.30558752020931723)),
((2.5, 9.0, 5.3613636980274295, 1),
(0.253705681170712, 0.3036923862002273)),
((3.1731014606584806, 8.0, 15.199536235308303, 1),
(0.1580590485886202, 0.25147475798106617)),
((2.5, 7.0, 5.4202608625739925, 1),
(0.24791624789984437, 0.2982609826359614)),
((2.5, 8.0, 9.619364938403443, 1),
(0.21036828710980593, 0.28549866725870554)),
((3.252581509053177, 7.0, 6.2224060204343745, 1),
(0.2390630935153159, 0.29177058812605583)),
((2.5, 8.0, 12.261808397585057, 1),
(0.1856448040789573, 0.27399568137110875)),
((2.5, 3.0, 7.768505546917617, 1),
(0.15474603763604755, 0.23547281814409443)),
((2.5, 3.0, 6.998840911724095, 1),
(0.16686825564034552, 0.24336188065482803)),
((3.020562119690717, 3.0, 5.203087539105082, 1),
(0.19673848562818472, 0.2591147093725996)),
((5.2190911687613255, 3.0, 13.573887550967275, 1),
(0.0906282824426043, 0.15261288582990243)),
((5.5962506280473505, 2.0, 5.165106850365733, 1),
(0.18029987042327314, 0.22800579756695252)),
((5.078574838897358, 3.0, 6.8599427244043705, 1),
(0.17038770476932538, 0.22610259207324904)),
((5.1756171558445825, 3.0, 4.56080038214103, 1),
(0.20820572971121643, 0.25527794433698137)),
((5.497353020782844, 5.0, 2.0, 1),
(0.279552235480715, 0.30256072076105706)),
((5.841773513544001, 6.0, 13.28936566781855, 1),
(0.15805095852659043, 0.219578147164172)),
((5.580549185463668, 7.0, 16.1803201492634, 1),
(0.14439615742800296, 0.21383831068232395)),
((5.287772726922527, 7.0, 14.098946461580404, 1),
(0.16108942091896006, 0.22841596148263876)),
((8.358221285614269, 5.0, 17.271563597297103, 1),
(0.11773949049199511, 0.15965371392065625)),
((7.87724479635977, 5.0, 5.598934346859475, 1),
(0.23076678125893227, 0.2641771899347817)),
((8.323336953587479, 5.0, 7.0881523668119195, 1),
(0.21347348637221167, 0.24827181533645154)),
((7.845486096299681, 5.0, 16.23379517928239, 1),
(0.122698773315744, 0.168346491930928)),
((8.020564429287921, 5.0, 18.390260797283936, 1),
(0.10913570150779599, 0.15334234592862114)),
((8.382569502344943, 5.0, 13.97512411087629, 1),
(0.14368321354779087, 0.18508081572797264)),
((7.855593749782354, 3.0, 5.360435825061775, 1),
(0.19862762162502848, 0.23353318934066486)),
((7.655501153733914, 4.0, 9.769593047963392, 1),
(0.1631153136408173, 0.20467585514331496)),
((7.653019158008493, 6.0, 11.704589766625281, 1),
(0.17675687521357686, 0.22227909812729865)),
((9.843286146335094, 3.0, 15.473317400474043, 1),
(0.09727706414766314, 0.11902807291267123)),
((10.0, 3.0, 5.415846167802247, 1),
(0.20244200747389485, 0.22588706133330697)),
((9.474422358368296, 3.0, 15.178161395592507, 1),
(0.09722523557671268, 0.12212902007311457)),
((9.196648156004963, 8.0, 5.069223366759241, 1),
(0.259590110606016, 0.2847030494945458)),
((9.59661128432634, 8.0, 11.180193797198104, 1),
(0.19725272984671793, 0.23273775140666494)),
((10.0, 6.0, 7.76280231640685, 1),
(0.22139057567772807, 0.24895849892069766)),
((10.0, 7.0, 18.37437538640251, 1),
(0.1385665599883837, 0.17337937229518244)),
((2.8739501345809, 4.0, 4.494521106912674, 10),
(0.24363463440447483, 0.2527864036169822)),
((2.979763831715893, 9.0, 6.426710793964199, 10),
(0.2536062796096411, 0.2679911028334137)),
((2.5, 9.0, 2.491252841450378, 10),
(0.29221903195440846, 0.3020506629214542)),
((2.5, 8.0, 8.82337986403619, 10),
(0.2291778912916562, 0.2455895812236743)),
((2.5, 5.0, 18.83933997786449, 10),
(0.13113805913613338, 0.1355442901438808)),
((2.5, 5.0, 5.417711811598947, 10),
(0.24334188621371236, 0.2555545429854522)),
((2.5, 9.0, 8.04535672534691, 10),
(0.23865982455989818, 0.25668250292257166)),
((2.5, 3.0, 16.195810159806815, 10),
(0.11192304412106252, 0.10562932888135229)),
((5.6678871626197305, 2.0, 18.226010811743183, 10),
(0.12710395272558006, 0.06481848713426154)),
((5.759673840199206, 2.0, 30.42873152741525, 10),
(0.11709221638289552, 0.036741995906091535)),
((5.783634661463273, 2.0, 21.480194214511137, 10),
(0.12525468287616728, 0.054567458290390045)),
((5.118173248862928, 2.0, 41.86847335857883, 10),
(0.09079570140926681, 0.021796051692063684)),
((5.757349724389667, 2.0, 13.609604267804956, 10),
(0.14301420657653752, 0.09199523693485535)),
((5.279304061296045, 5.0, 22.876127528048663, 10),
(0.13458758534346954, 0.10713223523452234)),
((5.715709801059808, 5.0, 30.360213488022158, 10),
(0.11886049487302211, 0.06981616463630416)),
((5.947947304520848, 4.0, 4.8966439066197935, 10),
(0.2500490236976596, 0.24288778972563252)),
((5.09899322481724, 5.0, 26.26875042475258, 10),
(0.11709613281836206, 0.08769066848517765)),
((5.53222949762985, 5.0, 7.756449262721482, 10),
(0.2311862502859707, 0.22551387883678542)),
((5.923584541768192, 5.0, 19.567605030849386, 10),
(0.15795418059599556, 0.1254715070218464)),
((5.950156387030171, 2.0, 4.953666946161412, 10),
(0.218650721667177, 0.1994067474023979)),
((5.614158136535322, 2.0, 20.644953904366893, 10),
(0.12233990218571343, 0.05689997242979489)),
((5.435908140730638, 3.0, 21.585064332200393, 10),
(0.12301577743313093, 0.07397033989544444)),
((5.539908561343329, 3.0, 44.90369903995316, 10),
(0.1037616438284486, 0.027945682306968186)),
((5.3792514320991325, 2.0, 25.88907455882873, 10),
(0.10873545894297332, 0.04236234760922594)),
((5.632909830682246, 6.0, 21.384042506861697, 10),
(0.15459252917395186, 0.1315791146519223)),
((5.20332651493292, 6.0, 15.514467427422431, 10),
(0.17721170525528537, 0.168150476585649)),
((5.927793692134072, 5.0, 3.7766395197414253, 10),
(0.27134805483093305, 0.27045711680975104)),
((5.817322396187511, 5.0, 11.31804158090752, 10),
(0.20484725163482398, 0.1908000868612774)),
((7.887786042250045, 1.0, 12.574240714561657, 10),
(0.18031303615086874, 0.07804201835995273)),
((7.949960633591607, 3.0, 25.621368902089333, 10),
(0.17044491341806606, 0.06725474122671778)),
((8.382592436810759, 2.0, 40.54127195292601, 10),
(0.17178949078589845, 0.024188092070610984)),
((7.96379736332257, 2.0, 36.70731870996695, 10),
(0.16770209089072902, 0.02993902193504958)),
((8.373924456610474, 2.0, 8.623846064990166, 10),
(0.20728444801820906, 0.14388370971167386)),
((8.151990686473388, 2.0, 42.229127196458144, 10),
(0.16904149941391258, 0.021656309205312783)),
((8.488384085232076, 8.0, 9.779628072315807, 10),
(0.2488450360455433, 0.22627788517996875)),
((8.438357068876163, 3.0, 26.873452492074044, 10),
(0.17599559283280108, 0.0648395414079383)),
((8.309434906530441, 2.0, 48.49966399344499, 10),
(0.16957295408937242, 0.012250504009832514)),
((7.7115794149655015, 3.0, 5.729859843354196, 10),
(0.23559517277851352, 0.20522005774679514)),
((7.6273740879401934, 2.0, 26.724973070776922, 10),
(0.16657953614593823, 0.047841689338012895)),
((7.693923337226084, 3.0, 48.407897505690485, 10),
(0.15852007773506502, 0.024265876491785094)),
((10.0, 6.0, 10.97195381591066, 10),
(0.24914023092044668, 0.2089496110383951)),
((9.113097274740381, 6.0, 2.7564645951736484, 10),
(0.2924932485533538, 0.28804004921141624)),
((10.0, 9.0, 6.003388325186025, 10),
(0.2779813642114769, 0.26596442258554676)),
((10.0, 5.0, 19.170756721559698, 10),
(0.2142974946878668, 0.13766814885103174)),
((9.380110088755156, 6.0, 18.817507743754415, 10),
(0.21117041381027712, 0.14914374281863527)),
((9.001795946577033, 8.0, 4.453854563212078, 10),
(0.2859321611242305, 0.28009650987573115)),
((10.0, 8.0, 3.653159723688856, 10),
(0.29311167360260465, 0.2880429565835092)),
((9.046182896421445, 3.0, 22.300946806849847, 10),
(0.1878970718888041, 0.08063250752355927)),
((9.459420796383784, 3.0, 10.552556949414955, 10),
(0.21991607033340996, 0.1509982577104353)),
((10.0, 3.0, 31.2476220198124, 10),
(0.19305142678811255, 0.056344277243534656)),
((10.0, 3.0, 29.2734347311525, 10),
(0.1942359391613085, 0.06175274143556113)),
((2.5, 8.0, 8.375074375178261, 9),
(0.27887477687446527, 0.24546183806091346)),
((2.5, 7.0, 9.502846862649331, 9),
(0.27464003598072734, 0.23312580166748068)),
((2.8619005171223564, 7.0, 7.466126134628901, 9),
(0.28349638659529314, 0.2511139999686381)),
((3.0874221941355513, 8.0, 2.493857829360787, 9),
(0.30329446073589955, 0.29950346826291635)),
((2.5, 4.0, 19.77471678075617, 9),
(0.2398055097946389, 0.12335032186680327)),
((2.5, 3.0, 45.299844868071496, 9),
(0.21835007756596425, 0.029400310263857008)),
((3.220553507003754, 3.0, 37.05938066272616, 9),
(0.23069339455828514, 0.046593234792596326)),
((2.5, 4.0, 39.166418500944374, 9),
(0.21883358149905563, 0.05350074449716688)),
((2.7654037016841957, 3.0, 29.726535569137937, 9),
(0.22876621648675485, 0.06517243299208932)),
((2.5, 4.0, 12.087654687250128, 9),
(0.25568086328187467, 0.17236450351743657)),
((2.5, 3.0, 3.382852759577178, 9),
(0.28321145706274836, 0.2446297883631457)),
((2.836612137080781, 4.0, 2.0, 9),
(0.29704964788098587, 0.2808823856404053)),
((2.8888545547050946, 3.0, 14.618307037832857, 9),
(0.2477649838426708, 0.12970326868762677)),
((5.164399331990519, 6.0, 9.111465383743912, 9),
(0.28951764964116206, 0.23371824784650205)),
((5.954180129965368, 6.0, 34.844915916827865, 9),
(0.2714161513182527, 0.09857200508129484)),
((5.500356903003388, 7.0, 13.154128131968298, 9),
(0.28721219988452035, 0.2147291790311368)),
((5.777207914079591, 6.0, 29.94398353538339, 9),
(0.27203705163012554, 0.11592402886317606)),
((5.535810057742433, 7.0, 8.892716664134475, 9),
(0.2942708344242646, 0.2451099871340891)),
((5.590040966343994, 4.0, 22.75661278689855, 9),
(0.27287420887848585, 0.12066815247186011)),
((5.282620261743346, 3.0, 18.732823688754383, 9),
(0.26860898171122366, 0.11390224239719735)),
((5.172895640160181, 3.0, 6.2292543458148515, 9),
(0.2877332357942354, 0.2114241583957755)),
((5.259721854731981, 3.0, 35.890872110681414, 9),
(0.2577945586528, 0.05159015452853822)),
((5.5536463415959245, 3.0, 10.076683709549055, 9),
(0.28257922939755636, 0.1719757532037015)),
((5.730003972159145, 2.0, 15.985698390269977, 9),
(0.273099534393889, 0.10636788604049591)),
((5.782381516990652, 2.0, 28.774618518379302, 9),
(0.26483796474476695, 0.050195747909618664)),
((5.069379781665461, 7.0, 2.2194841714206595, 9),
(0.3055058860092272, 0.2991172470460963)),
((5.903716333756614, 6.0, 35.50557429199497, 9),
(0.27002853761437307, 0.09626706166005734)),
((5.1346796709796605, 6.0, 27.726398643923417, 9),
(0.2627660248271106, 0.12221521725840637)),
((5.383260687864624, 6.0, 18.302295934127923, 9),
(0.2779886327084519, 0.17403664791365248)),
((5.869792088464701, 5.0, 32.55343216796663, 9),
(0.27112789885723526, 0.09796127335276403)),
((5.462451143540612, 6.0, 30.948864634440213, 9),
(0.2654630166563432, 0.10997924832348281)),
((5.357445269639698, 6.0, 5.261434469006405, 9),
(0.2990173572087526, 0.26625184527715584)),
((5.626373453003034, 6.0, 25.170846666445236, 9),
(0.27422758103121975, 0.13772616835371584)),
((8.284200895164993, 2.0, 17.238899804160177, 9),
(0.3015541927190563, 0.10594069228544409)),
((8.318102784124019, 2.0, 22.596147383535918, 9),
(0.29986823051730344, 0.07933946724011191)),
((7.851936866242713, 7.0, 20.962374407911458, 9),
(0.31278104464771295, 0.18168350176147405)),
((8.146081336032703, 8.0, 13.533962918469337, 9),
(0.3169359353603149, 0.2297919580402022)),
((8.09720864316275, 7.0, 17.33899155052454, 9),
(0.31638806878498915, 0.20284841728512604)),
((7.830256291991797, 7.0, 10.706822163825924, 9),
(0.3131765403497294, 0.24112945829089102)),
((7.80065897068848, 6.0, 6.211375680877805, 9),
(0.31162664368160353, 0.2642498962824071)),
((8.044863647118635, 7.0, 15.557155261544228, 9),
(0.31566706911724673, 0.21257488877131692)),
((8.461774802909071, 5.0, 36.03729693977732, 9),
(0.3193853258545449, 0.09829269654684747)),
((7.612382882207284, 4.0, 14.168690780706225, 9),
(0.30487133599504435, 0.1749148969907045)),
((8.169633927695997, 4.0, 27.23584610386441, 9),
(0.31018864589103556, 0.11196625320524731)),
((9.602031136015775, 6.0, 20.5806356758181, 9),
(0.3358702478125188, 0.18315683261358986)),
((9.663686030178818, 6.0, 29.047658472982956, 9),
(0.3420516528840192, 0.14510390621759314)),
((9.75292854736471, 5.0, 34.11493160528129, 9),
(0.3460058496414126, 0.11306352412679725)),
((10.0, 6.0, 4.216215730437086, 9),
(0.31858648539348344, 0.2854243280891126)),
((10.0, 6.0, 34.72852675583916, 9),
(0.35136426337791954, 0.1268144197324825)),
((10.0, 5.0, 14.779627294882367, 9),
(0.3368575900243706, 0.20211714305442638)),
((10.0, 6.0, 2.0, 9), (0.3146, 0.3018)),
((9.49705091394873, 6.0, 10.80885478009873, 9),
(0.32614459837684673, 0.23803767844806178)),
((9.826635163465532, 2.0, 7.06711443184985, 9),
(0.31994127256104854, 0.19825489523721926)),
((9.382502350301259, 5.0, 19.999476877446362, 9),
(0.3328136867226051, 0.17031645852096106)),
((9.115530591819274, 5.0, 5.883436488694818, 9),
(0.3188381363847329, 0.2614152937200581)),
((10.0, 5.0, 24.745870232952445, 9),
(0.3456712832096572, 0.1519571163934759)),
((9.378359588580793, 5.0, 26.295787257422923, 9),
(0.33580634416152166, 0.14170543655286671)),
((10.0, 2.0, 21.57257635660235, 9),
(0.3230213711821699, 0.08825848939915812)),
((10.0, 3.0, 26.039872491235577, 9),
(0.3343139553719324, 0.09760462479294567)),
((2.5, 3.0, 4.712138166253982, 8),
(0.3435962977395826, 0.2553142252457729)),
((2.8874578666829285, 2.0, 13.994896052145748, 8),
(0.37835548166727323, 0.14725225859091387)),
((3.435419560439465, 2.0, 6.718989113532732, 8),
(0.35983522137260177, 0.22143958809308129)),
((2.9925336062737173, 2.0, 7.198014339866309, 8),
(0.3575475618962941, 0.21313809904749426)),
((2.5, 1.0, 14.156726710024465, 8), (0.3368, 0.10107531241085566)),
((2.6104579288975813, 1.0, 3.3458156268951917, 8),
(0.3281972508449899, 0.2218263920769044)),
((5.1670653045538115, 8.0, 2.1409481568506346, 8),
(0.31905079087740745, 0.31163900088406254)),
((5.054434114346951, 7.0, 6.442157332603133, 8),
(0.35015366076172083, 0.2933004270294575)),
((5.803735682450612, 9.0, 10.443841773523394, 8),
(0.37537800934530957, 0.2859037624969863)),
((5.044877539779968, 6.0, 18.424428701407553, 8),
(0.4277251999841766, 0.23640419314222352)),
((5.484832402621484, 7.0, 5.474777491295647, 8),
(0.3449818965080228, 0.2983629686835587)),
((5.162300427200289, 7.0, 24.999056248525125, 8),
(0.4499951490629939, 0.22267274868897197)),
((5.877256360743413, 7.0, 15.450444143259661, 8),
(0.4135138022364607, 0.2615018632684154)),
((8.197449080109873, 2.0, 2.0, 8),
(0.3479365513139208, 0.2910133676082857)),
((7.997237265754237, 2.0, 11.655829335806517, 8),
(0.45364925931660116, 0.19649613209426764)),
((7.973192560907184, 9.0, 4.272886886879181, 8),
(0.3382290806106464, 0.3100993156381419)),
((7.510355740108461, 8.0, 14.320141317950995, 8),
(0.41602573142890514, 0.2791113211989992)),
((7.836498646186221, 8.0, 13.596658717999025, 8),
(0.41449506838703515, 0.28339932777466953)),
((7.782186965908517, 9.0, 13.902105524067945, 8),
(0.4117119567839591, 0.28581945828988925)),
((9.531795266771761, 5.0, 2.0, 8),
(0.3325257851841513, 0.3124819697521387)),
((10.0, 5.0, 11.055624912778937, 8),
(0.44623696767281984, 0.2877358440858011)),
((9.312270837393163, 7.0, 11.185222099189973, 8),
(0.4115569799140528, 0.2962671589127124)),
((10.0, 7.0, 13.895455902446677, 8),
(0.44457546784397745, 0.29335613303900565)),
((9.925669940032272, 5.0, 7.2040789887667955, 8),
(0.39993423356459634, 0.3000028935899999)),
((9.416740882402403, 5.0, 8.720116348180492, 8),
(0.41420875274374974, 0.2926385477337663)),
((10.0, 4.0, 16.469698910991372, 8),
(0.5288485073674999, 0.2505105957717457)),
((10.0, 6.0, 6.599237233947309, 8),
(0.37969275372249944, 0.30632137297889483)),
((10.0, 5.0, 4.550269784467781, 8),
(0.36647096673041096, 0.30759681204960715)),
((9.970332530519679, 6.0, 10.837022722087644, 8),
(0.4235409167024141, 0.2965173827647608)),
((2.962707587174585, 9.0, 9.999116931630539, 7),
(0.4082112009458871, 0.317851549976094)),
((3.1672052728994915, 9.0, 7.383624729892915, 7),
(0.38120519554647453, 0.3201699383275524)),
((2.5, 5.0, 17.881593853007615, 7),
(0.5525791262360914, 0.28084994335857105)),
((2.7415018638966284, 6.0, 18.00290873780138, 7),
(0.5290289042496541, 0.2948216346860084)),
((2.5, 6.0, 10.232668996271492, 7),
(0.4348850955537665, 0.3113811958067113)),
((2.877902226185231, 6.0, 3.5582034231201787, 7),
(0.3519353725942859, 0.31712947263253355)),
((2.5, 6.0, 27.77999592691697, 7),
(0.5960099816711264, 0.2656600122192491)),
((5.412821771284458, 3.0, 7.258040020605607, 7),
(0.49172648760450915, 0.31621073924712606)),
((5.83754747605084, 3.0, 11.998261380615471, 7),
(0.5975448574102102, 0.29794809688903484)),
((5.9693975439749885, 4.0, 14.397906420283302, 7),
(0.5881617924445709, 0.31236418999305104)),
((5.004079000563381, 5.0, 22.736677614468775, 7),
(0.63920549388473, 0.28783077224442344)),
((5.168438425945292, 4.0, 4.844860547907693, 7),
(0.40839719509713, 0.32305082659991396)),
((5.863284315202094, 4.0, 23.489710023246513, 7),
(0.7027892157150165, 0.2695458294720114)),
((5.756333389411959, 9.0, 7.301135618422141, 7),
(0.39078475628981396, 0.32893785909586065)),
((5.108337403014788, 8.0, 11.359771531491097, 7),
(0.44539182306302255, 0.32701253286287824)),
((8.314898437378535, 9.0, 4.238233636005843, 7),
(0.3599697419936892, 0.33117366775661333)),
((7.729982986777109, 5.0, 24.923686571499648, 7),
(0.6880520401649296, 0.31336218352271883)),
((8.201460399608226, 4.0, 11.589840844520428, 7),
(0.5584840865313621, 0.33959721735842735)),
((7.595604919273442, 5.0, 6.798265747221928, 7),
(0.4338251159286378, 0.3369357659148767)),
((8.378186361828917, 9.0, 8.022357890675561, 7),
(0.4105838750780343, 0.34204796349468947)),
((8.300135000740797, 8.0, 14.433553547681656, 7),
(0.5019186699595175, 0.35172447128424394)),
((8.229270762113973, 9.0, 6.350022396927342, 7),
(0.38792000675351795, 0.33801569665443554)),
((10.0, 3.0, 3.1152259635487924, 7),
(0.405141552942915, 0.33686460722138906)),
((9.756267998308681, 3.0, 14.803384721914584, 7),
(0.6814476281796187, 0.3179618099654896)),
((10.0, 4.0, 13.90160960971739, 7),
(0.6136634096115119, 0.3568983903902826)),
((10.0, 8.0, 19.365358380679876, 7), (0.5632882254261189, 0.374)),
((10.0, 9.0, 6.218490965882184, 7),
(0.3910588735223506, 0.34456639744594064)),
((10.0, 8.0, 13.887493044276624, 7),
(0.5055374095755961, 0.36877498608855325)),
((10.0, 5.0, 14.68907159946693, 7),
(0.5862646522729101, 0.3661588249401866)),
((10.0, 5.0, 24.263442351912005, 7),
(0.704502702343164, 0.353473115296176)),
((10.0, 8.0, 13.518172354943417, 7),
(0.5007362406142645, 0.36803634470988683)),
((2.7455640547144746, 2.0, 5.569110673549164, 6),
(0.5147965927948814, 0.35947427079052985)),
((3.1452880891491906, 5.0, 8.595832717291, 6),
(0.49156559782999765, 0.3836139842529673)),
((2.5, 4.0, 4.950679151608691, 6),
(0.4364884940203847, 0.3603170842733587)),
((2.5, 4.0, 4.383231249423155, 6),
(0.42312509592391534, 0.3564868109336063)),
((2.5, 2.0, 3.307282274836235, 6),
(0.43396162885139156, 0.3458470682650791)),
((5.045583268005572, 9.0, 9.59194524860244, 6),
(0.4426976823901471, 0.3880493030502878)),
((5.594284526041456, 9.0, 10.197201238166286, 6),
(0.45008651645515263, 0.39500104997859575)),
((5.988802467213943, 8.0, 12.30595195616923, 6),
(0.48725749804679613, 0.4136999258156572)),
((5.425850947396252, 5.0, 8.046156862703112, 6),
(0.48370601248767936, 0.39930129085649035)),
((5.405852543210212, 6.0, 16.635714109554605, 6),
(0.5613340460279886, 0.4289299103499902)),
((5.369364240119585, 5.0, 7.340573827339962, 6),
(0.46958736593496786, 0.39345497811572305)),
((5.702045821590509, 5.0, 10.325652051724541, 6),
(0.5189311698950891, 0.41373924250477145)),
((5.411096326958829, 6.0, 5.292034843095026, 6),
(0.40946256871366055, 0.37022550255078585)),
((8.242968536635763, 9.0, 4.90020586532881, 6),
(0.38006673083868486, 0.3693561101855342)),
((8.238754570485817, 9.0, 5.94133011037865, 6),
(0.3940851904797918, 0.379080970224506)),
((8.39568424389748, 5.0, 9.461515968715135, 6),
(0.5006508183439433, 0.43147844246085765)),
((10.0, 5.0, 12.704963485646498, 6),
(0.5274094231965362, 0.462819853942586)),
((10.0, 5.0, 15.6753707607594, 6),
(0.5498899099449361, 0.47553916842341726)),
((10.0, 6.0, 3.506573388368494, 6),
(0.37697160768481713, 0.3696933421148326)),
((10.0, 6.0, 14.063922879568509, 6),
(0.5203515758376268, 0.46251414164655447)),
((10.0, 7.0, 3.128443413944953, 6),
(0.36320142718357795, 0.36035187523477064)),
((10.0, 7.0, 11.632405914314647, 6),
(0.4857175289017656, 0.4453349428787812)),
((9.050263182466011, 7.0, 17.08367694275979, 6),
(0.5287506410501914, 0.459929219239207)),
((10.0, 6.0, 4.736966947326921, 6),
(0.40006552365184517, 0.386391115357349)),
((9.409402543801862, 7.0, 6.28766021168659, 6),
(0.41478835014788323, 0.39548485637022385)),
((9.633394604006961, 8.0, 4.623044001702525, 6),
(0.37931316473145726, 0.3728258686141236)),
((9.020770192275748, 7.0, 13.422245014577644, 6),
(0.5060230048016112, 0.44738008068068885)),
((9.26317609686154, 7.0, 15.233295182477667, 6),
(0.517940485402123, 0.4564421027270388)),
((3.332782026387723, 7.0, 16.113419977677538, 5),
(0.49943843353399675, 0.4913166627882885)),
((2.5, 5.0, 6.5436496028361315, 5),
(0.446290656443251, 0.4355063353928991)),
((2.5, 6.0, 15.572129740854304, 5),
(0.5138820422172288, 0.4876949957096056)),
((2.5, 3.0, 2.0, 5), (0.3703, 0.37)),
((2.8285591842433737, 9.0, 21.473258817290873, 5),
(0.5043337880565358, 0.4951865962256489)),
((2.5, 8.0, 12.020108658634838, 5),
(0.4679648910008057, 0.4590236682506042)),
((2.5, 9.0, 14.42790441415372, 5),
(0.47578137869199916, 0.46735347427784546)),
((2.5, 5.0, 8.380243803410817, 5), (0.472682681837519, 0.455422938237116)),
((3.363079416671538, 5.0, 2.7755096642090313, 5),
(0.36889260512263344, 0.3743534718948429)),
((5.9271524261020545, 9.0, 20.603131952471927, 5),
(0.4802472251615271, 0.5159774928137729)),
((5.339079962653624, 8.0, 16.611574939424255, 5),
(0.4789877671483087, 0.5049439528400183)),
((5.347356764781598, 8.0, 15.41216519823205, 5),
(0.4745806920664243, 0.5005081883465945)),
((5.368950609634622, 7.0, 7.038165919924306, 5),
(0.41341154716192496, 0.4348136096758073)),
((5.063316239211655, 7.0, 16.01331933482103, 5),
(0.487164109418344, 0.5051543240966131)),
((5.929552854535908, 7.0, 7.57281344704806, 5),
(0.41853653124143764, 0.444243976557503)),
((5.72794655950891, 7.0, 10.668172633934036, 5),
(0.45318357111848423, 0.47904872268084375)),
((5.641782139668679, 6.0, 9.549016885745186, 5),
(0.4561067615040472, 0.4783274892995489)),
((5.344359642058747, 3.0, 5.430489560972486, 5),
(0.4516333592896905, 0.461109580193912)),
((7.749909297802317, 4.0, 4.268933751175051, 5),
(0.40175883449950806, 0.4334105720840665)),
((8.145409228909998, 5.0, 7.545633529064384, 5),
(0.435789245569801, 0.4810623452292749)),
((7.907253670159305, 6.0, 10.770986229289623, 5),
(0.4538016350466874, 0.5021167554370949)),
((7.592508492261312, 5.0, 4.933568344499713, 5),
(0.4009033326671016, 0.4323309706149007)),
((7.674872690410821, 5.0, 3.5502452884794837, 5),
(0.37590596292111755, 0.4014473524868083)),
((7.991979987062054, 7.0, 3.2837012487472252, 5),
(0.35678303301647424, 0.37978502351744886)),
((9.345599185286883, 7.0, 17.48852175788182, 5),
(0.46492781928598614, 0.537405269620011)),
((9.659595218511388, 8.0, 3.3572177484844636, 5),
(0.35143609296322403, 0.377417525766746)))
MUNSELL_COLOURS_TO_XYY = (
np.array([0.41515095, 0.51288165, 0.5702441]),
np.array([0.38804358, 0.46299149, 0.31592072]),
np.array([0.33491518, 0.36277402, 0.22128409]),
np.array([0.39936353, 0.58547238, 0.64852094]),
np.array([0.34767896, 0.4152922, 0.58706989]),
np.array([0.33966055, 0.41527226, 0.07167165]),
np.array([0.36265912, 0.47966922, 0.11068168]),
np.array([0.35748002, 0.45915987, 0.2727359]),
np.array([0.36348032, 0.48213512, 0.06293782]),
np.array([0.30330033, 0.73038471, 0.05538644]),
np.array([0.33159302, 0.43388935, 0.89380734]),
np.array([0.31838794, 0.40167814, 0.05382145]),
np.array([0.27202005, 0.83522048, 0.04995375]),
np.array([0.31425413, 0.58372544, 0.04377268]),
np.array([0.27634942, 0.75063178, 0.05211431]),
np.array([0.258837, 0.71096717, 0.01266934]),
np.array([0.31405111, 0.53120144, 0.02111891]),
np.array([0.33914454, 0.6563647, 0.71217401]),
np.array([0.35328989, 0.63157007, 0.65497851]),
np.array([0.32167873, 0.43862617, 0.3080991]),
np.array([0.31168045, 0.6270064, 0.34717087]),
np.array([0.31496017, 0.47530248, 0.67920304]),
np.array([0.26882355, 0.70549119, 0.69614462]),
np.array([0.31107787, 0.51188895, 0.58306925]),
np.array([0.31254722, 0.34686238, 0.6334334]),
np.array([0.30880402, 0.37157402, 0.08263161]),
np.array([0.23582365, 0.72197618, 0.06667783]),
np.array([0.29476305, 0.57521949, 0.23583791]),
np.array([0.28891056, 0.61005165, 0.28191444]),
np.array([0.17590584, 0.91365, 0.23196178]),
np.array([0.31292041, 0.3752074, 0.25538037]),
np.array([0.22307972, 0.8153644, 0.2698602]),
np.array([0.30648167, 0.48754769, 0.15098549]),
np.array([0.30382174, 0.34089453, 0.84210967]),
np.array([0.28517207, 0.38369148, 0.89445395]),
np.array([0.20621151, 0.56369357, 0.77955867]),
np.array([0.2465848, 0.49294784, 0.87271533]),
np.array([0.22538285, 0.5564611, 0.60532773]),
np.array([0.28500017, 0.38833563, 0.24045742]),
np.array([0.19598037, 0.59002914, 0.29181101]),
np.array([0.16437784, 0.59069112, 0.23370301]),
np.array([0.17940333, 0.4663929, 0.06448045]),
np.array([0.07553293, 0.55981543, 0.06406275]),
np.array([0.27330162, 0.37048932, 0.11621278]),
np.array([0.23251367, 0.40832841, 0.02585745]),
np.array([0.05704598, 0.55990299, 0.01221862]),
np.array([0.09405428, 0.51916421, 0.02268015]),
np.array([0.06306305, 0.54336526, 0.0361037]),
np.array([0.23250342, 0.41833342, 0.0559913]),
np.array([0.22630523, 0.39163204, 0.05597116]),
np.array([0.15858055, 0.42916814, 0.05259972]),
np.array([0.07933408, 0.51474312, 0.30905098]),
np.array([0.14028772, 0.46282023, 0.41589047]),
np.array([0.29271668, 0.33531051, 0.37326792]),
np.array([0.17253811, 0.43786778, 0.33686994]),
np.array([0.09180367, 0.46823752, 0.05151176]),
np.array([0.10903846, 0.44893518, 0.03595462]),
np.array([0.2428693, 0.37094376, 0.04060119]),
np.array([0.27771166, 0.34994832, 0.23574564]),
np.array([0.05867972, 0.50502648, 0.19891229]),
np.array([0.25930387, 0.37349411, 0.26874577]),
np.array([0.12284826, 0.47211684, 0.21388094]),
np.array([0.0890682, 0.48703791, 0.27058998]),
np.array([0.27018357, 0.35138182, 0.76804186]),
np.array([0.22062535, 0.38110738, 0.85084234]),
np.array([0.26193025, 0.3581405, 0.86839733]),
np.array([0.0431053, 0.45634623, 0.12074655]),
np.array([0.16522669, 0.40881359, 0.18014875]),
np.array([0.02517244, 0.46138968, 0.1317301]),
np.array([0.23349872, 0.37536989, 0.14476492]),
np.array([0.05119965, 0.46839242, 0.26212526]),
np.array([0.2315995, 0.37207726, 0.20351563]),
np.array([0.08301372, 0.45335265, 0.25304755]),
np.array([0.20183026, 0.36561544, 0.39526058]),
np.array([0.06340759, 0.37121187, 0.07975536]),
np.array([0.16044634, 0.34707426, 0.10145605]),
np.array([0.24416648, 0.33434737, 0.07774819]),
np.array([0.28155768, 0.33248001, 0.89992977]),
np.array([0.28105936, 0.3327088, 0.88937678]),
np.array([0.25255297, 0.34594245, 0.87623351]),
np.array([0.20616318, 0.34192146, 0.77176579]),
np.array([0.21898553, 0.33335124, 0.85174026]),
np.array([0.19119679, 0.33526743, 0.80792502]),
np.array([0.29624596, 0.31950269, 0.96665647]),
np.array([0.24328961, 0.31868567, 0.12931978]),
np.array([0.10471116, 0.30938022, 0.15549815]),
np.array([0.0862452, 0.30268915, 0.15900713]),
np.array([0.10497041, 0.32451898, 0.22191645]),
np.array([0.16894641, 0.33087742, 0.29312371]),
np.array([0.16144965, 0.33133829, 0.34018592]),
np.array([0.25864013, 0.31415379, 0.28205753]),
np.array([0.07732853, 0.22846579, 0.08121964]),
np.array([0.15795868, 0.26417318, 0.11377678]),
np.array([0.06907834, 0.20994435, 0.0722573]),
np.array([0.12862477, 0.25616557, 0.08539517]),
np.array([0.05881481, 0.21256736, 0.07052095]),
np.array([0.25058288, 0.29329096, 0.17796585]),
np.array([0.18830894, 0.26192867, 0.13740285]),
np.array([0.1684076, 0.25029878, 0.13934697]),
np.array([0.1951648, 0.27716957, 0.51306785]),
np.array([0.19935306, 0.27783329, 0.44060477]),
np.array([0.26308512, 0.3046212, 0.52610451]),
np.array([0.2532416, 0.30291555, 0.67153139]),
np.array([0.15890128, 0.2532598, 0.59956247]),
np.array([0.24841933, 0.2986962, 0.43833832]),
np.array([0.2082133, 0.28356991, 0.52733609]),
np.array([0.23939654, 0.2920611, 0.43144538]),
np.array([0.18279859, 0.27122662, 0.52199238]),
np.array([0.16449512, 0.24371038, 0.08686299]),
np.array([0.16724393, 0.24366794, 0.06480227]),
np.array([0.19881487, 0.26071106, 0.06927689]),
np.array([0.09076654, 0.15277497, 0.06421355]),
np.array([0.18253778, 0.23018215, 0.03460635]),
np.array([0.16926303, 0.22496873, 0.06237928]),
np.array([0.20398493, 0.2513471, 0.05473403]),
np.array([0.28140041, 0.30378091, 0.23081828]),
np.array([0.15231331, 0.21384066, 0.25883348]),
np.array([0.14386953, 0.21327677, 0.41482428]),
np.array([0.1593506, 0.22670722, 0.40114326]),
np.array([0.10949743, 0.15034868, 0.15892888]),
np.array([0.22674934, 0.26033997, 0.17110185]),
np.array([0.20569472, 0.2404847, 0.15700695]),
np.array([0.11359218, 0.15851929, 0.15851498]),
np.array([0.13446868, 0.17456223, 0.15665285]),
np.array([0.20295637, 0.23758918, 0.07464645]),
np.array([0.16020908, 0.20160833, 0.11096053]),
np.array([0.17946292, 0.22546056, 0.3340693]),
np.array([0.19584886, 0.21874231, 0.05264774]),
np.array([0.25950493, 0.28494406, 0.60260113]),
np.array([0.22170777, 0.24928491, 0.29763974]),
np.array([0.13564759, 0.16991066, 0.38138893]),
np.array([0.23373145, 0.24171207, 0.08831548]),
np.array([0.25339824, 0.26720506, 0.67917402]),
np.array([0.29210338, 0.30192924, 0.75127547]),
np.array([0.22958296, 0.2462168, 0.59738522]),
np.array([0.1258535, 0.12764109, 0.16297312]),
np.array([0.24227309, 0.25436998, 0.18624748]),
np.array([0.23758242, 0.25457444, 0.66865194]),
np.array([0.10476265, 0.09497701, 0.05235122]),
np.array([0.12612865, 0.06066443, 0.02676646]),
np.array([0.11705747, 0.03587748, 0.02951591]),
np.array([0.1232905, 0.0441543, 0.02037758]),
np.array([0.09139852, 0.01529466, 0.02045231]),
np.array([0.13833192, 0.07953813, 0.02236117]),
np.array([0.13361693, 0.10504399, 0.18414205]),
np.array([0.1210474, 0.06862453, 0.15728175]),
np.array([0.25249867, 0.24628189, 0.1353695]),
np.array([0.11706407, 0.08706468, 0.18814811]),
np.array([0.22549284, 0.2180621, 0.16192792]),
np.array([0.1534495, 0.11674072, 0.15905692]),
np.array([0.2235872, 0.20668864, 0.04253357]),
np.array([0.12515256, 0.06568452, 0.04436879]),
np.array([0.12125722, 0.0687482, 0.05533026]),
np.array([0.10373316, 0.0277414, 0.06333516]),
np.array([0.10925991, 0.04419045, 0.03405371]),
np.array([0.15402461, 0.13042053, 0.28570417]),
np.array([0.17573216, 0.16578146, 0.27364637]),
np.array([0.27401103, 0.27401935, 0.23451177]),
np.array([0.2075913, 0.19464274, 0.21940166]),
np.array([0.17049737, 0.06465369, 0.05868583]),
np.array([0.17064728, 0.0288915, 0.04372401]),
np.array([0.1672038, 0.03196773, 0.03579761]),
np.array([0.21031018, 0.15034168, 0.03888934]),
np.array([0.16827351, 0.02413193, 0.03757647]),
np.array([0.29178046, 0.29061931, 0.90323404]),
np.array([0.24910224, 0.22648966, 0.59336016]),
np.array([0.17601554, 0.0587606, 0.05160293]),
np.array([0.16834537, 0.01686511, 0.04374851]),
np.array([0.23182863, 0.19825806, 0.05291206]),
np.array([0.16638758, 0.05075245, 0.03650792]),
np.array([0.16028497, 0.01948654, 0.05046003]),
np.array([0.24957235, 0.21006823, 0.31587613]),
np.array([0.29306654, 0.28917618, 0.32466527]),
np.array([0.28495343, 0.27687408, 0.81760638]),
np.array([0.21441304, 0.13814375, 0.19716723]),
np.array([0.20941829, 0.14321541, 0.24327119]),
np.array([0.28541299, 0.27913907, 0.54006024]),
np.array([0.29230469, 0.28656219, 0.52465762]),
np.array([0.18804124, 0.08137467, 0.06580398]),
np.array([0.22025958, 0.15180899, 0.06551257]),
np.array([0.19309397, 0.06115047, 0.07873642]),
np.array([0.19437258, 0.06326427, 0.06829742]),
np.array([0.27887167, 0.24543217, 0.57450962]),
np.array([0.27487624, 0.23376357, 0.46322748]),
np.array([0.28356864, 0.2519005, 0.45980664]),
np.array([0.30333596, 0.30005216, 0.66401066]),
np.array([0.23835467, 0.11558036, 0.09827669]),
np.array([0.23067198, 0.05028062, 0.07671426]),
np.array([0.21902307, 0.05208443, 0.11065271]),
np.array([0.22907253, 0.06719948, 0.06903321]),
np.array([0.2536145, 0.16387485, 0.0990085]),
np.array([0.28535713, 0.25114971, 0.08429109]),
np.array([0.29701504, 0.28076672, 0.11652327]),
np.array([0.24894294, 0.13513311, 0.0750785]),
np.array([0.28976435, 0.23551078, 0.3203068]),
np.array([0.28699217, 0.2122739, 0.38376156]),
np.array([0.2942318, 0.24483482, 0.41568603]),
np.array([0.27112866, 0.10892559, 0.09137276]),
np.array([0.26932562, 0.11871922, 0.07456975]),
np.array([0.28774446, 0.21149857, 0.06409553]),
np.array([0.25815891, 0.05632389, 0.07763328]),
np.array([0.28438514, 0.18361032, 0.08751006]),
np.array([0.27466364, 0.11623324, 0.04459164]),
np.array([0.26635689, 0.0603288, 0.04436654]),
np.array([0.30526917, 0.29787617, 0.38438766]),
np.array([0.26275899, 0.12295408, 0.3048271]),
np.array([0.27733084, 0.16764806, 0.24584118]),
np.array([0.27121622, 0.0996767, 0.21385417]),
np.array([0.26547923, 0.10802713, 0.26515926]),
np.array([0.29841781, 0.26325636, 0.25902873]),
np.array([0.27412192, 0.13541072, 0.26778091]),
np.array([0.3042953, 0.11611832, 0.04387]),
np.array([0.30157505, 0.08506396, 0.03768091]),
np.array([0.31391169, 0.1856442, 0.48667459]),
np.array([0.3167079, 0.22835511, 0.52829657]),
np.array([0.31664956, 0.20454265, 0.45562827]),
np.array([0.31300137, 0.23982828, 0.40210613]),
np.array([0.31187872, 0.26667157, 0.33190218]),
np.array([0.31537904, 0.21052765, 0.39335492]),
np.array([0.31803143, 0.09273886, 0.1712263]),
np.array([0.30594132, 0.18152717, 0.14244072]),
np.array([0.31195968, 0.12089229, 0.15102095]),
np.array([0.33618672, 0.17589268, 0.24249386]),
np.array([0.34207627, 0.13875616, 0.24138597]),
np.array([0.34605075, 0.11899797, 0.23580785]),
np.array([0.31923003, 0.28291153, 0.25504488]),
np.array([0.35136426, 0.12256902, 0.2641027]),
np.array([0.33639641, 0.20777481, 0.23332748]),
np.array([0.31464507, 0.3010788, 0.27040807]),
np.array([0.32622786, 0.23679153, 0.28338647]),
np.array([0.31964789, 0.19702337, 0.02988488]),
np.array([0.33202416, 0.16293316, 0.16828902]),
np.array([0.3188341, 0.26119414, 0.19149517]),
np.array([0.34497302, 0.14740581, 0.17674791]),
np.array([0.33396066, 0.13204228, 0.15759269]),
np.array([0.32447663, 0.09207588, 0.03498261]),
np.array([0.32823298, 0.08288658, 0.04740281]),
np.array([0.34263192, 0.2492826, 0.04966462]),
np.array([0.37863885, 0.1480557, 0.03133476]),
np.array([0.36067287, 0.22508694, 0.03664306]),
np.array([0.35583972, 0.20890369, 0.0287403]),
np.array([0.34728299, 0.11402692, 0.01746108]),
np.array([0.32940771, 0.22789278, 0.01489395]),
np.array([0.31972567, 0.31122932, 0.53600948]),
np.array([0.35012172, 0.29333067, 0.42147094]),
np.array([0.37589661, 0.2850717, 0.66934047]),
np.array([0.42549932, 0.23904177, 0.33329037]),
np.array([0.34641765, 0.2972505, 0.38411768]),
np.array([0.45441652, 0.21797623, 0.36276856]),
np.array([0.41521602, 0.25989123, 0.39086156]),
np.array([0.34780042, 0.2928404, 0.0360562]),
np.array([0.4544551, 0.19822245, 0.03201793]),
np.array([0.33858745, 0.3098545, 0.70004006]),
np.array([0.41381262, 0.2839371, 0.60579167]),
np.array([0.39278492, 0.2914687, 0.81034741]),
np.array([0.33239612, 0.31251827, 0.19604738]),
np.array([0.43846181, 0.29096381, 0.23141236]),
np.array([0.40958022, 0.29719222, 0.48882871]),
np.array([0.44399899, 0.29369509, 0.43379687]),
np.array([0.40554919, 0.29723013, 0.16687769]),
np.array([0.42007003, 0.28930815, 0.1672933]),
np.array([0.52108329, 0.25574146, 0.13999526]),
np.array([0.3763801, 0.30728007, 0.34070289]),
np.array([0.36495307, 0.30801481, 0.20910915]),
np.array([0.42566912, 0.29564012, 0.28217939]),
np.array([0.38537971, 0.31745807, 0.82116554]),
np.array([0.37201534, 0.31965197, 0.79705828]),
np.array([0.55136347, 0.28138892, 0.19712193]),
np.array([0.53899416, 0.29048788, 0.25823634]),
np.array([0.43854811, 0.3103317, 0.27612362]),
np.array([0.35589069, 0.3165537, 0.24649473]),
np.array([0.6015019, 0.26287828, 0.27670596]),
np.array([0.49631592, 0.30111191, 0.04570504]),
np.array([0.60338354, 0.2746834, 0.04600213]),
np.array([0.57619776, 0.31554717, 0.14073356]),
|
np.array([0.65681487, 0.27970869, 0.16409107])
|
numpy.array
|
'''
Multipurpose Density Matrix Embedding theory (mp-DMET)
Copyright (C) 2015 <NAME>
Author: <NAME>, University of Minnesota
email: <EMAIL>
'''
import numpy as np
import scipy as scipy
from functools import reduce
class RHF_decomposition:
def __init__(self, mf, impOrbs, numBathOrbs, orthoOED, method = 'OED'):
self.mf = mf
self.impOrbs = impOrbs
self.method = method
self.numBathOrbs = numBathOrbs
self.orthoMO, self.orthoOED = orthoOED
def baths(self):
'''
        Construct the Schmidt (bath) basis using either an overlap matrix or the one-particle reduced density matrix (1-RDM, here called OED)
'''
if self.method == 'OED':
return self.UsingOED(self.numBathOrbs, threshold = 1e-13)
elif self.method == 'overlap':
return self.UsingOverlap(self.numBathOrbs, threshold = 1e-7)
def UsingOverlap(self, numBathOrbs, threshold = 1e-7):
'''
Construct the RHF bath using a projector
ref: PHYSICAL REVIEW B 89, 035140 (2014)
'''
# Build the projectors for fragment and bath
nao = self.mf.mol.nao_nr()
P_F = np.zeros((nao,nao))
P_F[self.impOrbs == 1,self.impOrbs == 1] = 1
P_B = np.identity(nao)- P_F
# Build the overlap matrix between hole states and fragment orbs
nelec_pairs = self.mf.mol.nelectron // 2
Occ = self.orthoMO[:,:nelec_pairs]
M = reduce(np.dot,(Occ.T, P_F,Occ))
d, V = np.linalg.eigh(M) # 0 <= d <= 1
idx = (-d).argsort() #d close to 1 come first
d, V = d[idx], V[:, idx]
tokeep =
|
np.sum(d > threshold)
|
numpy.sum
|
from __future__ import print_function
import pytest
import numpy as np
import helpers
@pytest.mark.parametrize("N", list(range(6)))
def test_pascalRow_2D(N, x=0.2, y=0.5):
def pascal_2D_single_row(N, x, y):
xs = np.array([
|
np.power(x, N - ii)
|
numpy.power
|
import argparse
import os
import numpy as np
import pickle
from tqdm import tqdm
import gym
import matplotlib
matplotlib.use('agg')
import matplotlib.pylab as pylab
params = {'legend.fontsize': 'xx-large',
'figure.figsize': (5, 4),
'axes.labelsize': 'xx-large',
'axes.titlesize':'xx-large',
'xtick.labelsize':'xx-large',
'ytick.labelsize':'xx-large'}
pylab.rcParams.update(params)
from matplotlib import pyplot as plt
from bc_mujoco import Policy
from utils import RandomAgent, gen_traj
import polytope as pc
################################
class ActionNoise(object):
def reset(self):
pass
class NormalActionNoise(ActionNoise):
def __init__(self, mu, sigma):
self.mu = mu
self.sigma = sigma
def __call__(self):
return np.random.normal(self.mu, self.sigma)
def __repr__(self):
return 'NormalActionNoise(mu={}, sigma={})'.format(self.mu, self.sigma)
# Based on http://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab
class OrnsteinUhlenbeckActionNoise(ActionNoise):
def __init__(self, mu, sigma, theta=.15, dt=0.033, x0=None):
self.theta = theta
self.mu = mu
self.sigma = sigma
self.dt = dt
self.x0 = x0
self.reset()
def __call__(self):
x = self.x_prev + self.theta * (self.mu - self.x_prev) * self.dt + self.sigma * np.sqrt(self.dt) * np.random.normal(size=self.mu.shape)
self.x_prev = x
return x
def reset(self):
self.x_prev = self.x0 if self.x0 is not None else np.zeros_like(self.mu)
def __repr__(self):
return 'OrnsteinUhlenbeckActionNoise(mu={}, sigma={})'.format(self.mu, self.sigma)
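# Hypothetical usage sketch (not part of the original pipeline): draw a few
# temporally correlated OU noise samples for a small action space; successive
# rows drift smoothly instead of being independent draws.
def _ou_noise_demo(dim=2, steps=5):
    noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(dim), sigma=0.2 * np.ones(dim))
    return np.array([noise() for _ in range(steps)])  # shape (steps, dim)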
################################
class NoiseInjectedPolicy(object):
def __init__(self,env,policy,action_noise_type,noise_level):
self.action_space = env.action_space
self.policy = policy
self.action_noise_type = action_noise_type
if action_noise_type == 'normal':
mu, std = np.zeros(self.action_space.shape), noise_level*np.ones(self.action_space.shape)
self.action_noise = NormalActionNoise(mu=mu,sigma=std)
elif action_noise_type == 'ou':
mu, std = np.zeros(self.action_space.shape), noise_level*np.ones(self.action_space.shape)
self.action_noise = OrnsteinUhlenbeckActionNoise(mu=mu,sigma=std)
elif action_noise_type == 'epsilon':
self.epsilon = noise_level
else:
assert False, "no such action noise type: %s"%(action_noise_type)
def act(self, obs, reward, done):
if self.action_noise_type == 'epsilon':
if np.random.random() < self.epsilon:
return self.action_space.sample()
else:
act = self.policy.act(obs,reward,done)
else:
act = self.policy.act(obs,reward,done)
act += self.action_noise()
return np.clip(act,self.action_space.low,self.action_space.high)
def reset(self):
self.action_noise.reset()
################################
class BCNoisePreferenceDataset(object):
def __init__(self,env,max_steps=None,min_margin=None):
self.env = env
self.max_steps = max_steps
self.min_margin = min_margin
"""
    Computes the constraint volume of a set of feature expectation vectors: the
    vectors are sorted by their noise magnitude and pairwise difference rows are
    stacked into a constraint matrix on the reward weights w. The feasible region
    is then measured either by Monte-Carlo sampling over [-1, 1]^k (fraction of
    sampled weights that satisfy every constraint, on [0, 1]) or by the Chebyshev
    ball radius of the polytope bounded by |w_i| <= 1.
"""
def compute_constraint_volume(self, feature_exps, N_samples=10000000, volume_method="chebyshev_ball"):
# First sort feature_exps by their noise value
sorted_feature_exps = sorted(feature_exps, key=lambda x: x[1])
# Generate constraint matrix
constraint_matrix = []
for i in range(len(sorted_feature_exps) - 1):
for j in range(i+1, len(sorted_feature_exps)):
constraint_matrix.append(sorted_feature_exps[i][0] - sorted_feature_exps[j][0])
A = np.array(constraint_matrix)
b = np.zeros(A.shape[0])
if volume_method == "monte-carlo":
# Monte-Carlo estimate of constraint volume --> (Assume weights on [-1, 1]^k)
            monte_carlo_samples = 2*np.random.random_sample((A.shape[-1], N_samples)) - 1  # one column per sampled weight vector in [-1, 1]^k
constraint_vals = A.dot(monte_carlo_samples)
max_col_vals = np.max(constraint_vals, axis=0) # Compute max value in each column
# Find for how often the max column value is negative (ie. it satisfies all constraints)
constraint_volume = float(len(max_col_vals[max_col_vals < 0]))/float(N_samples)
elif volume_method == "chebyshev_ball":
            # Bound the polytope with box constraints |w_i| <= 1 on each weight component
new_constraints = np.vstack((np.eye(A.shape[-1]), -np.eye(A.shape[-1])))
A_new = np.vstack((A, new_constraints))
b_new = np.append(b, [1]*2*A.shape[-1])
# print(A_new)
# print(b_new)
p = pc.Polytope(A_new, b_new)
constraint_volume = p.chebR
# print(constraint_volume)
return constraint_volume
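    # Minimal illustrative sketch (hypothetical toy data, not used by the training
    # pipeline): shows how the pairwise constraint rows are built from noise-ranked
    # feature expectations and how the Monte-Carlo estimate counts the fraction of
    # sampled weight vectors for which every constraint row has a negative dot product.
    @staticmethod
    def _constraint_volume_toy_example(n_samples=100000):
        toy_feature_exps = [(np.array([1.0, 0.2]), 0.0),   # least noisy
                            (np.array([0.6, 0.5]), 0.3),
                            (np.array([0.1, 0.9]), 0.7)]   # most noisy
        ranked = sorted(toy_feature_exps, key=lambda x: x[1])
        A = np.array([ranked[i][0] - ranked[j][0]
                      for i in range(len(ranked) - 1)
                      for j in range(i + 1, len(ranked))])  # one row per ordered pair
        # Sample weights uniformly from [-1, 1]^2 and check all constraints at once.
        w = 2 * np.random.random_sample((A.shape[-1], n_samples)) - 1
        satisfied = np.max(A.dot(w), axis=0) < 0
        return float(np.sum(satisfied)) / float(n_samples)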
"""
Searches over noise parameters for the one which minimizes constraint volume
"""
    def optimize_noise_grid_search(self, agent, feature_exps, noise_grid, num_samples=10):
min_constraint_volume = np.inf
min_constraint_volume_feature_exp = feature_exps[0]
for i, noise_val in enumerate(noise_grid):
print("Noise Value: ", i)
# Sample from policy with this noise_val
noisy_teacher = NoiseInjectedPolicy(self.env,agent,'epsilon',noise_val)
demos = [self.get_rollout(noisy_teacher) for _ in range(num_samples)]
# Compute feature expectation
feature_exp = (self.get_feature_exps(demos), noise_val)
# Compute constraint volume when adding these feature expectations to feature_exps
new_feature_exps = feature_exps + [feature_exp]
constraint_volume = self.compute_constraint_volume(new_feature_exps)
if constraint_volume < min_constraint_volume:
min_constraint_volume = constraint_volume
min_constraint_volume_feature_exp = feature_exp
print(min_constraint_volume_feature_exp)
# Return the optimized noise value and the corresponding feature expectations
return min_constraint_volume_feature_exp, min_constraint_volume
def get_learned_noisy_rollouts(self, agent, feature_exps, num_trajs, min_length, logdir):
# Sample rollouts from the learned noise values and generate dataset for TREX
noise_vals = np.unique(np.array([f[1] for f in feature_exps]))
print("Noise Vals: ", noise_vals)
trajs = []
for n in noise_vals:
noisy_teacher = NoiseInjectedPolicy(self.env,agent,'epsilon',n)
agent_trajs = []
assert (num_trajs > 0 and min_length <= 0) or (min_length > 0 and num_trajs <= 0)
while (min_length > 0 and np.sum([len(obs) for obs,_,_ in agent_trajs]) < min_length) or\
(num_trajs > 0 and len(agent_trajs) < num_trajs):
rollout_info = self.get_rollout(noisy_teacher)
agent_trajs.append( (rollout_info["features"], rollout_info["actions"], rollout_info["rewards"]) )
trajs.append( (n, agent_trajs))
self.trajs = trajs
with open(os.path.join(logdir,'learned_noise_rollouts.pkl'),'wb') as f:
pickle.dump(self.trajs,f)
# Computes features on which to learn reward function for constraint volume reduction, initially
# this can just be the observations themselves
def compute_features(self, observations):
return observations
def get_feature_exps(self, demos):
return np.mean(np.vstack([d["features"] for d in demos]), axis=0)
def get_rollout(self, teacher_policy):
obs,actions,rewards = gen_traj(self.env,teacher_policy,-1)
return {"obs": obs, "actions": actions, "rewards": rewards, "noise": teacher_policy.epsilon, "features": self.compute_features(obs)}
# num_trajs, trajs per noise level
def prebuild(self,agent,noise_range,num_trajs,min_length,logdir):
trajs = []
for noise_level in tqdm(noise_range):
noisy_policy = NoiseInjectedPolicy(self.env,agent,'epsilon',noise_level)
agent_trajs = []
assert (num_trajs > 0 and min_length <= 0) or (min_length > 0 and num_trajs <= 0)
while (min_length > 0 and np.sum([len(obs) for obs,_,_ in agent_trajs]) < min_length) or\
(num_trajs > 0 and len(agent_trajs) < num_trajs):
obs,actions,rewards = gen_traj(self.env,noisy_policy,-1)
agent_trajs.append((obs,actions,rewards))
trajs.append((noise_level,agent_trajs))
self.trajs = trajs
with open(os.path.join(logdir,'prebuilt.pkl'),'wb') as f:
pickle.dump(self.trajs,f)
def prebuild_learned_noise_injection(self,agent,noise_range,num_trajs,min_length,logdir,iter_samples,max_num_opt_iters,volume_tolerance):
# Get demos from teacher
teacher_policy = NoiseInjectedPolicy(self.env,agent,'epsilon',0)
demos = [self.get_rollout(teacher_policy) for _ in range(iter_samples)]
# Get feature expectation of demos
feature_exps = [ (self.get_feature_exps(demos), 0) ]
for i in range(max_num_opt_iters):
# Get optimized noise parameter and corresponding feature expectation
opt_feature_exp, constraint_volume = self.optimize_noise_grid_search(agent, feature_exps, noise_range)
# Add new optimized feature expectation to list
feature_exps.append(opt_feature_exp)
print("Iteration: ", i, " Final Constraint Volume: ", constraint_volume)
if constraint_volume < volume_tolerance:
break
# Save information to train reward function
self.get_learned_noisy_rollouts(agent, feature_exps, num_trajs, min_length, logdir)
def load_prebuilt(self,fname):
print("GOT HERE")
if os.path.exists(fname):
with open(fname,'rb') as f:
self.trajs = pickle.load(f)
return True
else:
return False
def draw_fig(self,log_dir,demo_trajs):
demo_returns = [np.sum(rewards) for _,_,rewards in demo_trajs]
demo_ave, demo_std = np.mean(demo_returns), np.std(demo_returns)
# Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Inference demo for YAMNet."""
from __future__ import division, print_function
import sys, os
import json, codecs
import numpy as np
import resampy
import soundfile as sf
import tensorflow as tf
import params as yamnet_params
import yamnet as yamnet_model
def main(argv):
assert argv, 'Usage: inference.py <wav file> <wav file> ...'
model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'yamnet.h5')
classes_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'yamnet_class_map.csv')
event_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'event.json')
params = yamnet_params.Params()
yamnet = yamnet_model.yamnet_frames_model(params)
yamnet.load_weights(model_path)
yamnet_classes = yamnet_model.class_names(classes_path)
for file_name in argv:
# Decode the WAV file.
wav_data, sr = sf.read(file_name, dtype=np.int16)
assert wav_data.dtype == np.int16, 'Bad sample type: %r' % wav_data.dtype
waveform = wav_data / 32768.0 # Convert to [-1.0, +1.0]
waveform = waveform.astype('float32')
# Convert to mono and the sample rate expected by YAMNet.
if len(waveform.shape) > 1:
waveform = np.mean(waveform, axis=1)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from collections import OrderedDict
import pytest
import numpy as np
from astropy.table import Table, QTable, TableMergeError, Column, MaskedColumn
from astropy.table.operations import _get_out_class, join_skycoord, join_distance
from astropy import units as u
from astropy.utils import metadata
from astropy.utils.metadata import MergeConflictError
from astropy.utils.compat.context import nullcontext
from astropy import table
from astropy.time import Time
from astropy.coordinates import (SkyCoord, SphericalRepresentation,
UnitSphericalRepresentation,
CartesianRepresentation,
BaseRepresentationOrDifferential,
search_around_3d)
from astropy.coordinates.tests.test_representation import representation_equal
from astropy.io.misc.asdf.tags.helpers import skycoord_equal
try:
import scipy # noqa
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
def sort_eq(list1, list2):
return sorted(list1) == sorted(list2)
class TestJoin():
def _setup(self, t_cls=Table):
lines1 = [' a b c ',
' 0 foo L1',
' 1 foo L2',
' 1 bar L3',
' 2 bar L4']
lines2 = [' a b d ',
' 1 foo R1',
' 1 foo R2',
' 2 bar R3',
' 4 bar R4']
self.t1 = t_cls.read(lines1, format='ascii')
self.t2 = t_cls.read(lines2, format='ascii')
self.t3 = t_cls(self.t2, copy=True)
self.t1.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))
self.t2.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
self.t3.meta.update(OrderedDict([('b', 3), ('c', [1, 2]), ('d', 2), ('a', 1)]))
self.meta_merge = OrderedDict([('b', [1, 2, 3, 4]),
('c', {'a': 1, 'b': 1}),
('d', 1),
('a', 1)])
def test_table_meta_merge(self, operation_table_type):
self._setup(operation_table_type)
out = table.join(self.t1, self.t2, join_type='inner')
assert out.meta == self.meta_merge
def test_table_meta_merge_conflict(self, operation_table_type):
self._setup(operation_table_type)
with pytest.warns(metadata.MergeConflictWarning) as w:
out = table.join(self.t1, self.t3, join_type='inner')
assert len(w) == 3
assert out.meta == self.t3.meta
with pytest.warns(metadata.MergeConflictWarning) as w:
out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='warn')
assert len(w) == 3
assert out.meta == self.t3.meta
out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='silent')
assert out.meta == self.t3.meta
with pytest.raises(MergeConflictError):
out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='error')
with pytest.raises(ValueError):
out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='nonsense')
def test_both_unmasked_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
# Basic join with default parameters (inner join on common keys)
t12 = table.join(t1, t2)
assert type(t12) is operation_table_type
assert type(t12['a']) is type(t1['a']) # noqa
assert type(t12['b']) is type(t1['b']) # noqa
assert type(t12['c']) is type(t1['c']) # noqa
assert type(t12['d']) is type(t2['d']) # noqa
assert t12.masked is False
assert sort_eq(t12.pformat(), [' a b c d ',
'--- --- --- ---',
' 1 foo L2 R1',
' 1 foo L2 R2',
' 2 bar L4 R3'])
# Table meta merged properly
assert t12.meta == self.meta_merge
def test_both_unmasked_left_right_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
# Left join
t12 = table.join(t1, t2, join_type='left')
assert t12.has_masked_columns is True
assert t12.masked is False
for name in ('a', 'b', 'c'):
assert type(t12[name]) is Column
assert type(t12['d']) is MaskedColumn
assert sort_eq(t12.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 foo L1 --',
' 1 bar L3 --',
' 1 foo L2 R1',
' 1 foo L2 R2',
' 2 bar L4 R3'])
# Right join
t12 = table.join(t1, t2, join_type='right')
assert t12.has_masked_columns is True
assert t12.masked is False
assert sort_eq(t12.pformat(), [' a b c d ',
'--- --- --- ---',
' 1 foo L2 R1',
' 1 foo L2 R2',
' 2 bar L4 R3',
' 4 bar -- R4'])
# Outer join
t12 = table.join(t1, t2, join_type='outer')
assert t12.has_masked_columns is True
assert t12.masked is False
assert sort_eq(t12.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 foo L1 --',
' 1 bar L3 --',
' 1 foo L2 R1',
' 1 foo L2 R2',
' 2 bar L4 R3',
' 4 bar -- R4'])
# Check that the common keys are 'a', 'b'
t12a = table.join(t1, t2, join_type='outer')
t12b = table.join(t1, t2, join_type='outer', keys=['a', 'b'])
assert np.all(t12a.as_array() == t12b.as_array())
def test_both_unmasked_single_key_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
# Inner join on 'a' column
t12 = table.join(t1, t2, keys='a')
assert type(t12) is operation_table_type
assert type(t12['a']) is type(t1['a']) # noqa
assert type(t12['b_1']) is type(t1['b']) # noqa
assert type(t12['c']) is type(t1['c']) # noqa
assert type(t12['b_2']) is type(t2['b']) # noqa
assert type(t12['d']) is type(t2['d']) # noqa
assert t12.masked is False
assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ',
'--- --- --- --- ---',
' 1 foo L2 foo R1',
' 1 foo L2 foo R2',
' 1 bar L3 foo R1',
' 1 bar L3 foo R2',
' 2 bar L4 bar R3'])
def test_both_unmasked_single_key_left_right_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
# Left join
t12 = table.join(t1, t2, join_type='left', keys='a')
assert t12.has_masked_columns is True
assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ',
'--- --- --- --- ---',
' 0 foo L1 -- --',
' 1 foo L2 foo R1',
' 1 foo L2 foo R2',
' 1 bar L3 foo R1',
' 1 bar L3 foo R2',
' 2 bar L4 bar R3'])
# Right join
t12 = table.join(t1, t2, join_type='right', keys='a')
assert t12.has_masked_columns is True
assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ',
'--- --- --- --- ---',
' 1 foo L2 foo R1',
' 1 foo L2 foo R2',
' 1 bar L3 foo R1',
' 1 bar L3 foo R2',
' 2 bar L4 bar R3',
' 4 -- -- bar R4'])
# Outer join
t12 = table.join(t1, t2, join_type='outer', keys='a')
assert t12.has_masked_columns is True
assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ',
'--- --- --- --- ---',
' 0 foo L1 -- --',
' 1 foo L2 foo R1',
' 1 foo L2 foo R2',
' 1 bar L3 foo R1',
' 1 bar L3 foo R2',
' 2 bar L4 bar R3',
' 4 -- -- bar R4'])
def test_masked_unmasked(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t1m = operation_table_type(self.t1, masked=True)
t2 = self.t2
# Result table is never masked
t1m2 = table.join(t1m, t2, join_type='inner')
assert t1m2.masked is False
# Result should match non-masked result
t12 = table.join(t1, t2)
assert np.all(t12.as_array() == np.array(t1m2))
# Mask out some values in left table and make sure they propagate
t1m['b'].mask[1] = True
t1m['c'].mask[2] = True
t1m2 = table.join(t1m, t2, join_type='inner', keys='a')
assert sort_eq(t1m2.pformat(), [' a b_1 c b_2 d ',
'--- --- --- --- ---',
' 1 -- L2 foo R1',
' 1 -- L2 foo R2',
' 1 bar -- foo R1',
' 1 bar -- foo R2',
' 2 bar L4 bar R3'])
t21m = table.join(t2, t1m, join_type='inner', keys='a')
assert sort_eq(t21m.pformat(), [' a b_1 d b_2 c ',
'--- --- --- --- ---',
' 1 foo R2 -- L2',
' 1 foo R2 bar --',
' 1 foo R1 -- L2',
' 1 foo R1 bar --',
' 2 bar R3 bar L4'])
def test_masked_masked(self, operation_table_type):
self._setup(operation_table_type)
"""Two masked tables"""
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
t1 = self.t1
t1m = operation_table_type(self.t1, masked=True)
t2 = self.t2
t2m = operation_table_type(self.t2, masked=True)
# Result table is never masked but original column types are preserved
t1m2m = table.join(t1m, t2m, join_type='inner')
assert t1m2m.masked is False
for col in t1m2m.itercols():
assert type(col) is MaskedColumn
# Result should match non-masked result
t12 = table.join(t1, t2)
assert np.all(t12.as_array() == np.array(t1m2m))
# Mask out some values in both tables and make sure they propagate
t1m['b'].mask[1] = True
t1m['c'].mask[2] = True
t2m['d'].mask[2] = True
t1m2m = table.join(t1m, t2m, join_type='inner', keys='a')
assert sort_eq(t1m2m.pformat(), [' a b_1 c b_2 d ',
'--- --- --- --- ---',
' 1 -- L2 foo R1',
' 1 -- L2 foo R2',
' 1 bar -- foo R1',
' 1 bar -- foo R2',
' 2 bar L4 bar --'])
def test_classes(self):
"""Ensure that classes and subclasses get through as expected"""
class MyCol(Column):
pass
class MyMaskedCol(MaskedColumn):
pass
t1 = Table()
t1['a'] = MyCol([1])
t1['b'] = MyCol([2])
t1['c'] = MyMaskedCol([3])
t2 = Table()
t2['a'] = Column([1, 2])
t2['d'] = MyCol([3, 4])
t2['e'] = MyMaskedCol([5, 6])
t12 = table.join(t1, t2, join_type='inner')
for name, exp_type in (('a', MyCol), ('b', MyCol), ('c', MyMaskedCol),
('d', MyCol), ('e', MyMaskedCol)):
assert type(t12[name]) is exp_type
t21 = table.join(t2, t1, join_type='left')
# Note col 'b' gets upgraded from MyCol to MaskedColumn since it needs to be
# masked, but col 'c' stays since MyMaskedCol supports masking.
for name, exp_type in (('a', MyCol), ('b', MaskedColumn), ('c', MyMaskedCol),
('d', MyCol), ('e', MyMaskedCol)):
assert type(t21[name]) is exp_type
def test_col_rename(self, operation_table_type):
self._setup(operation_table_type)
"""
Test auto col renaming when there is a conflict. Use
non-default values of uniq_col_name and table_names.
"""
t1 = self.t1
t2 = self.t2
t12 = table.join(t1, t2, uniq_col_name='x_{table_name}_{col_name}_y',
table_names=['L', 'R'], keys='a')
assert t12.colnames == ['a', 'x_L_b_y', 'c', 'x_R_b_y', 'd']
def test_rename_conflict(self, operation_table_type):
self._setup(operation_table_type)
"""
Test that auto-column rename fails because of a conflict
with an existing column
"""
t1 = self.t1
t2 = self.t2
t1['b_1'] = 1 # Add a new column b_1 that will conflict with auto-rename
with pytest.raises(TableMergeError):
table.join(t1, t2, keys='a')
def test_missing_keys(self, operation_table_type):
self._setup(operation_table_type)
"""Merge on a key column that doesn't exist"""
t1 = self.t1
t2 = self.t2
with pytest.raises(TableMergeError):
table.join(t1, t2, keys=['a', 'not there'])
def test_bad_join_type(self, operation_table_type):
self._setup(operation_table_type)
"""Bad join_type input"""
t1 = self.t1
t2 = self.t2
with pytest.raises(ValueError):
table.join(t1, t2, join_type='illegal value')
def test_no_common_keys(self, operation_table_type):
self._setup(operation_table_type)
"""Merge tables with no common keys"""
t1 = self.t1
t2 = self.t2
del t1['a']
del t1['b']
del t2['a']
del t2['b']
with pytest.raises(TableMergeError):
table.join(t1, t2)
def test_masked_key_column(self, operation_table_type):
self._setup(operation_table_type)
"""Merge on a key column that has a masked element"""
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
t1 = self.t1
t2 = operation_table_type(self.t2, masked=True)
table.join(t1, t2) # OK
t2['a'].mask[0] = True
with pytest.raises(TableMergeError):
table.join(t1, t2)
def test_col_meta_merge(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t2.rename_column('d', 'c') # force col conflict and renaming
meta1 = OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)])
meta2 = OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])
# Key col 'a', should take first value ('cm')
t1['a'].unit = 'cm'
t2['a'].unit = 'm'
# Key col 'b', take first value 't1_b'
t1['b'].info.description = 't1_b'
# Key col 'b', take first non-empty value '%6s'
t2['b'].info.format = '%6s'
# Key col 'a', should be merged meta
t1['a'].info.meta = meta1
t2['a'].info.meta = meta2
# Key col 'b', should be meta2
t2['b'].info.meta = meta2
# All these should pass through
t1['c'].info.format = '%3s'
t1['c'].info.description = 't1_c'
t2['c'].info.format = '%6s'
t2['c'].info.description = 't2_c'
if operation_table_type is Table:
ctx = pytest.warns(metadata.MergeConflictWarning, match=r"In merged column 'a' the 'unit' attribute does not match \(cm != m\)") # noqa
else:
ctx = nullcontext()
with ctx:
t12 = table.join(t1, t2, keys=['a', 'b'])
assert t12['a'].unit == 'm'
assert t12['b'].info.description == 't1_b'
assert t12['b'].info.format == '%6s'
assert t12['a'].info.meta == self.meta_merge
assert t12['b'].info.meta == meta2
assert t12['c_1'].info.format == '%3s'
assert t12['c_1'].info.description == 't1_c'
assert t12['c_2'].info.format == '%6s'
assert t12['c_2'].info.description == 't2_c'
def test_join_multidimensional(self, operation_table_type):
self._setup(operation_table_type)
# Regression test for #2984, which was an issue where join did not work
# on multi-dimensional columns.
t1 = operation_table_type()
t1['a'] = [1, 2, 3]
t1['b'] = np.ones((3, 4))
t2 = operation_table_type()
t2['a'] = [1, 2, 3]
t2['c'] = [4, 5, 6]
t3 = table.join(t1, t2)
np.testing.assert_allclose(t3['a'], t1['a'])
np.testing.assert_allclose(t3['b'], t1['b'])
np.testing.assert_allclose(t3['c'], t2['c'])
def test_join_multidimensional_masked(self, operation_table_type):
self._setup(operation_table_type)
"""
Test for outer join with multidimensional columns where masking is required.
(Issue #4059).
"""
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
a = table.MaskedColumn([1, 2, 3], name='a')
a2 = table.Column([1, 3, 4], name='a')
b = table.MaskedColumn([[1, 2],
[3, 4],
[5, 6]],
name='b',
mask=[[1, 0],
[0, 1],
[0, 0]])
c = table.Column([[1, 1],
[2, 2],
[3, 3]],
name='c')
t1 = operation_table_type([a, b])
t2 = operation_table_type([a2, c])
t12 = table.join(t1, t2, join_type='inner')
assert np.all(t12['b'].mask == [[True, False],
[False, False]])
assert not hasattr(t12['c'], 'mask')
t12 = table.join(t1, t2, join_type='outer')
assert np.all(t12['b'].mask == [[True, False],
[False, True],
[False, False],
[True, True]])
assert np.all(t12['c'].mask == [[False, False],
[True, True],
[False, False],
[False, False]])
def test_mixin_functionality(self, mixin_cols):
col = mixin_cols['m']
cls_name = type(col).__name__
len_col = len(col)
idx = np.arange(len_col)
t1 = table.QTable([idx, col], names=['idx', 'm1'])
t2 = table.QTable([idx, col], names=['idx', 'm2'])
# Set up join mismatches for different join_type cases
t1 = t1[[0, 1, 3]]
t2 = t2[[0, 2, 3]]
# Test inner join, which works for all mixin_cols
out = table.join(t1, t2, join_type='inner')
assert len(out) == 2
assert out['m2'].__class__ is col.__class__
assert np.all(out['idx'] == [0, 3])
if cls_name == 'SkyCoord':
# SkyCoord doesn't support __eq__ so use our own
assert skycoord_equal(out['m1'], col[[0, 3]])
assert skycoord_equal(out['m2'], col[[0, 3]])
elif 'Repr' in cls_name or 'Diff' in cls_name:
assert np.all(representation_equal(out['m1'], col[[0, 3]]))
assert np.all(representation_equal(out['m2'], col[[0, 3]]))
else:
assert np.all(out['m1'] == col[[0, 3]])
assert np.all(out['m2'] == col[[0, 3]])
# Check for left, right, outer join which requires masking. Only Time
# supports this currently.
if cls_name == 'Time':
out = table.join(t1, t2, join_type='left')
assert len(out) == 3
assert np.all(out['idx'] == [0, 1, 3])
assert np.all(out['m1'] == t1['m1'])
assert np.all(out['m2'] == t2['m2'])
assert np.all(out['m1'].mask == [False, False, False])
assert np.all(out['m2'].mask == [False, True, False])
out = table.join(t1, t2, join_type='right')
assert len(out) == 3
assert np.all(out['idx'] == [0, 2, 3])
assert np.all(out['m1'] == t1['m1'])
assert np.all(out['m2'] == t2['m2'])
assert np.all(out['m1'].mask == [False, True, False])
assert np.all(out['m2'].mask == [False, False, False])
out = table.join(t1, t2, join_type='outer')
assert len(out) == 4
assert np.all(out['idx'] == [0, 1, 2, 3])
assert np.all(out['m1'] == col)
assert np.all(out['m2'] == col)
assert np.all(out['m1'].mask == [False, False, True, False])
assert np.all(out['m2'].mask == [False, True, False, False])
else:
# Otherwise make sure it fails with the right exception message
for join_type in ('outer', 'left', 'right'):
with pytest.raises(NotImplementedError) as err:
table.join(t1, t2, join_type='outer')
assert ('join requires masking' in str(err.value)
or 'join unavailable' in str(err.value))
def test_cartesian_join(self, operation_table_type):
t1 = Table(rows=[(1, 'a'),
(2, 'b')], names=['a', 'b'])
t2 = Table(rows=[(3, 'c'),
(4, 'd')], names=['a', 'c'])
t12 = table.join(t1, t2, join_type='cartesian')
assert t1.colnames == ['a', 'b']
assert t2.colnames == ['a', 'c']
assert len(t12) == len(t1) * len(t2)
assert str(t12).splitlines() == [
'a_1 b a_2 c ',
'--- --- --- ---',
' 1 a 3 c',
' 1 a 4 d',
' 2 b 3 c',
' 2 b 4 d']
with pytest.raises(ValueError, match='cannot supply keys for a cartesian join'):
t12 = table.join(t1, t2, join_type='cartesian', keys='a')
@pytest.mark.skipif('not HAS_SCIPY')
def test_join_with_join_skycoord_sky(self):
sc1 = SkyCoord([0, 1, 1.1, 2], [0, 0, 0, 0], unit='deg')
sc2 = SkyCoord([0.5, 1.05, 2.1], [0, 0, 0], unit='deg')
t1 = Table([sc1], names=['sc'])
t2 = Table([sc2], names=['sc'])
t12 = table.join(t1, t2, join_funcs={'sc': join_skycoord(0.2 * u.deg)})
exp = ['sc_id sc_1 sc_2 ',
' deg,deg deg,deg ',
'----- ------- --------',
' 1 1.0,0.0 1.05,0.0',
' 1 1.1,0.0 1.05,0.0',
' 2 2.0,0.0 2.1,0.0']
assert str(t12).splitlines() == exp
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('distance_func', ['search_around_3d', search_around_3d])
def test_join_with_join_skycoord_3d(self, distance_func):
sc1 = SkyCoord([0, 1, 1.1, 2]*u.deg, [0, 0, 0, 0]*u.deg, [1, 1, 2, 1]*u.m)
sc2 = SkyCoord([0.5, 1.05, 2.1]*u.deg, [0, 0, 0]*u.deg, [1, 1, 1]*u.m)
t1 = Table([sc1], names=['sc'])
t2 = Table([sc2], names=['sc'])
join_func = join_skycoord(np.deg2rad(0.2) * u.m,
distance_func=distance_func)
t12 = table.join(t1, t2, join_funcs={'sc': join_func})
exp = ['sc_id sc_1 sc_2 ',
' deg,deg,m deg,deg,m ',
'----- ----------- ------------',
' 1 1.0,0.0,1.0 1.05,0.0,1.0',
' 2 2.0,0.0,1.0 2.1,0.0,1.0']
assert str(t12).splitlines() == exp
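# join_distance (exercised below) turns approximate matching into an exact join key:
# values within the given tolerance are grouped under a shared integer id (the col_id
# column in the expected output), computed with a scipy KDTree, which is why these
# tests are guarded by HAS_SCIPY.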
@pytest.mark.skipif('not HAS_SCIPY')
def test_join_with_join_distance_1d(self):
c1 = [0, 1, 1.1, 2]
c2 = [0.5, 1.05, 2.1]
t1 = Table([c1], names=['col'])
t2 = Table([c2], names=['col'])
join_func = join_distance(0.2,
kdtree_args={'leafsize': 32},
query_args={'p': 2})
t12 = table.join(t1, t2, join_type='outer', join_funcs={'col': join_func})
exp = ['col_id col_1 col_2',
'------ ----- -----',
' 1 1.0 1.05',
' 1 1.1 1.05',
' 2 2.0 2.1',
' 3 0.0 --',
' 4 -- 0.5']
assert str(t12).splitlines() == exp
@pytest.mark.skipif('not HAS_SCIPY')
def test_join_with_join_distance_1d_multikey(self):
from astropy.table.operations import _apply_join_funcs
c1 = [0, 1, 1.1, 1.2, 2]
id1 = [0, 1, 2, 2, 3]
o1 = ['a', 'b', 'c', 'd', 'e']
c2 = [0.5, 1.05, 2.1]
id2 = [0, 2, 4]
o2 = ['z', 'y', 'x']
t1 = Table([c1, id1, o1], names=['col', 'id', 'o1'])
t2 = Table([c2, id2, o2], names=['col', 'id', 'o2'])
join_func = join_distance(0.2)
join_funcs = {'col': join_func}
t12 = table.join(t1, t2, join_type='outer', join_funcs=join_funcs)
exp = ['col_id col_1 id o1 col_2 o2',
'------ ----- --- --- ----- ---',
' 1 1.0 1 b -- --',
' 1 1.1 2 c 1.05 y',
' 1 1.2 2 d 1.05 y',
' 2 2.0 3 e -- --',
' 2 -- 4 -- 2.1 x',
' 3 0.0 0 a -- --',
' 4 -- 0 -- 0.5 z']
assert str(t12).splitlines() == exp
left, right, keys = _apply_join_funcs(t1, t2, ('col', 'id'), join_funcs)
assert keys == ('col_id', 'id')
@pytest.mark.skipif('not HAS_SCIPY')
def test_join_with_join_distance_1d_quantity(self):
c1 = [0, 1, 1.1, 2] * u.m
c2 = [500, 1050, 2100] * u.mm
t1 = QTable([c1], names=['col'])
t2 = QTable([c2], names=['col'])
join_func = join_distance(20 * u.cm)
t12 = table.join(t1, t2, join_funcs={'col': join_func})
exp = ['col_id col_1 col_2 ',
' m mm ',
'------ ----- ------',
' 1 1.0 1050.0',
' 1 1.1 1050.0',
' 2 2.0 2100.0']
assert str(t12).splitlines() == exp
# Generate column name conflict
t2['col_id'] = [0, 0, 0]
t2['col__id'] = [0, 0, 0]
t12 = table.join(t1, t2, join_funcs={'col': join_func})
exp = ['col___id col_1 col_2 col_id col__id',
' m mm ',
'-------- ----- ------ ------ -------',
' 1 1.0 1050.0 0 0',
' 1 1.1 1050.0 0 0',
' 2 2.0 2100.0 0 0']
assert str(t12).splitlines() == exp
@pytest.mark.skipif('not HAS_SCIPY')
def test_join_with_join_distance_2d(self):
c1 = np.array([[0, 1, 1.1, 2],
[0, 0, 1, 0]]).transpose()
c2 = np.array([[0.5, 1.05, 2.1],
[0, 0, 0]]).transpose()
t1 = Table([c1], names=['col'])
t2 = Table([c2], names=['col'])
join_func = join_distance(0.2,
kdtree_args={'leafsize': 32},
query_args={'p': 2})
t12 = table.join(t1, t2, join_type='outer', join_funcs={'col': join_func})
exp = ['col_id col_1 [2] col_2 [2] ',
'------ ---------- -----------',
' 1 1.0 .. 0.0 1.05 .. 0.0',
' 2 2.0 .. 0.0 2.1 .. 0.0',
' 3 0.0 .. 0.0 -- .. --',
' 4 1.1 .. 1.0 -- .. --',
' 5 -- .. -- 0.5 .. 0.0']
assert str(t12).splitlines() == exp
class TestSetdiff():
def _setup(self, t_cls=Table):
lines1 = [' a b ',
' 0 foo ',
' 1 foo ',
' 1 bar ',
' 2 bar ']
lines2 = [' a b ',
' 0 foo ',
' 3 foo ',
' 4 bar ',
' 2 bar ']
lines3 = [' a b d ',
' 0 foo R1',
' 8 foo R2',
' 1 bar R3',
' 4 bar R4']
self.t1 = t_cls.read(lines1, format='ascii')
self.t2 = t_cls.read(lines2, format='ascii')
self.t3 = t_cls.read(lines3, format='ascii')
def test_default_same_columns(self, operation_table_type):
self._setup(operation_table_type)
out = table.setdiff(self.t1, self.t2)
assert type(out['a']) is type(self.t1['a']) # noqa
assert type(out['b']) is type(self.t1['b']) # noqa
assert out.pformat() == [' a b ',
'--- ---',
' 1 bar',
' 1 foo']
def test_default_same_tables(self, operation_table_type):
self._setup(operation_table_type)
out = table.setdiff(self.t1, self.t1)
assert type(out['a']) is type(self.t1['a']) # noqa
assert type(out['b']) is type(self.t1['b']) # noqa
assert out.pformat() == [' a b ',
'--- ---']
def test_extra_col_left_table(self, operation_table_type):
self._setup(operation_table_type)
with pytest.raises(ValueError):
table.setdiff(self.t3, self.t1)
def test_extra_col_right_table(self, operation_table_type):
self._setup(operation_table_type)
out = table.setdiff(self.t1, self.t3)
assert type(out['a']) is type(self.t1['a']) # noqa
assert type(out['b']) is type(self.t1['b']) # noqa
assert out.pformat() == [' a b ',
'--- ---',
' 1 foo',
' 2 bar']
def test_keys(self, operation_table_type):
self._setup(operation_table_type)
out = table.setdiff(self.t3, self.t1, keys=['a', 'b'])
assert type(out['a']) is type(self.t1['a']) # noqa
assert type(out['b']) is type(self.t1['b']) # noqa
assert out.pformat() == [' a b d ',
'--- --- ---',
' 4 bar R4',
' 8 foo R2']
def test_missing_key(self, operation_table_type):
self._setup(operation_table_type)
with pytest.raises(ValueError):
table.setdiff(self.t3, self.t1, keys=['a', 'd'])
class TestVStack():
def _setup(self, t_cls=Table):
self.t1 = t_cls.read([' a b',
' 0. foo',
' 1. bar'], format='ascii')
self.t2 = t_cls.read([' a b c',
' 2. pez 4',
' 3. sez 5'], format='ascii')
self.t3 = t_cls.read([' a b',
' 4. 7',
' 5. 8',
' 6. 9'], format='ascii')
self.t4 = t_cls(self.t1, copy=True, masked=t_cls is Table)
# The following table has meta-data that conflicts with t1
self.t5 = t_cls(self.t1, copy=True)
self.t1.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))
self.t2.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
self.t4.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))
self.t5.meta.update(OrderedDict([('b', 3), ('c', 'k'), ('d', 1)]))
self.meta_merge = OrderedDict([('b', [1, 2, 3, 4, 5, 6]),
('c', {'a': 1, 'b': 1, 'c': 1}),
('d', 1),
('a', 1),
('e', 1)])
def test_stack_rows(self, operation_table_type):
self._setup(operation_table_type)
t2 = self.t1.copy()
t2.meta.clear()
out = table.vstack([self.t1, t2[1]])
assert type(out['a']) is type(self.t1['a']) # noqa
assert type(out['b']) is type(self.t1['b']) # noqa
assert out.pformat() == [' a b ',
'--- ---',
'0.0 foo',
'1.0 bar',
'1.0 bar']
def test_stack_table_column(self, operation_table_type):
self._setup(operation_table_type)
t2 = self.t1.copy()
t2.meta.clear()
out = table.vstack([self.t1, t2['a']])
assert out.masked is False
assert out.pformat() == [' a b ',
'--- ---',
'0.0 foo',
'1.0 bar',
'0.0 --',
'1.0 --']
def test_table_meta_merge(self, operation_table_type):
self._setup(operation_table_type)
out = table.vstack([self.t1, self.t2, self.t4], join_type='inner')
assert out.meta == self.meta_merge
def test_table_meta_merge_conflict(self, operation_table_type):
self._setup(operation_table_type)
with pytest.warns(metadata.MergeConflictWarning) as w:
out = table.vstack([self.t1, self.t5], join_type='inner')
assert len(w) == 2
assert out.meta == self.t5.meta
with pytest.warns(metadata.MergeConflictWarning) as w:
out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='warn')
assert len(w) == 2
assert out.meta == self.t5.meta
out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='silent')
assert out.meta == self.t5.meta
with pytest.raises(MergeConflictError):
out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='error')
with pytest.raises(ValueError):
out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='nonsense')
def test_bad_input_type(self, operation_table_type):
self._setup(operation_table_type)
with pytest.raises(ValueError):
table.vstack([])
with pytest.raises(TypeError):
table.vstack(1)
with pytest.raises(TypeError):
table.vstack([self.t2, 1])
with pytest.raises(ValueError):
table.vstack([self.t1, self.t2], join_type='invalid join type')
def test_stack_basic_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
t12 = table.vstack([t1, t2], join_type='inner')
assert t12.masked is False
assert type(t12) is operation_table_type
assert type(t12['a']) is type(t1['a']) # noqa
assert type(t12['b']) is type(t1['b']) # noqa
assert t12.pformat() == [' a b ',
'--- ---',
'0.0 foo',
'1.0 bar',
'2.0 pez',
'3.0 sez']
t124 = table.vstack([t1, t2, t4], join_type='inner')
assert type(t124) is operation_table_type
assert type(t12['a']) is type(t1['a']) # noqa
assert type(t12['b']) is type(t1['b']) # noqa
assert t124.pformat() == [' a b ',
'--- ---',
'0.0 foo',
'1.0 bar',
'2.0 pez',
'3.0 sez',
'0.0 foo',
'1.0 bar']
def test_stack_basic_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
t12 = table.vstack([t1, t2], join_type='outer')
assert t12.masked is False
assert t12.pformat() == [' a b c ',
'--- --- ---',
'0.0 foo --',
'1.0 bar --',
'2.0 pez 4',
'3.0 sez 5']
t124 = table.vstack([t1, t2, t4], join_type='outer')
assert t124.masked is False
assert t124.pformat() == [' a b c ',
'--- --- ---',
'0.0 foo --',
'1.0 bar --',
'2.0 pez 4',
'3.0 sez 5',
'0.0 foo --',
'1.0 bar --']
def test_stack_incompatible(self, operation_table_type):
self._setup(operation_table_type)
with pytest.raises(TableMergeError) as excinfo:
table.vstack([self.t1, self.t3], join_type='inner')
assert ("The 'b' columns have incompatible types: {}"
.format([self.t1['b'].dtype.name, self.t3['b'].dtype.name])
in str(excinfo.value))
with pytest.raises(TableMergeError) as excinfo:
table.vstack([self.t1, self.t3], join_type='outer')
assert "The 'b' columns have incompatible types:" in str(excinfo.value)
with pytest.raises(TableMergeError):
table.vstack([self.t1, self.t2], join_type='exact')
t1_reshape = self.t1.copy()
t1_reshape['b'].shape = [2, 1]
with pytest.raises(TableMergeError) as excinfo:
table.vstack([self.t1, t1_reshape])
assert "have different shape" in str(excinfo.value)
def test_vstack_one_masked(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t4 = self.t4
t4['b'].mask[1] = True
t14 = table.vstack([t1, t4])
assert t14.masked is False
assert t14.pformat() == [' a b ',
'--- ---',
'0.0 foo',
'1.0 bar',
'0.0 foo',
'1.0 --']
def test_col_meta_merge_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
# Key col 'a', should take last value ('km')
t1['a'].info.unit = 'cm'
t2['a'].info.unit = 'm'
t4['a'].info.unit = 'km'
# Key col 'a' format should take last when all match
t1['a'].info.format = '%f'
t2['a'].info.format = '%f'
t4['a'].info.format = '%f'
# Key col 'b', take first value 't1_b'
t1['b'].info.description = 't1_b'
# Key col 'b', take first non-empty value '%6s'
t4['b'].info.format = '%6s'
# Key col 'a', should be merged meta
t1['a'].info.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))
t2['a'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
t4['a'].info.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))
# Key col 'b', should be meta2
t2['b'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
if operation_table_type is Table:
ctx = pytest.warns(metadata.MergeConflictWarning)
else:
ctx = nullcontext()
with ctx as warning_lines:
out = table.vstack([t1, t2, t4], join_type='inner')
if operation_table_type is Table:
assert len(warning_lines) == 2
assert ("In merged column 'a' the 'unit' attribute does not match (cm != m)"
in str(warning_lines[0].message))
assert ("In merged column 'a' the 'unit' attribute does not match (m != km)"
in str(warning_lines[1].message))
# Check units are suitably ignored for a regular Table
assert out.pformat() == [' a b ',
' km ',
'-------- ------',
'0.000000 foo',
'1.000000 bar',
'2.000000 pez',
'3.000000 sez',
'0.000000 foo',
'1.000000 bar']
else:
# Check QTable correctly dealt with units.
assert out.pformat() == [' a b ',
' km ',
'-------- ------',
'0.000000 foo',
'0.000010 bar',
'0.002000 pez',
'0.003000 sez',
'0.000000 foo',
'1.000000 bar']
assert out['a'].info.unit == 'km'
assert out['a'].info.format == '%f'
assert out['b'].info.description == 't1_b'
assert out['b'].info.format == '%6s'
assert out['a'].info.meta == self.meta_merge
assert out['b'].info.meta == OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])
def test_col_meta_merge_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
# Key col 'a', should take last value ('km')
t1['a'].unit = 'cm'
t2['a'].unit = 'm'
t4['a'].unit = 'km'
# Key col 'a' format should take last when all match
t1['a'].info.format = '%0d'
t2['a'].info.format = '%0d'
t4['a'].info.format = '%0d'
# Key col 'b', take first value 't1_b'
t1['b'].info.description = 't1_b'
# Key col 'b', take first non-empty value '%6s'
t4['b'].info.format = '%6s'
# Key col 'a', should be merged meta
t1['a'].info.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))
t2['a'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
t4['a'].info.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))
# Key col 'b', should be meta2
t2['b'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
# All these should pass through
t2['c'].unit = 'm'
t2['c'].info.format = '%6s'
t2['c'].info.description = 't2_c'
with pytest.warns(metadata.MergeConflictWarning) as warning_lines:
out = table.vstack([t1, t2, t4], join_type='outer')
assert len(warning_lines) == 2
assert ("In merged column 'a' the 'unit' attribute does not match (cm != m)"
in str(warning_lines[0].message))
assert ("In merged column 'a' the 'unit' attribute does not match (m != km)"
in str(warning_lines[1].message))
assert out['a'].unit == 'km'
assert out['a'].info.format == '%0d'
assert out['b'].info.description == 't1_b'
assert out['b'].info.format == '%6s'
assert out['a'].info.meta == self.meta_merge
assert out['b'].info.meta == OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])
assert out['c'].info.unit == 'm'
assert out['c'].info.format == '%6s'
assert out['c'].info.description == 't2_c'
def test_vstack_one_table(self, operation_table_type):
self._setup(operation_table_type)
"""Regression test for issue #3313"""
assert (self.t1 == table.vstack(self.t1)).all()
assert (self.t1 == table.vstack([self.t1])).all()
def test_mixin_functionality(self, mixin_cols):
col = mixin_cols['m']
len_col = len(col)
t = table.QTable([col], names=['a'])
cls_name = type(col).__name__
# Vstack works for these classes:
if isinstance(col, (u.Quantity, Time, SkyCoord,
BaseRepresentationOrDifferential)):
out = table.vstack([t, t])
assert len(out) == len_col * 2
if cls_name == 'SkyCoord':
# Argh, SkyCoord needs __eq__!!
assert skycoord_equal(out['a'][len_col:], col)
assert skycoord_equal(out['a'][:len_col], col)
elif 'Repr' in cls_name or 'Diff' in cls_name:
assert np.all(representation_equal(out['a'][:len_col], col))
assert np.all(representation_equal(out['a'][len_col:], col))
else:
assert np.all(out['a'][:len_col] == col)
assert np.all(out['a'][len_col:] == col)
else:
with pytest.raises(NotImplementedError) as err:
table.vstack([t, t])
assert ('vstack unavailable for mixin column type(s): {}'
.format(cls_name) in str(err.value))
# Check for outer stack which requires masking. Only Time supports
# this currently.
t2 = table.QTable([col], names=['b']) # different from col name for t
if cls_name == 'Time':
out = table.vstack([t, t2], join_type='outer')
assert len(out) == len_col * 2
assert np.all(out['a'][:len_col] == col)
assert np.all(out['b'][len_col:] == col)
assert np.all(out['a'].mask == [False] * len_col + [True] * len_col)
assert np.all(out['b'].mask == [True] * len_col + [False] * len_col)
# check directly stacking mixin columns:
out2 = table.vstack([t, t2['b']])
assert np.all(out['a'] == out2['a'])
assert np.all(out['b'] == out2['b'])
else:
with pytest.raises(NotImplementedError) as err:
table.vstack([t, t2], join_type='outer')
assert ('vstack requires masking' in str(err.value)
or 'vstack unavailable' in str(err.value))
def test_vstack_different_representation(self):
"""Test that representations can be mixed together."""
rep1 = CartesianRepresentation([1, 2]*u.km, [3, 4]*u.km, 1*u.km)
rep2 = SphericalRepresentation([0]*u.deg, [0]*u.deg, 10*u.km)
t1 = Table([rep1])
t2 = Table([rep2])
t12 = table.vstack([t1, t2])
expected = CartesianRepresentation([1, 2, 10]*u.km,
[3, 4, 0]*u.km,
[1, 1, 0]*u.km)
assert np.all(representation_equal(t12['col0'], expected))
rep3 = UnitSphericalRepresentation([0]*u.deg, [0]*u.deg)
t3 = Table([rep3])
with pytest.raises(ValueError, match='loss of information'):
table.vstack([t1, t3])
class TestDStack():
def _setup(self, t_cls=Table):
self.t1 = t_cls.read([' a b',
' 0. foo',
' 1. bar'], format='ascii')
self.t2 = t_cls.read([' a b c',
' 2. pez 4',
' 3. sez 5'], format='ascii')
self.t2['d'] = Time([1, 2], format='cxcsec')
self.t3 = t_cls({'a': [[5., 6.], [4., 3.]],
'b': [['foo', 'bar'], ['pez', 'sez']]},
names=('a', 'b'))
self.t4 = t_cls(self.t1, copy=True, masked=t_cls is Table)
self.t5 = t_cls({'a': [[4., 2.], [1., 6.]],
'b': [['foo', 'pez'], ['bar', 'sez']]},
names=('a', 'b'))
self.t6 = t_cls.read([' a b c',
' 7. pez 2',
' 4. sez 6',
' 6. foo 3'], format='ascii')
@staticmethod
def compare_dstack(tables, out):
for ii, tbl in enumerate(tables):
for name, out_col in out.columns.items():
if name in tbl.colnames:
# Columns always compare equal
assert np.all(tbl[name] == out[name][:, ii])
# If input has a mask then output must have same mask
if hasattr(tbl[name], 'mask'):
assert np.all(tbl[name].mask == out[name].mask[:, ii])
# If input has no mask then output might have a mask (if other table
# is missing that column). If so then all mask values should be False.
elif hasattr(out[name], 'mask'):
assert not np.any(out[name].mask[:, ii])
else:
# Column missing for this table, out must have a mask with all True.
assert np.all(out[name].mask[:, ii])
def test_dstack_table_column(self, operation_table_type):
"""Stack a table with 3 cols and one column (gets auto-converted to Table).
"""
self._setup(operation_table_type)
t2 = self.t1.copy()
out = table.dstack([self.t1, t2['a']])
self.compare_dstack([self.t1, t2[('a',)]], out)
def test_dstack_basic_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
t4['a'].mask[0] = True
# Test for non-masked table
t12 = table.dstack([t1, t2], join_type='outer')
assert type(t12) is operation_table_type
assert type(t12['a']) is type(t1['a']) # noqa
assert type(t12['b']) is type(t1['b']) # noqa
self.compare_dstack([t1, t2], t12)
# Test for masked table
t124 = table.dstack([t1, t2, t4], join_type='outer')
assert type(t124) is operation_table_type
assert type(t124['a']) is type(t4['a']) # noqa
assert type(t124['b']) is type(t4['b']) # noqa
self.compare_dstack([t1, t2, t4], t124)
def test_dstack_basic_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
# Test for masked table
t124 = table.dstack([t1, t2, t4], join_type='inner')
assert type(t124) is operation_table_type
assert type(t124['a']) is type(t4['a']) # noqa
assert type(t124['b']) is type(t4['b']) # noqa
self.compare_dstack([t1, t2, t4], t124)
def test_dstack_multi_dimension_column(self, operation_table_type):
self._setup(operation_table_type)
t3 = self.t3
t5 = self.t5
t2 = self.t2
t35 = table.dstack([t3, t5])
assert type(t35) is operation_table_type
assert type(t35['a']) is type(t3['a']) # noqa
assert type(t35['b']) is type(t3['b']) # noqa
self.compare_dstack([t3, t5], t35)
with pytest.raises(TableMergeError):
table.dstack([t2, t3])
def test_dstack_different_length_table(self, operation_table_type):
self._setup(operation_table_type)
t2 = self.t2
t6 = self.t6
with pytest.raises(ValueError):
table.dstack([t2, t6])
def test_dstack_single_table(self):
self._setup(Table)
out = table.dstack(self.t1)
assert np.all(out == self.t1)
def test_dstack_representation(self):
rep1 = SphericalRepresentation([1, 2]*u.deg, [3, 4]*u.deg, 1*u.kpc)
rep2 = SphericalRepresentation([10, 20]*u.deg, [30, 40]*u.deg, 10*u.kpc)
t1 = Table([rep1])
t2 = Table([rep2])
t12 = table.dstack([t1, t2])
assert np.all(representation_equal(t12['col0'][:, 0], rep1))
assert np.all(representation_equal(t12['col0'][:, 1], rep2))
def test_dstack_skycoord(self):
sc1 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg)
sc2 = SkyCoord([10, 20]*u.deg, [30, 40]*u.deg)
t1 = Table([sc1])
t2 = Table([sc2])
t12 = table.dstack([t1, t2])
assert skycoord_equal(sc1, t12['col0'][:, 0])
assert skycoord_equal(sc2, t12['col0'][:, 1])
class TestHStack():
def _setup(self, t_cls=Table):
self.t1 = t_cls.read([' a b',
' 0. foo',
' 1. bar'], format='ascii')
self.t2 = t_cls.read([' a b c',
' 2. pez 4',
' 3. sez 5'], format='ascii')
self.t3 = t_cls.read([' d e',
' 4. 7',
' 5. 8',
' 6. 9'], format='ascii')
self.t4 = t_cls(self.t1, copy=True, masked=True)
self.t4['a'].name = 'f'
self.t4['b'].name = 'g'
# The following table has meta-data that conflicts with t1
self.t5 = t_cls(self.t1, copy=True)
self.t1.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))
self.t2.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
self.t4.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))
self.t5.meta.update(OrderedDict([('b', 3), ('c', 'k'), ('d', 1)]))
self.meta_merge = OrderedDict([('b', [1, 2, 3, 4, 5, 6]),
('c', {'a': 1, 'b': 1, 'c': 1}),
('d', 1),
('a', 1),
('e', 1)])
def test_stack_same_table(self, operation_table_type):
"""
From #2995, test that hstack'ing references to the same table has the
expected output.
"""
self._setup(operation_table_type)
out = table.hstack([self.t1, self.t1])
assert out.masked is False
assert out.pformat() == ['a_1 b_1 a_2 b_2',
'--- --- --- ---',
'0.0 foo 0.0 foo',
'1.0 bar 1.0 bar']
def test_stack_rows(self, operation_table_type):
self._setup(operation_table_type)
out = table.hstack([self.t1[0], self.t2[1]])
assert out.masked is False
assert out.pformat() == ['a_1 b_1 a_2 b_2 c ',
'--- --- --- --- ---',
'0.0 foo 3.0 sez 5']
def test_stack_columns(self, operation_table_type):
self._setup(operation_table_type)
out = table.hstack([self.t1, self.t2['c']])
assert type(out['a']) is type(self.t1['a']) # noqa
assert type(out['b']) is type(self.t1['b']) # noqa
assert type(out['c']) is type(self.t2['c']) # noqa
assert out.pformat() == [' a b c ',
'--- --- ---',
'0.0 foo 4',
'1.0 bar 5']
def test_table_meta_merge(self, operation_table_type):
self._setup(operation_table_type)
out = table.hstack([self.t1, self.t2, self.t4], join_type='inner')
assert out.meta == self.meta_merge
def test_table_meta_merge_conflict(self, operation_table_type):
self._setup(operation_table_type)
with pytest.warns(metadata.MergeConflictWarning) as w:
out = table.hstack([self.t1, self.t5], join_type='inner')
assert len(w) == 2
assert out.meta == self.t5.meta
with pytest.warns(metadata.MergeConflictWarning) as w:
out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='warn')
assert len(w) == 2
assert out.meta == self.t5.meta
out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='silent')
assert out.meta == self.t5.meta
with pytest.raises(MergeConflictError):
out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='error')
with pytest.raises(ValueError):
out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='nonsense')
def test_bad_input_type(self, operation_table_type):
self._setup(operation_table_type)
with pytest.raises(ValueError):
table.hstack([])
with pytest.raises(TypeError):
table.hstack(1)
with pytest.raises(TypeError):
table.hstack([self.t2, 1])
with pytest.raises(ValueError):
table.hstack([self.t1, self.t2], join_type='invalid join type')
def test_stack_basic(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t3 = self.t3
t4 = self.t4
out = table.hstack([t1, t2], join_type='inner')
assert out.masked is False
assert type(out) is operation_table_type
assert type(out['a_1']) is type(t1['a']) # noqa
assert type(out['b_1']) is type(t1['b']) # noqa
assert type(out['a_2']) is type(t2['a']) # noqa
assert type(out['b_2']) is type(t2['b']) # noqa
assert out.pformat() == ['a_1 b_1 a_2 b_2 c ',
'--- --- --- --- ---',
'0.0 foo 2.0 pez 4',
'1.0 bar 3.0 sez 5']
# stacking as a list gives same result
out_list = table.hstack([t1, t2], join_type='inner')
assert out.pformat() == out_list.pformat()
out = table.hstack([t1, t2], join_type='outer')
assert out.pformat() == out_list.pformat()
out = table.hstack([t1, t2, t3, t4], join_type='outer')
assert out.masked is False
assert out.pformat() == ['a_1 b_1 a_2 b_2 c d e f g ',
'--- --- --- --- --- --- --- --- ---',
'0.0 foo 2.0 pez 4 4.0 7 0.0 foo',
'1.0 bar 3.0 sez 5 5.0 8 1.0 bar',
' -- -- -- -- -- 6.0 9 -- --']
out = table.hstack([t1, t2, t3, t4], join_type='inner')
assert out.masked is False
assert out.pformat() == ['a_1 b_1 a_2 b_2 c d e f g ',
'--- --- --- --- --- --- --- --- ---',
'0.0 foo 2.0 pez 4 4.0 7 0.0 foo',
'1.0 bar 3.0 sez 5 5.0 8 1.0 bar']
def test_stack_incompatible(self, operation_table_type):
self._setup(operation_table_type)
# For join_type exact, which will fail here because n_rows
# does not match
with pytest.raises(TableMergeError):
table.hstack([self.t1, self.t3], join_type='exact')
def test_hstack_one_masked(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail()
self._setup(operation_table_type)
t1 = self.t1
t2 = operation_table_type(t1, copy=True, masked=True)
t2.meta.clear()
t2['b'].mask[1] = True
out = table.hstack([t1, t2])
assert out.pformat() == ['a_1 b_1 a_2 b_2',
'--- --- --- ---',
'0.0 foo 0.0 foo',
'1.0 bar 1.0 --']
def test_table_col_rename(self, operation_table_type):
self._setup(operation_table_type)
out = table.hstack([self.t1, self.t2], join_type='inner',
uniq_col_name='{table_name}_{col_name}',
table_names=('left', 'right'))
assert out.masked is False
assert out.pformat() == ['left_a left_b right_a right_b c ',
'------ ------ ------- ------- ---',
' 0.0 foo 2.0 pez 4',
' 1.0 bar 3.0 sez 5']
def test_col_meta_merge(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t3 = self.t3[:2]
t4 = self.t4
# Just set a bunch of meta and make sure it is the same in output
meta1 = OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)])
t1['a'].unit = 'cm'
t1['b'].info.description = 't1_b'
t4['f'].info.format = '%6s'
t1['b'].info.meta.update(meta1)
t3['d'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
t4['g'].info.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))
t3['e'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
t3['d'].unit = 'm'
t3['d'].info.format = '%6s'
t3['d'].info.description = 't3_c'
out = table.hstack([t1, t3, t4], join_type='exact')
for t in [t1, t3, t4]:
for name in t.colnames:
for attr in ('meta', 'unit', 'format', 'description'):
assert getattr(out[name].info, attr) == getattr(t[name].info, attr)
# Make sure we got a copy of meta, not ref
t1['b'].info.meta['b'] = None
assert out['b'].info.meta['b'] == [1, 2]
def test_hstack_one_table(self, operation_table_type):
self._setup(operation_table_type)
"""Regression test for issue #3313"""
assert (self.t1 == table.hstack(self.t1)).all()
assert (self.t1 == table.hstack([self.t1])).all()
def test_mixin_functionality(self, mixin_cols):
col1 = mixin_cols['m']
col2 = col1[2:4] # Shorter version of col1
t1 = table.QTable([col1])
t2 = table.QTable([col2])
cls_name = type(col1).__name__
out = table.hstack([t1, t2], join_type='inner')
assert type(out['col0_1']) is type(out['col0_2']) # noqa
assert len(out) == len(col2)
# Check that columns are as expected.
if cls_name == 'SkyCoord':
assert skycoord_equal(out['col0_1'], col1[:len(col2)])
assert skycoord_equal(out['col0_2'], col2)
elif 'Repr' in cls_name or 'Diff' in cls_name:
assert np.all(representation_equal(out['col0_1'], col1[:len(col2)]))
assert np.all(representation_equal(out['col0_2'], col2))
else:
assert np.all(out['col0_1'] == col1[:len(col2)])
assert np.all(out['col0_2'] == col2)
# Time class supports masking, all other mixins do not
if cls_name == 'Time':
out = table.hstack([t1, t2], join_type='outer')
assert len(out) == len(t1)
assert np.all(out['col0_1'] == col1)
assert np.all(out['col0_2'][:len(col2)] == col2)
assert np.all(out['col0_2'].mask == [False, False, True, True])
# check directly stacking mixin columns:
out2 = table.hstack([t1, t2['col0']], join_type='outer')
assert np.all(out['col0_1'] == out2['col0_1'])
assert np.all(out['col0_2'] == out2['col0_2'])
else:
with pytest.raises(NotImplementedError) as err:
table.hstack([t1, t2], join_type='outer')
assert 'hstack requires masking' in str(err.value)
def test_unique(operation_table_type):
t = operation_table_type.read(
[' a b c d',
' 2 b 7.0 0',
' 1 c 3.0 5',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 1 a 1.0 7',
' 2 b 5.0 1',
' 0 a 0.0 4',
' 1 a 2.0 6',
' 1 c 3.0 5',
], format='ascii')
tu = operation_table_type(np.sort(t[:-1]))
t_all = table.unique(t)
assert sort_eq(t_all.pformat(), tu.pformat())
t_s = t.copy()
del t_s['b', 'c', 'd']
t_all = table.unique(t_s)
assert sort_eq(t_all.pformat(), [' a ',
'---',
' 0',
' 1',
' 2'])
key1 = 'a'
t1a = table.unique(t, key1)
assert sort_eq(t1a.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4',
' 1 c 3.0 5',
' 2 b 7.0 0'])
t1b = table.unique(t, key1, keep='last')
assert sort_eq(t1b.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4',
' 1 c 3.0 5',
' 2 b 5.0 1'])
t1c = table.unique(t, key1, keep='none')
assert sort_eq(t1c.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4'])
key2 = ['a', 'b']
t2a = table.unique(t, key2)
assert sort_eq(t2a.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4',
' 1 a 1.0 7',
' 1 c 3.0 5',
' 2 a 4.0 3',
' 2 b 7.0 0'])
t2b = table.unique(t, key2, keep='last')
assert sort_eq(t2b.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4',
' 1 a 2.0 6',
' 1 c 3.0 5',
' 2 a 4.0 3',
' 2 b 5.0 1'])
t2c = table.unique(t, key2, keep='none')
assert sort_eq(t2c.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4',
' 2 a 4.0 3'])
key2 = ['a', 'a']
with pytest.raises(ValueError) as exc:
t2a = table.unique(t, key2)
assert exc.value.args[0] == "duplicate key names"
with pytest.raises(ValueError) as exc:
table.unique(t, key2, keep=True)
assert exc.value.args[0] == (
"'keep' should be one of 'first', 'last', 'none'")
t1_m = operation_table_type(t1a, masked=True)
t1_m['a'].mask[1] = True
with pytest.raises(ValueError) as exc:
t1_mu = table.unique(t1_m)
assert exc.value.args[0] == (
"cannot use columns with masked values as keys; "
"remove column 'a' from keys and rerun unique()")
t1_mu = table.unique(t1_m, silent=True)
assert t1_mu.masked is False
assert t1_mu.pformat() == [' a b c d ',
'--- --- --- ---',
' 0 a 0.0 4',
' 2 b 7.0 0',
' -- c 3.0 5']
with pytest.raises(ValueError):
t1_mu = table.unique(t1_m, silent=True, keys='a')
t1_m = operation_table_type(t, masked=True)
t1_m['a'].mask[1] = True
t1_m['d'].mask[3] = True
# Test that multiple masked key columns get removed in the correct
# order
t1_mu = table.unique(t1_m, keys=['d', 'a', 'b'], silent=True)
assert t1_mu.masked is False
assert t1_mu.pformat() == [' a b c d ',
'--- --- --- ---',
' 2 a 4.0 --',
' 2 b 7.0 0',
' -- c 3.0 5']
def test_vstack_bytes(operation_table_type):
"""
Test for issue #5617 when vstack'ing bytes columns in Py3.
This is really an upstream numpy issue numpy/numpy/#8403.
"""
t = operation_table_type([[b'a']], names=['a'])
assert t['a'].itemsize == 1
t2 = table.vstack([t, t])
assert len(t2) == 2
assert t2['a'].itemsize == 1
def test_vstack_unicode():
"""
Test for problem related to issue #5617 when vstack'ing *unicode*
columns. In this case the character size gets multiplied by 4.
"""
t = table.Table([['a']], names=['a'])
assert t['a'].itemsize == 4 # 4-byte / char for U dtype
t2 = table.vstack([t, t])
assert len(t2) == 2
assert t2['a'].itemsize == 4
def test_join_mixins_time_quantity():
"""
Test for table join using non-ndarray key columns.
"""
tm1 = Time([2, 1, 2], format='cxcsec')
q1 = [2, 1, 1] * u.m
idx1 = [1, 2, 3]
tm2 = Time([2, 3], format='cxcsec')
q2 = [2, 3] * u.m
idx2 = [10, 20]
t1 = Table([tm1, q1, idx1], names=['tm', 'q', 'idx'])
t2 = Table([tm2, q2, idx2], names=['tm', 'q', 'idx'])
# Output:
#
# <Table length=4>
# tm q idx_1 idx_2
# m
# object float64 int64 int64
# ------------------ ------- ----- -----
# 0.9999999999969589 1.0 2 --
# 2.00000000000351 1.0 3 --
# 2.00000000000351 2.0 1 10
# 3.000000000000469 3.0 -- 20
t12 = table.join(t1, t2, join_type='outer', keys=['tm', 'q'])
# Key cols are lexically sorted
assert np.all(t12['tm'] == Time([1, 2, 2, 3], format='cxcsec'))
assert np.all(t12['q'] == [1, 1, 2, 3] * u.m)
assert np.all(t12['idx_1'] == np.ma.array([2, 3, 1, 0], mask=[0, 0, 0, 1]))
assert np.all(t12['idx_2'] == np.ma.array([0, 0, 10, 20], mask=[1, 1, 0, 0]))
def test_join_mixins_not_sortable():
"""
Test for table join using non-ndarray key columns that are not sortable.
"""
sc = SkyCoord([1, 2], [3, 4], unit='deg,deg')
t1 = Table([sc, [1, 2]], names=['sc', 'idx1'])
t2 = Table([sc, [10, 20]], names=['sc', 'idx2'])
with pytest.raises(TypeError, match='one or more key columns are not sortable'):
table.join(t1, t2, keys='sc')
def test_join_non_1d_key_column():
c1 = [[1, 2], [3, 4]]
c2 = [1, 2]
t1 = Table([c1, c2], names=['a', 'b'])
t2 = t1.copy()
with pytest.raises(ValueError, match="key column 'a' must be 1-d"):
table.join(t1, t2, keys='a')
def test_argsort_time_column():
"""Regression test for #10823."""
times = Time(['2016-01-01', '2018-01-01', '2017-01-01'])
t = Table([times], names=['time'])
i = t.argsort('time')
assert np.all(i == times.argsort())
def test_sort_indexed_table():
"""Test fix for #9473 and #6545 - and another regression test for #10823."""
t = Table([[1, 3, 2], [6, 4, 5]], names=('a', 'b'))
t.add_index('a')
t.sort('a')
assert np.all(t['a'] == [1, 2, 3])
assert np.all(t['b'] == [6, 5, 4])
t.sort('b')
assert np.all(t['b'] == [4, 5, 6])
assert np.all(t['a'] == [3, 2, 1])
times = ['2016-01-01', '2018-01-01', '2017-01-01']
tm = Time(times)
t2 = Table([tm, [3, 2, 1]], names=['time', 'flux'])
t2.sort('flux')
assert np.all(t2['flux'] == [1, 2, 3])
t2.sort('time')
assert np.all(t2['flux'] == [3, 1, 2])
assert
|
np.all(t2['time'] == tm[[0, 2, 1]])
|
numpy.all
|
import json
import inflect
import numpy as np
import requests
import tagme
from nltk.stem.porter import *
stemmer = PorterStemmer()
p = inflect.engine()
tagme.GCUBE_TOKEN = ""
def sort_dict_by_values(dictionary):
keys = []
values = []
for key, value in sorted(dictionary.items(), key=lambda item: (item[1], item[0]), reverse=True):
keys.append(key)
values.append(value)
return keys, values
def preprocess_relations(file, prop=False):
relations = {}
with open(file, encoding='utf-8') as f:
content = f.readlines()
for line in content:
split_line = line.split()
key = ' '.join(split_line[2:])[1:-3].lower()
key = ' '.join([stemmer.stem(word) for word in key.split()])
if key not in relations:
relations[key] = []
uri = split_line[0].replace('<', '').replace('>', '')
if prop is True:
uri_property = uri.replace('/ontology/', '/property/')
relations[key].extend([uri, uri_property])
else:
relations[key].append(uri)
return relations
def get_earl_entities(query, earl_url='http://localhost:4999'):
result = {}
result['question'] = query
result['entities'] = []
result['relations'] = []
THRESHOLD = 0.1
response = requests.post(f'{earl_url}/processQuery',
headers={"Content-Type": "application/json"},
json={"nlquery": query, "pagerankflag": False})
json_response = json.loads(response.text)
type_list = []
chunk = []
for i in json_response['ertypes']:
type_list.append(i)
for i in json_response['chunktext']:
chunk.append([i['surfacestart'], i['surfacelength']])
keys = list(json_response['rerankedlists'].keys())
reranked_lists = json_response['rerankedlists']
for i in range(len(keys)):
if type_list[i] == 'entity':
entity = {}
entity['uris'] = []
entity['surface'] = chunk[i]
for r in reranked_lists[keys[i]]:
if r[0] > THRESHOLD:
uri = {}
uri['uri'] = r[1]
uri['confidence'] = r[0]
entity['uris'].append(uri)
if entity['uris'] != []:
result['entities'].append(entity)
if type_list[i] == 'relation':
relation = {}
relation['uris'] = []
relation['surface'] = chunk[i]
for r in reranked_lists[keys[i]]:
if r[0] > THRESHOLD:
uri = {}
uri['uri'] = r[1]
uri['confidence'] = r[0]
relation['uris'].append(uri)
if relation['uris'] != []:
result['relations'].append(relation)
return result
def get_tag_me_entities(query):
threshold = 0.1
try:
response = requests.get("https://tagme.d4science.org/tagme/tag?lang=en&gcube-token={}&text={}"
.format('1b4eb12e-d434-4b30-8c7f-91b3395b96e8-843339462', query))
entities = []
for annotation in json.loads(response.text)['annotations']:
confidence = float(annotation['link_probability'])
if confidence > threshold:
entity = {}
uris = {}
uri = 'http://dbpedia.org/resource/' + annotation['title'].replace(' ', '_')
uris['uri'] = uri
uris['confidence'] = confidence
surface = [annotation['start'], annotation['end'] - annotation['start']]
entity['uris'] = [uris]
entity['surface'] = surface
entities.append(entity)
except:
entities = []
print('get_tag_me_entities: ', query)
return entities
def get_nliwod_entities(query, hashmap):
ignore_list = []
entities = []
singular_query = [stemmer.stem(word) if p.singular_noun(word) == False else stemmer.stem(p.singular_noun(word)) for
word in query.lower().split(' ')]
string = ' '.join(singular_query)
words = query.split(' ')
indexlist = {}
surface = []
current = 0
locate = 0
for i in range(len(singular_query)):
indexlist[current] = {}
indexlist[current]['len'] = len(words[i]) - 1
indexlist[current]['surface'] = [locate, len(words[i]) - 1]
current += len(singular_query[i]) + 1
locate += len(words[i]) + 1
for key in hashmap.keys():
if key in string and len(key) > 2 and key not in ignore_list:
e_list = list(set(hashmap[key]))
k_index = string.index(key)
if k_index in indexlist.keys():
surface = indexlist[k_index]['surface']
else:
for i in indexlist:
if k_index > i and k_index < (i + indexlist[i]['len']):
surface = indexlist[i]['surface']
break
for e in e_list:
r_e = {}
r_e['surface'] = surface
r_en = {}
r_en['uri'] = e
r_en['confidence'] = 0.3
r_e['uris'] = [r_en]
entities.append(r_e)
return entities
def get_spotlight_entities(query):
entities = []
data = {
'text': query,
'confidence': '0.4',
'support': '10'
}
headers = {"accept": "application/json"}
response = requests.get('http://api.dbpedia-spotlight.org/en/annotate', params=data, headers=headers)
try:
response_json = response.text.replace('@', '')
output = json.loads(response_json)
if 'Resources' in output.keys():
resource = output['Resources']
for item in resource:
entity = {}
uri = {}
uri['uri'] = item['URI']
uri['confidence'] = float(item['similarityScore'])
entity['uris'] = [uri]
entity['surface'] = [int(item['offset']), len(item['surfaceForm'])]
entities.append(entity)
except json.JSONDecodeError:
print('Spotlight:', query)
return entities
def get_falcon_entities(query):
entities = []
relations = []
headers = {
'Content-Type': 'application/json',
}
params = (
('mode', 'long'),
)
data = "{\"text\": \"" + query + "\"}"
response = requests.post('https://labs.tib.eu/falcon/api', headers=headers, params=params,
data=data.encode('utf-8'))
try:
output = json.loads(response.text)
for i in output['entities']:
ent = {}
ent['surface'] = ""
ent_uri = {}
ent_uri['confidence'] = 0.9
ent_uri['uri'] = i[0]
ent['uris'] = [ent_uri]
entities.append(ent)
for i in output['relations']:
rel = {}
rel['surface'] = ""
rel_uri = {}
rel_uri['confidence'] = 0.9
rel_uri['uri'] = i[0]
rel['uris'] = [rel_uri]
relations.append(rel)
except:
print('get_falcon_entities: ', query)
return entities, relations
def merge_entity(old_e, new_e):
for i in new_e:
exist = False
for j in old_e:
for k in j['uris']:
if i['uris'][0]['uri'] == k['uri']:
k['confidence'] = max(k['confidence'], i['uris'][0]['confidence'])
exist = True
if not exist:
old_e.append(i)
return old_e
def merge_relation(old_e, new_e):
for i in range(len(new_e)):
for j in range(len(old_e)):
if new_e[i]['surface'] == old_e[j]['surface']:
for i1 in range(len(new_e[i]['uris'])):
notexist = True
for j1 in range(len(old_e[j]['uris'])):
if new_e[i]['uris'][i1]['uri'] == old_e[j]['uris'][j1]['uri']:
old_e[j]['uris'][j1]['confidence'] = max(old_e[j]['uris'][j1]['confidence'],
new_e[i]['uris'][i1]['confidence'])
notexist = False
if notexist:
old_e[j]['uris'].append(new_e[i]['uris'][i1])
return old_e
import argparse
if __name__ == "__main__":
argparser = argparse.ArgumentParser()
argparser.add_argument('--qald_ver', type=str, required=True)
argparser.add_argument('--earl_url', default='http://localhost:4999', type=str)
args = argparser.parse_args()
with open(f'data/QALD/{args.qald_ver}.json', 'r', encoding='utf-8') as f:
data = json.load(f)
properties = preprocess_relations('data/dbpedia/dbpedia_3Eng_1_property.ttl', True)
print('properties: ', len(properties))
linked_data = []
count = 0
for q in data['questions']:
q_idx = [i for i, q_by_lang in enumerate(q['question']) if q_by_lang['language'] == 'en'][0]
query = q['question'][q_idx]['string']
earl = get_earl_entities(query, earl_url=args.earl_url)
tagme_e = get_tag_me_entities(query)
if len(tagme_e) > 0:
earl['entities'] = merge_entity(earl['entities'], tagme_e)
nliwod = get_nliwod_entities(query, properties)
if len(nliwod) > 0:
earl['relations'] = merge_entity(earl['relations'], nliwod)
spot_e = get_spotlight_entities(query)
if len(spot_e) > 0:
earl['entities'] = merge_entity(earl['entities'], spot_e)
e_falcon, r_falcon = get_falcon_entities(query)
if len(e_falcon) > 0:
earl['entities'] = merge_entity(earl['entities'], e_falcon)
if len(r_falcon) > 0:
earl['relations'] = merge_entity(earl['relations'], r_falcon)
esim = []
for i in earl['entities']:
i['uris'] = sorted(i['uris'], key=lambda k: k['confidence'], reverse=True)
esim.append(max([j['confidence'] for j in i['uris']]))
earl['entities'] =
|
np.array(earl['entities'])
|
numpy.array
|
import typing
import numpy as np
from sklearn.metrics import mean_absolute_percentage_error, r2_score, mean_squared_error
THRESHOLD = 0.15
NEGATIVE_WEIGHT = 1.1
def deviation_metric_one_sample(y_true: typing.Union[float, int], y_pred: typing.Union[float, int]) -> float:
"""
Реализация кастомной метрики для хакатона.
:param y_true: float, реальная цена
:param y_pred: float, предсказанная цена
:return: float, значение метрики
"""
deviation = (y_pred - y_true) / np.maximum(1e-8, y_true)
if np.abs(deviation) <= THRESHOLD:
return 0
elif deviation <= - 4 * THRESHOLD:
return 9 * NEGATIVE_WEIGHT
elif deviation < -THRESHOLD:
return NEGATIVE_WEIGHT * ((deviation / THRESHOLD) + 1) ** 2
elif deviation < 4 * THRESHOLD:
return ((deviation / THRESHOLD) - 1) ** 2
else:
return 9
def deviation_metric(y_true: np.array, y_pred: np.array) -> float:
return np.array([deviation_metric_one_sample(y_true[n], y_pred[n]) for n in range(len(y_true))]).mean()
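# --- Editor's illustrative sketch (hypothetical helper, not part of the original module) ---
# The piecewise metric above is zero inside the +/-15 % band, grows quadratically outside
# it, weights under-prediction by NEGATIVE_WEIGHT, and saturates at 9 (or 9 * NEGATIVE_WEIGHT).
def _deviation_metric_example() -> float:
    """Tiny worked example: +10 % is free, +30 % costs 1.0, -60 % costs 9 * 1.1."""
    y_true = np.array([100.0, 100.0, 100.0])
    y_pred = np.array([110.0, 130.0, 40.0])
    return deviation_metric(y_true, y_pred)  # == (0 + 1.0 + 9.9) / 3 ≈ 3.63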
def median_absolute_percentage_error(y_true: np.array, y_pred: np.array) -> float:
return np.median(
|
np.abs(y_pred-y_true)
|
numpy.abs
|
import random
import Levenshtein as Lev
import argparse
import cv2
import numpy as np
import os
import torch
from load_data import IMG_HEIGHT, IMG_WIDTH, NUM_WRITERS, letter2index, tokens, num_tokens, OUTPUT_MAX_LEN, index2letter
from modules_tro import normalize
from network_tro import ConTranModel
def read_image(file_name):
url = file_name + ".png"
if not os.path.exists(url):
print("Url doesn't exist:", url)
img = cv2.imread(url, 0)
if img is None:
print("Img is broken:", url)
rate = float(IMG_HEIGHT) / img.shape[0]
img = cv2.resize(img, (int(img.shape[1] * rate) + 1, IMG_HEIGHT),
                     interpolation=cv2.INTER_CUBIC)  # INTER_AREA causes an error here
img = img / 255. # 0-255 -> 0-1
img = 1. - img
img_width = img.shape[-1]
if img_width > IMG_WIDTH:
out_img = img[:, :IMG_WIDTH]
else:
out_img = np.zeros((IMG_HEIGHT, IMG_WIDTH), dtype="float32")
out_img[:, :img_width] = img
out_img = out_img.astype("float32")
mean = 0.5
std = 0.5
out_img_final = (out_img - mean) / std
return out_img_final
def label_padding(labels, num_tokens):
new_label_len = []
ll = [letter2index[i] for i in labels]
new_label_len.append(len(ll) + 2)
ll = np.array(ll) + num_tokens
ll = list(ll)
ll = [tokens["GO_TOKEN"]] + ll + [tokens["END_TOKEN"]]
num = OUTPUT_MAX_LEN - len(ll)
if not num == 0:
        ll.extend([tokens["PAD_TOKEN"]] * num)  # pad with PAD_TOKEN up to OUTPUT_MAX_LEN
return ll
def prep_images(imgs):
random.shuffle(imgs)
final_imgs = imgs[:50]
if len(final_imgs) < 50:
while len(final_imgs) < 50:
num_cp = 50 - len(final_imgs)
final_imgs = final_imgs + imgs[:num_cp]
imgs = torch.from_numpy(np.array(final_imgs)).unsqueeze(0).cuda() # 1,50,64,216
return imgs
def test_writer(imgs, wid, label, model, out_dir):
imgs = prep_images(imgs)
with torch.no_grad():
f_xs = model.gen.enc_image(imgs)
label = label.unsqueeze(0)
f_xt, f_embed = model.gen.enc_text(label, f_xs.shape)
f_mix = model.gen.mix(f_xs, f_embed)
xg = model.gen.decode(f_mix, f_xt)
pred = model.rec(xg, label, img_width=torch.from_numpy(
|
np.array([IMG_WIDTH])
|
numpy.array
|
# --- built in ---
import math
# --- 3rd party ---
import numpy as np
# --- my module ---
from rlchemy.lib.data import buffers as rl_buffers
from test.utils import TestCase
class TestDataBuffersModule(TestCase):
"""Test rlchemy.lib.data.buffers module"""
def test_base_buffer(self):
capacity = 10
batch = 1
n_samples = 15 # test circular
buf = rl_buffers.BaseBuffer(capacity, batch=batch)
self.assertEqual(capacity, buf.capacity)
self.assertEqual(capacity, buf.slots)
self.assertEqual(batch, buf.batch)
self.assertEqual(0, buf.head)
self.assertEqual(0, buf.tail)
self.assertTrue(buf.isnull)
self.assertFalse(buf.isfull)
self.assertTrue(buf.ready_for_sample)
for i in range(n_samples):
buf.add({'a': ([i], [i+1])})
if i < capacity-1:
self.assertFalse(buf.isfull)
self.assertEqual(i+1, len(buf))
self.assertEqual(i+1, buf.len_slots())
self.assertEqual(0, buf.head)
else:
self.assertTrue(buf.isfull)
self.assertEqual(capacity, len(buf))
self.assertEqual(capacity, buf.len_slots())
self.assertEqual((i+1)%capacity, buf.tail)
exp = np.arange(n_samples-capacity, n_samples)
exp_a0 = np.roll(exp, n_samples % capacity)
exp_a1 = exp_a0 + 1
exp_a0 = np.expand_dims(exp_a0, axis=-1)
exp_a1 = np.expand_dims(exp_a1, axis=-1)
self.assertArrayEqual(exp_a0, buf.data['a'][0])
self.assertArrayEqual(exp_a1, buf.data['a'][1])
# test getitem
data = buf[np.arange(n_samples % capacity)]
exp_a0 = np.arange(n_samples - n_samples % capacity, n_samples)
exp_a1 = exp_a0 + 1
exp_a0 = np.expand_dims(exp_a0, axis=-1)
exp_a1 = np.expand_dims(exp_a1, axis=-1)
self.assertArrayEqual(exp_a0, data['a'][0])
self.assertArrayEqual(exp_a1, data['a'][1])
# test setitem
n = n_samples - capacity
new_data = np.arange(n - n_samples % capacity, n)
new_data = np.expand_dims(new_data, axis=-1)
new_data = {'a': (new_data, new_data+1)}
buf[np.arange(n_samples % capacity)] = new_data
n = n_samples - capacity - n_samples % capacity
exp_a0 = np.arange(n, n + capacity)
exp_a1 = exp_a0 + 1
exp_a0 = np.expand_dims(exp_a0, axis=-1)
exp_a1 = np.expand_dims(exp_a1, axis=-1)
self.assertArrayEqual(exp_a0, buf.data['a'][0])
self.assertArrayEqual(exp_a1, buf.data['a'][1])
# test update (should have the same results as setitem)
buf.update(new_data, indices=np.arange(n_samples % capacity))
self.assertArrayEqual(exp_a0, buf.data['a'][0])
self.assertArrayEqual(exp_a1, buf.data['a'][1])
# test ravel/unravel index
def test_ravel(indices):
self.assertArrayEqual(
np.ravel_multi_index(indices, (buf.slots, buf.batch)),
buf.ravel_index(indices))
test_ravel(([1, 2, 3], 0))
test_ravel(([1, 2, 3], [0]))
def test_unravel(indices):
self.assertArrayEqual(
np.unravel_index(indices, (buf.slots, buf.batch)),
buf.unravel_index(indices))
test_unravel([4, 5, 6])
test_unravel(7)
def test_base_buffer_multidim(self):
capacity = 20
batch = 2
dim = 2
n_samples = 15 # test circular
buf = rl_buffers.BaseBuffer(capacity, batch=batch)
data = np.arange(n_samples*batch*dim).reshape((n_samples, batch, dim))
for i in range(n_samples):
buf.add({'a': data[i]})
if (i+1)*batch < capacity:
self.assertFalse(buf.isfull)
self.assertEqual((i+1)*batch, len(buf))
self.assertEqual(i+1, buf.len_slots())
self.assertEqual(0, buf.head)
else:
self.assertTrue(buf.isfull)
self.assertEqual(capacity, len(buf))
self.assertEqual(capacity//batch, buf.len_slots())
self.assertEqual((i+1)%(capacity//batch), buf.tail)
exp = np.arange(n_samples*batch*dim-capacity*dim, n_samples*batch*dim)
exp = exp.reshape(-1, 2, 2)
exp = np.roll(exp, n_samples % (capacity//batch), axis=0)
self.assertArrayEqual(exp, buf.data['a'])
# test ravel/unravel index
def test_ravel(indices):
self.assertArrayEqual(
np.ravel_multi_index(indices, (buf.slots, buf.batch)),
buf.ravel_index(indices))
test_ravel(([1, 2, 3], 0))
test_ravel(([[1], [2], [3]], [0, 1]))
def test_unravel(indices):
self.assertArrayEqual(
np.unravel_index(indices, (buf.slots, buf.batch)),
buf.unravel_index(indices))
test_unravel([4, 5, 6])
test_unravel(7)
def test_base_buffer_auto_calc_space(self):
capacity = 10
batch = 1
buf = rl_buffers.BaseBuffer(capacity, batch=batch)
self.assertEqual(0, len(buf))
self.assertEqual(0, buf.len_slots())
self.assertEqual(capacity, buf.capacity)
self.assertEqual(capacity, buf.slots)
self.assertEqual(batch, buf.batch)
self.assertEqual(0, buf.head)
self.assertEqual(0, buf.tail)
self.assertTrue(buf.isnull)
self.assertFalse(buf.isfull)
self.assertTrue(buf.ready_for_sample)
capacity = 10
n_samples = 15 # test circular
buf = rl_buffers.BaseBuffer(capacity, batch=None)
self.assertEqual(0, len(buf))
self.assertEqual(0, buf.len_slots())
self.assertEqual(None, buf.capacity)
self.assertEqual(None, buf.slots)
self.assertEqual(None, buf.batch)
self.assertEqual(0, buf.head)
self.assertEqual(0, buf.tail)
self.assertTrue(buf.isnull)
self.assertFalse(buf.isfull)
self.assertTrue(buf.ready_for_sample)
buf.add({'a': [0, 1]})
self.assertEqual(2, len(buf))
self.assertEqual(1, buf.len_slots())
self.assertEqual(capacity, buf.capacity)
self.assertEqual(math.ceil(capacity/2), buf.slots)
self.assertEqual(2, buf.batch)
self.assertEqual(0, buf.head)
self.assertEqual(1, buf.tail)
self.assertFalse(buf.isnull)
self.assertFalse(buf.isfull)
self.assertTrue(buf.ready_for_sample)
def test_base_buffer_relative_index(self):
capacity = 10
batch = 1
n_samples = 15 # test circular
buf = rl_buffers.BaseBuffer(capacity, batch=batch)
for i in range(n_samples):
buf.add({'a': ([i], [i+1])})
head = n_samples%capacity
self.assertEqual(head, buf.head)
self.assertEqual(head, buf.tail)
# test int, slice key
data = buf.rel[1]
self.assertArrayEqual([head+1], data['a'][0])
self.assertArrayEqual([head+2], data['a'][1])
data = buf.rel[-1]
self.assertArrayEqual([n_samples-1], data['a'][0])
self.assertArrayEqual([n_samples], data['a'][1])
data = buf.rel[1:3]
exp = np.arange(2).reshape(-1, 1)
self.assertArrayEqual(exp+head+1, data['a'][0])
self.assertArrayEqual(exp+head+2, data['a'][1])
data = buf.rel[-3:-1]
exp =
|
np.arange(2, 0, -1)
|
numpy.arange
|
import numpy as np
from diautils import help
from diautils.config import to_conf
from bisect import bisect_left
from scipy.stats import norm
def siglog(v):
return np.sign(v) * np.log(1.0 + np.abs(v))
def sigexp(v):
sv = np.sign(v)
return sv * (np.exp(sv * v) - 1.0)
def z_norm(v, mean=None, std=None):
if len(v.shape) == 1:
if mean is None:
mean = v.mean()
if std is None:
std = v.std()
        std = np.where(std == 0, 1, std)
return (v - mean) / std
else:
if mean is None:
mean = v.mean(axis=-2)
if std is None:
std = v.std(axis=-2)
        std = np.where(std == 0, 1, std)
return (v - np.expand_dims(mean, -2)) / np.expand_dims(std, -2)
def z_denorm(v, mean=None, std=None):
if len(v.shape) == 1:
return v * std + mean
else:
return (v.T * std + mean).T
def siglog_norm(v, mean=None, std=None):
return siglog(z_norm(v, mean, std))
def siglog_denorm(v, mean=None, std=None):
return z_denorm(sigexp(v), mean, std)
def tanh_siglog_norm(v, mean=None, std=None, alpha=1.0):
return np.tanh(alpha * siglog_norm(v, mean, std))
def tanh_siglog_denorm(v, mean=None, std=None, alpha=1.0):
return siglog_denorm(np.arctanh(v) / alpha, mean, std)
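# --- Editor's illustrative sketch (hypothetical helper, not part of the original module) ---
# siglog/sigexp form a signed-log transform and its exact inverse, and tanh_siglog_norm
# squashes z-scored data into (-1, 1) so that tanh_siglog_denorm can recover the original
# values up to floating-point error.
def _siglog_roundtrip_example() -> bool:
    v = np.array([-250.0, -1.0, 0.0, 3.0, 1e4])
    ok_siglog = np.allclose(sigexp(siglog(v)), v)
    normed = tanh_siglog_norm(v, mean=v.mean(), std=v.std())
    ok_tanh = np.allclose(tanh_siglog_denorm(normed, mean=v.mean(), std=v.std()), v)
    return ok_siglog and ok_tanh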
class Distrib1d:
def __init__(self, sample, pre_norm, normal=None, mean=None, std=None, alpha=1.0):
self.sample = sample
self.lower_bound = sample[0]
self.upper_bound = sample[-1]
self.max_bound = np.maximum(np.abs(self.upper_bound), np.abs(self.lower_bound))
self.bounds = (self.lower_bound, self.upper_bound)
self.num_sample = len(sample)
self.prob_step = 1 / (self.num_sample + 1)
self.sample_probs = np.arange(1, self.num_sample + 1) * self.prob_step
self.pre_norm = pre_norm
if normal is None:
self.normal = norm.ppf(self.sample_probs)
else:
self.normal = normal
if self.pre_norm:
self.lower_bound_normal = self.normal[1]
self.upper_bound_normal = self.normal[-2]
else:
self.lower_bound_normal = self.normal[0]
self.upper_bound_normal = self.normal[-1]
self.max_bound_normal = np.maximum(np.abs(self.upper_bound_normal), np.abs(self.lower_bound_normal))
self.bounds_normal = (self.lower_bound_normal, self.upper_bound_normal)
self.mean = mean
self.std = std
self.alpha = alpha
def clip(self, data):
return np.clip(data, self.lower_bound, self.upper_bound)
def clip_normal(self, data):
return np.clip(data, self.lower_bound_normal, self.upper_bound_normal)
def normalize(self, data):
if not isinstance(data, np.ndarray):
data = np.array(data)
if self.pre_norm:
data = tanh_siglog_norm(data, self.mean, self.std, self.alpha)
return data
def denormalize(self, data):
if not isinstance(data, np.ndarray):
data = np.array(data)
if self.pre_norm:
data = tanh_siglog_denorm(np.clip(data, -1 + 1e-15, 1 - 1e-15), self.mean, self.std, self.alpha)
return data
def search(self, data):
data = self.normalize(data)
pos = np.empty(len(data), np.int32)
for i, v in enumerate(data):
pos[i] = bisect_left(self.sample, v)
return pos, data
def probs(self, data):
pos, data = self.search(data)
spos = pos - 1
lbound = self.sample[spos]
ubound = self.sample[pos]
dbound = ubound - lbound
        dbound = np.where(dbound == 0, 1, dbound)
alphas = (data - lbound) / dbound
return self.sample_probs[spos] + alphas * self.prob_step, data
def as_normal(self, data, clip=False):
if not isinstance(data, np.ndarray):
data = np.array(data)
if clip:
data = self.clip(data)
probs, data_n = self.probs(data.flatten())
return norm.ppf(probs).reshape(data.shape), data_n.reshape(data.shape)
def as_raw(self, data, clip=False):
if not isinstance(data, np.ndarray):
data = np.array(data)
data_flat = data.flatten()
if clip:
data_flat = self.clip_normal(data_flat)
probs = norm.cdf(data_flat)
pos_r = probs / self.prob_step
pos = np.floor(pos_r).astype(int)
alphas = pos_r - pos
spos = pos - 1
        pos = np.where(pos == len(self.sample), len(self.sample) - 1, pos)
        spos = np.where(spos == -1, 0, spos)
lbound = self.sample[spos]
ubound = self.sample[pos]
dbound = ubound - lbound
data_normed = lbound + alphas * dbound
data_normed = data_normed.reshape(data.shape)
data_raw = self.denormalize(data_normed)
return data_raw, data_normed
def meta(self):
return {
'type': '1d',
'pre_norm': self.pre_norm,
'mean': self.mean,
'std': self.std,
'num_sample': self.num_sample,
'lower_bound': self.lower_bound,
'upper_bound': self.upper_bound,
'prob_step': self.prob_step,
'alpha': self.alpha
}
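# --- Editor's illustrative sketch (hypothetical usage, not part of the original module) ---
# Distrib1d maps raw values onto a standard normal through the empirical CDF of a sorted
# reference sample: probs() interpolates linearly between neighbouring sample points and
# as_normal() pushes those probabilities through norm.ppf; as_raw() inverts the mapping.
def _distrib1d_example():
    sample = np.sort(np.random.RandomState(0).exponential(size=1000))
    dist = Distrib1d(sample, pre_norm=False)
    gaussian, _ = dist.as_normal(np.array([0.1, 0.5, 2.0]))
    raw, _ = dist.as_raw(gaussian)  # approximately recovers [0.1, 0.5, 2.0]
    return gaussian, raw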
class DistribNd:
def __init__(self, sample, pre_norm, normal=None, mean=None, std=None, alpha=1.0):
self.sample = sample
self.lower_bound = sample.T[0]
self.upper_bound = sample.T[-1]
self.max_bound = np.maximum(np.abs(self.upper_bound), np.abs(self.lower_bound))
self.bounds = np.stack((self.lower_bound, self.upper_bound), axis=-1)
self.num_sample = sample.shape[-1]
self.prob_step = 1 / (self.num_sample + 1)
self.sample_probs = np.arange(1, self.num_sample + 1) * self.prob_step
self.pre_norm = pre_norm
if normal is None:
self.normal = norm.ppf(self.sample_probs)
else:
self.normal = normal
if self.pre_norm:
self.lower_bound_normal = self.normal[1]
self.upper_bound_normal = self.normal[-2]
else:
self.lower_bound_normal = self.normal[0]
self.upper_bound_normal = self.normal[-1]
self.max_bound_normal = np.maximum(np.abs(self.upper_bound_normal), np.abs(self.lower_bound_normal))
self.bounds_normal = (self.lower_bound_normal, self.upper_bound_normal)
self.mean = mean
self.std = std
self.alpha = alpha
def clip(self, data):
return np.clip(data.T, self.lower_bound, self.upper_bound).T
def clip_normal(self, data):
return np.clip(data.T, self.lower_bound_normal, self.upper_bound_normal).T
def normalize(self, data):
if not isinstance(data, np.ndarray):
data = np.array(data)
if self.pre_norm:
data = tanh_siglog_norm(data, self.mean, self.std, self.alpha)
return data
def denormalize(self, data):
if not isinstance(data, np.ndarray):
data = np.array(data)
if self.pre_norm:
data = tanh_siglog_denorm(np.clip(data, -1 + 1e-15, 1 - 1e-15), self.mean, self.std, self.alpha)
return data
def search(self, data):
data = self.normalize(data)
data_flat = data.reshape((-1, data.shape[-1]))
pos_flat =
|
np.empty_like(data_flat, np.int32)
|
numpy.empty_like
|
__author__ = 'metjush'
# Implementation of a Classification Decision Tree based on the ID3 algorithm
# ===========================================================================
#
# inputs are numpy arrays X (data matrix of size m*n, where m=sample size and n=feature size)
# and y (label vector, binary or multiclass)
#
# The Tree Classifier is an object that can take input data to train itself, supports cross-validation,
# scoring (based on given ground truth, different score methods = accuracy, matthews, F1), prediction, and
# description of decision rules
import numpy as np
import warnings
from TreeNode import Node
import json
class ClassificationTree:
def __init__(self, depth_limit=None, impurity="gini"):
#depth limit of the tree
self.depth_limit = depth_limit if type(depth_limit) in set({int, float, np.int64, np.float64}) else np.inf
#an array that holds all the nodes created during training
#each level is a separate list
self.nodes = [[]]
#whether the model has been trained
self.trained = False
#set the desired impurity measure
self.impurity = impurity
#dimensions of the supplied training data
self.dimensions = 0
#helper functions
#__classshares() calculates the proportions of each class in the supplied label vector
def __classshares(self, labels):
classes, counts = np.unique(labels, return_counts=True)
shares = ((counts*1.) / len(labels)).reshape((len(classes),1))
return classes, shares
#__bestguess() finds the most probable class in the supplied label vector
def __bestguess(self, labels):
classes, shares = self.__classshares(labels)
max_index = np.argmax(shares)
return classes[max_index]
#__entropy() calculates the entropy of the input dataset
#labels are the data labels
def __entropy(self, labels):
if len(labels) == 0:
return 0.
classes, props = self.__classshares(labels)
entropy = -np.dot( props.T, np.log2(props+0.00001) )
return entropy[0][0]
#__gini() is an alternative impurity calculation to entropy
def __gini(self, labels):
if len(labels) == 0:
return 0.
classes, props = self.__classshares(labels)
gini = 1 - np.dot(props.T, props)
return gini[0][0]
#__impurity() is a wrapper for impurity calculations (entropy/gini)
def __impurity(self, labels):
if self.impurity == "gini":
return self.__gini(labels)
elif self.impurity == "entropy":
return self.__entropy(labels)
else:
return self.__gini(labels)
#__bestsplit() finds the split that results into lowest entropy
def __bestsplit(self, feature, labels):
values = np.unique(feature)
bestentropy = np.inf
bestsplit = 0
#check if the number of values isn't too large
if len(values) > 10:
minv = values.min()
maxv = values.max()
values = np.arange(minv, maxv, (maxv-minv)/10.)
for v in values:
leftmask = feature <= v
rightmask = feature > v
leftentropy = self.__impurity(labels[leftmask])
rightentropy = self.__impurity(labels[rightmask])
#weighted mean of impurity
mean_entropy = (np.sum(leftmask)*leftentropy + np.sum(rightmask)*rightentropy) / len(labels)
if mean_entropy < bestentropy:
bestentropy = mean_entropy
bestsplit = v
return bestsplit, bestentropy
#__algorithm() is the main function for training the decision tree classifier
#it proceeds as follows:
# 1. calculate entropy at root node
# 2. if entropy at root node is zero, there is only one class, so create a terminal node
# and end the algorithm
# 3. if entropy is positive, start searching through possible splits
# 4. for each feature, determine the smallest entropy if the set is split along this feature
# 5. pick the feature with smallest entropy, split the tree
# 6. if the optimal split results into putting all samples down one branch, make the node terminal
# 7. move down the two branches and repeat from 1.
def __algorithm(self, S, labels, level=0, par_node=None, left=False, terminal_flag=False):
#calculate initial entropy
null_entropy = self.__impurity(labels)
#check if everyone is in the same class
if null_entropy <= 0. or level >= self.depth_limit or terminal_flag:
#terminate the algorithm, everyone's been classified or maximum depth has been reached
final_node = Node(parent=par_node,level=level,entropy=null_entropy)
final_node.outcome[0] = self.__bestguess(labels)
self.nodes[level].extend( [final_node] )
return final_node
else:
#go over all the features in this dataset
            features = list(range(S.shape[1]))
min_entropy = np.inf
best_split = [0,0] #this will hold feature number and threshold value for the best split
for f in features:
#try all possible splits along this feature
#return the best (lowest) entropy
#if this entropy is smaller then current minimum, update
Sfeat = S[:,f]
split, entropy = self.__bestsplit(Sfeat, labels)
if entropy < min_entropy:
min_entropy = entropy
best_split = [f, split]
new_node = Node(feature=best_split[0], threshold=best_split[1], parent=par_node, level=level, entropy=min_entropy)
self.nodes[level].extend( [new_node] )
#split dataset
#check if S is a vector
if len(S.shape) == 1:
#S is a one-feature vector
S = S.reshape((len(S),1))
leftMask = S[:,best_split[0]] <= best_split[1]
rightMask = S[:,best_split[0]] > best_split[1]
features.remove(best_split[0])
leftLabels = labels[leftMask]
rightLabels = labels[rightMask]
# check if you shouldn't terminate here
# when the split puts all samples into left or right branch
if leftMask.all():
new_node.make_terminal(self.__bestguess(leftLabels))
return new_node
if rightMask.all():
new_node.make_terminal(self.__bestguess(rightLabels))
return new_node
if len(features) == 0:
leftS = S[leftMask,:]
rightS = S[rightMask,:]
terminal_flag = True
else:
leftS = S[leftMask,:][:,features]
rightS = S[rightMask,:][:,features]
#check if you shouldn't terminate here
if len(leftS) == 0 or leftS.shape[1] == 0:
new_node.make_terminal(self.__bestguess(rightLabels))
return new_node
if len(rightS) == 0 or rightS.shape[1] == 0:
new_node.make_terminal(self.__bestguess(leftLabels))
return new_node
#check if a level below you already exists
try:
self.nodes[level+1]
except IndexError:
self.nodes.append([])
#recursively call self again on the two children nodes
new_node.outcome[0] = self.__algorithm(leftS,leftLabels,level=level+1,par_node=new_node,terminal_flag=terminal_flag)
new_node.outcome[1] = self.__algorithm(rightS,rightLabels,level=level+1,par_node=new_node,terminal_flag=terminal_flag)
return new_node
#print("Tree grown")
#__classify() takes one sample x and classifies it into a label
def __classify(self, x):
node = self.nodes[0][0]
while isinstance(node.outcome[0], Node):
val = x[node.feature]
x = np.delete(x, node.feature)
node = node.decide(val)
return node.outcome[0]
#__untrain() removes old learned nodes when a new train() is called on a trained tree
def __untrain(self):
self.trained = False
self.nodes = [[]]
#__numpify() takes a regular python list and turns it into a numpy array
def __numpify(self, array):
numpied = np.array(array)
if numpied.dtype in ['int64', 'float64']:
return numpied
else:
return False
#__node_count() returns the total number of nodes
def __node_count(self):
if not self.trained:
return 0
else:
n = 0
for level in self.nodes:
n += len(level)
return n
# train() is the function the user calls to train the tree. It's mainly a wrapper for the __algorithm() function
def train(self, X, y):
#check dimensions
if not len(X) == len(y):
raise IndexError("The number of samples in X and y do not match")
#check if X and y are numpy arrays
if type(X) is not np.ndarray:
X = self.__numpify(X)
            if X is False:
raise TypeError("input dataset X is not a valid numeric array")
if type(y) is not np.ndarray:
y = self.__numpify(y)
            if y is False:
raise TypeError("input label vector y is not a valid numeric array")
if self.trained:
self.__untrain()
self.__algorithm(X, y)
self.trained = True
self.dimensions = X.shape[1]
# once the tree has been trained, you can call the predict() function to generate predicted labels for the supplied dataset
def predict(self, X):
if not self.trained:
raise RuntimeError("The decision tree classifier hasn't been trained yet")
if not X.shape[1] == self.dimensions:
raise IndexError("The supplied dataset has %d features, which do not match %d features from training" % (X.shape[1], self.dimensions))
yhat = np.zeros(len(X))
for i,x in enumerate(X):
yhat[i] = self.__classify(x)
return yhat
    # once the tree has been trained, the evaluate() function scores the prediction compared to supplied ground truth.
# there are three scoring methods implemented:
# F1 score is the default:
# its formula is (2*precision*recall)/(precision+recall)
    #   it's preferable to simple accuracy when classes are not balanced
# Accuracy is a simple accuracy measure (percentage of samples correctly classified)
# Matthews correlation coefficient is an alternative to the F1 score for evaluating an algorithm
# when classes are not balanced
def __f1(self, y, yhat):
# check if this is a multi-class problem
classes, _ = self.__classshares(y)
if len(classes) <= 2:
# binary F1
accurate = y == yhat
positive = np.sum(y == 1)
hatpositive =
|
np.sum(yhat == 1)
|
numpy.sum
|
# This file is part of Patsy
# Copyright (C) 2012-2013 <NAME> <<EMAIL>>
# See file LICENSE.txt for license information.
# R-compatible spline basis functions
# These are made available in the patsy.* namespace
__all__ = ["bs"]
import numpy as np
from patsy.util import have_pandas, no_pickling, assert_no_pickling
from patsy.state import stateful_transform
if have_pandas:
import pandas
def _eval_bspline_basis(x, knots, degree):
try:
from scipy.interpolate import splev
except ImportError: # pragma: no cover
raise ImportError("spline functionality requires scipy")
# 'knots' are assumed to be already pre-processed. E.g. usually you
# want to include duplicate copies of boundary knots; you should do
# that *before* calling this constructor.
knots = np.atleast_1d(np.asarray(knots, dtype=float))
assert knots.ndim == 1
knots.sort()
degree = int(degree)
x = np.atleast_1d(x)
if x.ndim == 2 and x.shape[1] == 1:
x = x[:, 0]
assert x.ndim == 1
# XX FIXME: when points fall outside of the boundaries, splev and R seem
# to handle them differently. I don't know why yet. So until we understand
# this and decide what to do with it, I'm going to play it safe and
# disallow such points.
if np.min(x) < np.min(knots) or np.max(x) > np.max(knots):
raise NotImplementedError("some data points fall outside the "
"outermost knots, and I'm not sure how "
"to handle them. (Patches accepted!)")
# Thanks to <NAME> for explaining splev. It's not well
# documented, but basically it computes an arbitrary b-spline basis
# given knots and degree on some specified points (or derivatives
# thereof, but we don't use that functionality), and then returns some
# linear combination of these basis functions. To get out the basis
# functions themselves, we use linear combinations like [1, 0, 0], [0,
# 1, 0], [0, 0, 1].
# NB: This probably makes it rather inefficient (though I haven't checked
# to be sure -- maybe the fortran code actually skips computing the basis
# function for coefficients that are zero).
# Note: the order of a spline is the same as its degree + 1.
# Note: there are (len(knots) - order) basis functions.
n_bases = len(knots) - (degree + 1)
basis = np.empty((x.shape[0], n_bases), dtype=float)
for i in range(n_bases):
coefs = np.zeros((n_bases,))
coefs[i] = 1
basis[:, i] = splev(x, (knots, coefs, degree))
return basis
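# --- Editor's illustrative sketch (hypothetical, not part of the patsy source) ---
# With knots [0, 0, 1, 2, 2] and degree=1 there are 5 - (1 + 1) = 3 hat-shaped basis
# functions; each column of the returned matrix is one basis function evaluated at x.
def _bspline_basis_example():
    x = np.array([0.0, 0.5, 1.0, 1.5, 2.0])
    return _eval_bspline_basis(x, knots=[0, 0, 1, 2, 2], degree=1)  # shape (5, 3)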
def _R_compat_quantile(x, probs):
#return np.percentile(x, 100 * np.asarray(probs))
probs = np.asarray(probs)
quantiles = np.asarray([np.percentile(x, 100 * prob)
for prob in probs.ravel(order="C")])
return quantiles.reshape(probs.shape, order="C")
def test__R_compat_quantile():
def t(x, prob, expected):
assert np.allclose(_R_compat_quantile(x, prob), expected)
t([10, 20], 0.5, 15)
t([10, 20], 0.3, 13)
t([10, 20], [0.3, 0.7], [13, 17])
t(list(range(10)), [0.3, 0.7], [2.7, 6.3])
class BS(object):
"""bs(x, df=None, knots=None, degree=3, include_intercept=False, lower_bound=None, upper_bound=None)
Generates a B-spline basis for ``x``, allowing non-linear fits. The usual
usage is something like::
y ~ 1 + bs(x, 4)
to fit ``y`` as a smooth function of ``x``, with 4 degrees of freedom
given to the smooth.
:arg df: The number of degrees of freedom to use for this spline. The
return value will have this many columns. You must specify at least one
of ``df`` and ``knots``.
:arg knots: The interior knots to use for the spline. If unspecified, then
equally spaced quantiles of the input data are used. You must specify at
least one of ``df`` and ``knots``.
:arg degree: The degree of the spline to use.
:arg include_intercept: If ``True``, then the resulting
spline basis will span the intercept term (i.e., the constant
function). If ``False`` (the default) then this will not be the case,
which is useful for avoiding overspecification in models that include
multiple spline terms and/or an intercept term.
:arg lower_bound: The lower exterior knot location.
:arg upper_bound: The upper exterior knot location.
A spline with ``degree=0`` is piecewise constant with breakpoints at each
knot, and the default knot positions are quantiles of the input. So if you
find yourself in the situation of wanting to quantize a continuous
variable into ``num_bins`` equal-sized bins with a constant effect across
each bin, you can use ``bs(x, num_bins - 1, degree=0)``. (The ``- 1`` is
because one degree of freedom will be taken by the intercept;
alternatively, you could leave the intercept term out of your model and
    use ``bs(x, num_bins, degree=0, include_intercept=True)``.)
A spline with ``degree=1`` is piecewise linear with breakpoints at each
knot.
The default is ``degree=3``, which gives a cubic b-spline.
This is a stateful transform (for details see
:ref:`stateful-transforms`). If ``knots``, ``lower_bound``, or
``upper_bound`` are not specified, they will be calculated from the data
and then the chosen values will be remembered and re-used for prediction
from the fitted model.
Using this function requires scipy be installed.
.. note:: This function is very similar to the R function of the same
name. In cases where both return output at all (e.g., R's ``bs`` will
raise an error if ``degree=0``, while patsy's will not), they should
produce identical output given identical input and parameter settings.
.. warning:: I'm not sure on what the proper handling of points outside
the lower/upper bounds is, so for now attempting to evaluate a spline
basis at such points produces an error. Patches gratefully accepted.
.. versionadded:: 0.2.0
"""
def __init__(self):
self._tmp = {}
self._degree = None
self._all_knots = None
def memorize_chunk(self, x, df=None, knots=None, degree=3,
include_intercept=False,
lower_bound=None, upper_bound=None):
args = {"df": df,
"knots": knots,
"degree": degree,
"include_intercept": include_intercept,
"lower_bound": lower_bound,
"upper_bound": upper_bound,
}
self._tmp["args"] = args
# XX: check whether we need x values before saving them
x = np.atleast_1d(x)
if x.ndim == 2 and x.shape[1] == 1:
x = x[:, 0]
if x.ndim > 1:
raise ValueError("input to 'bs' must be 1-d, "
"or a 2-d column vector")
# There's no better way to compute exact quantiles than memorizing
# all data.
self._tmp.setdefault("xs", []).append(x)
def memorize_finish(self):
tmp = self._tmp
args = tmp["args"]
del self._tmp
if args["degree"] < 0:
raise ValueError("degree must be greater than 0 (not %r)"
% (args["degree"],))
if int(args["degree"]) != args["degree"]:
raise ValueError("degree must be an integer (not %r)"
                             % (args["degree"],))
# These are guaranteed to all be 1d vectors by the code above
x = np.concatenate(tmp["xs"])
if args["df"] is None and args["knots"] is None:
raise ValueError("must specify either df or knots")
order = args["degree"] + 1
if args["df"] is not None:
n_inner_knots = args["df"] - order
if not args["include_intercept"]:
n_inner_knots += 1
if n_inner_knots < 0:
raise ValueError("df=%r is too small for degree=%r and "
"include_intercept=%r; must be >= %s"
% (args["df"], args["degree"],
args["include_intercept"],
# We know that n_inner_knots is negative;
# if df were that much larger, it would
# have been zero, and things would work.
args["df"] - n_inner_knots))
if args["knots"] is not None:
if len(args["knots"]) != n_inner_knots:
raise ValueError("df=%s with degree=%r implies %s knots, "
"but %s knots were provided"
% (args["df"], args["degree"],
n_inner_knots, len(args["knots"])))
else:
# Need to compute inner knots
knot_quantiles = np.linspace(0, 1, n_inner_knots + 2)[1:-1]
inner_knots = _R_compat_quantile(x, knot_quantiles)
if args["knots"] is not None:
inner_knots = args["knots"]
if args["lower_bound"] is not None:
lower_bound = args["lower_bound"]
else:
lower_bound = np.min(x)
if args["upper_bound"] is not None:
upper_bound = args["upper_bound"]
else:
upper_bound = np.max(x)
if lower_bound > upper_bound:
raise ValueError("lower_bound > upper_bound (%r > %r)"
% (lower_bound, upper_bound))
inner_knots = np.asarray(inner_knots)
if inner_knots.ndim > 1:
raise ValueError("knots must be 1 dimensional")
if np.any(inner_knots < lower_bound):
raise ValueError("some knot values (%s) fall below lower bound "
"(%r)"
% (inner_knots[inner_knots < lower_bound],
lower_bound))
if np.any(inner_knots > upper_bound):
raise ValueError("some knot values (%s) fall above upper bound "
"(%r)"
% (inner_knots[inner_knots > upper_bound],
upper_bound))
all_knots = np.concatenate(([lower_bound, upper_bound] * order,
inner_knots))
all_knots.sort()
self._degree = args["degree"]
self._all_knots = all_knots
def transform(self, x, df=None, knots=None, degree=3,
include_intercept=False,
lower_bound=None, upper_bound=None):
basis = _eval_bspline_basis(x, self._all_knots, self._degree)
if not include_intercept:
basis = basis[:, 1:]
if have_pandas:
if isinstance(x, (pandas.Series, pandas.DataFrame)):
basis = pandas.DataFrame(basis)
basis.index = x.index
return basis
__getstate__ = no_pickling
bs = stateful_transform(BS)
def test_bs_compat():
from patsy.test_state import check_stateful
from patsy.test_splines_bs_data import (R_bs_test_x,
R_bs_test_data,
R_bs_num_tests)
lines = R_bs_test_data.split("\n")
tests_ran = 0
start_idx = lines.index("--BEGIN TEST CASE--")
while True:
if not lines[start_idx] == "--BEGIN TEST CASE--":
break
start_idx += 1
stop_idx = lines.index("--END TEST CASE--", start_idx)
block = lines[start_idx:stop_idx]
test_data = {}
for line in block:
key, value = line.split("=", 1)
test_data[key] = value
# Translate the R output into Python calling conventions
kwargs = {
"degree": int(test_data["degree"]),
# integer, or None
"df": eval(test_data["df"]),
# np.array() call, or None
"knots": eval(test_data["knots"]),
}
if test_data["Boundary.knots"] != "None":
lower, upper = eval(test_data["Boundary.knots"])
kwargs["lower_bound"] = lower
kwargs["upper_bound"] = upper
kwargs["include_intercept"] = (test_data["intercept"] == "TRUE")
# Special case: in R, setting intercept=TRUE increases the effective
# dof by 1. Adjust our arguments to match.
# if kwargs["df"] is not None and kwargs["include_intercept"]:
# kwargs["df"] += 1
output = np.asarray(eval(test_data["output"]))
if kwargs["df"] is not None:
assert output.shape[1] == kwargs["df"]
# Do the actual test
check_stateful(BS, False, R_bs_test_x, output, **kwargs)
tests_ran += 1
# Set up for the next one
start_idx = stop_idx + 1
assert tests_ran == R_bs_num_tests
test_bs_compat.slow = 1
# This isn't checked by the above, because R doesn't have zero degree
# b-splines.
def test_bs_0degree():
x = np.logspace(-1, 1, 10)
result = bs(x, knots=[1, 4], degree=0, include_intercept=True)
assert result.shape[1] == 3
expected_0 = np.zeros(10)
expected_0[x < 1] = 1
assert np.array_equal(result[:, 0], expected_0)
expected_1 = np.zeros(10)
expected_1[(x >= 1) & (x < 4)] = 1
assert
|
np.array_equal(result[:, 1], expected_1)
|
numpy.array_equal
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 1 20:38:23 2021
@author: <NAME> -Spatial structure index value distribution of urban streetscape
ref:http://www.richwareham.com/little-planet-projection/
"""
#Stereographic Projection /Stereographic 'little/tiny planet'
import numpy as np
def output_coord_to_r_theta(coords):
"""Convert co-ordinates in the output image to r, theta co-ordinates.
    The r co-ordinate is scaled to range from 0 to 1. The theta
co-ordinate is scaled to range from 0 to 1.
A Nx2 array is returned with r being the first column and theta being
the second.
"""
# Calculate x- and y-co-ordinate offsets from the centre:
x_offset = coords[:,0] - (output_shape[1]/2)
y_offset = coords[:,1] - (output_shape[0]/2)
# Calculate r and theta in pixels and radians:
r = np.sqrt(x_offset ** 2 + y_offset ** 2)
theta = np.arctan2(y_offset, x_offset)
# The maximum value r can take is the diagonal corner:
max_x_offset, max_y_offset = output_shape[1]/2, output_shape[0]/2
max_r = np.sqrt(max_x_offset ** 2 + max_y_offset ** 2)
# Scale r to lie between 0 and 1
r = r / max_r
# arctan2 returns an angle in radians between -pi and +pi. Re-scale
# it to lie between 0 and 1
theta = (theta + np.pi) / (2*np.pi)
# Stack r and theta together into one array. Note that r and theta are initially
# 1-d or "1xN" arrays and so we vertically stack them and then transpose
# to get the desired output.
return np.vstack((r, theta)).T
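# --- Editor's illustrative sketch (hypothetical, not part of the original script) ---
# Assuming the script has defined the global output_shape = (height, width), e.g.
# output_shape = (400, 800): the exact centre maps to r = 0, every corner maps to r = 1,
# and theta sweeps from 0 to 1 once around the centre.
def _r_theta_example():
    centre = np.array([[output_shape[1] / 2, output_shape[0] / 2]])  # (x, y) of the centre
    corner = np.array([[0.0, 0.0]])                                  # top-left corner
    return output_coord_to_r_theta(centre), output_coord_to_r_theta(corner)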
def r_theta_to_input_coords(r_theta):
"""Convert a Nx2 array of r, theta co-ordinates into the corresponding
co-ordinates in the input image.
Return a Nx2 array of input image co-ordinates.
"""
# Extract r and theta from input
r, theta = r_theta[:,0], r_theta[:,1]
# Theta wraps at the side of the image. That is to say that theta=1.1
# is equivalent to theta=0.1 => just extract the fractional part of
# theta
theta = theta -
|
np.floor(theta)
|
numpy.floor
|
####################################################################
# #
# THIS FILE IS PART OF THE pycollada LIBRARY SOURCE CODE. #
# USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS #
# GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE #
# IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. #
# #
# THE pycollada SOURCE CODE IS (C) COPYRIGHT 2011 #
# by <NAME> and contributors #
# #
####################################################################
"""Module containing classes and functions for the <polylist> primitive."""
import numpy
from collada import primitive
from collada import triangleset
from collada.common import E, tag
from collada.common import DaeIncompleteError, DaeBrokenRefError, \
DaeMalformedError, DaeUnsupportedError
from collada.util import toUnitVec, checkSource, xrange
from collada.xmlutil import etree as ElementTree
class Polygon(object):
"""Single polygon representation. Represents a polygon of N points."""
def __init__(self, indices, vertices, normal_indices, normals, texcoord_indices, texcoords, material):
"""A Polygon should not be created manually."""
self.vertices = vertices
"""A (N, 3) float array containing the points in the polygon."""
self.normals = normals
"""A (N, 3) float array with the normals for points in the polygon. Can be None."""
self.texcoords = texcoords
"""A tuple where entries are numpy float arrays of size (N, 2) containing
the texture coordinates for the points in the polygon for each texture
coordinate set. Can be length 0 if there are no texture coordinates."""
self.material = material
"""If coming from an unbound :class:`collada.polylist.Polylist`, contains a
string with the material symbol. If coming from a bound
:class:`collada.polylist.BoundPolylist`, contains the actual
:class:`collada.material.Effect` the line is bound to."""
self.indices = indices
"""A (N,) int array containing the indices for the vertices
of the N points in the polygon."""
self.normal_indices = normal_indices
"""A (N,) int array containing the indices for the normals of
the N points in the polygon"""
self.texcoord_indices = texcoord_indices
"""A (N,2) int array with texture coordinate indexes for the
texcoords of the N points in the polygon"""
def triangles(self):
"""This triangulates the polygon using a simple fanning method.
:rtype: generator of :class:`collada.polylist.Polygon`
"""
npts = len(self.vertices)
for i in range(npts-2):
tri_indices = numpy.array([
self.indices[0], self.indices[i+1], self.indices[i+2]
], dtype=numpy.float32)
tri_vertices = numpy.array([
self.vertices[0], self.vertices[i+1], self.vertices[i+2]
], dtype=numpy.float32)
if self.normals is None:
tri_normals = None
normal_indices = None
else:
tri_normals = numpy.array([
self.normals[0], self.normals[i+1], self.normals[i+2]
], dtype=numpy.float32)
normal_indices = numpy.array([
self.normal_indices[0],
self.normal_indices[i+1],
self.normal_indices[i+2]
], dtype=numpy.float32)
tri_texcoords = []
tri_texcoord_indices = []
for texcoord, texcoord_indices in zip(
self.texcoords, self.texcoord_indices):
tri_texcoords.append(numpy.array([
texcoord[0],
texcoord[i+1],
texcoord[i+2]
], dtype=numpy.float32))
tri_texcoord_indices.append(numpy.array([
texcoord_indices[0],
texcoord_indices[i+1],
texcoord_indices[i+2]
], dtype=numpy.float32))
tri = triangleset.Triangle(
tri_indices, tri_vertices,
normal_indices, tri_normals,
tri_texcoord_indices, tri_texcoords,
self.material)
yield tri
def __repr__(self):
return '<Polygon vertices=%d>' % len(self.vertices)
def __str__(self):
return repr(self)
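# --- Editor's illustrative note (not part of the original pycollada source) ---
# The fanning triangulation in Polygon.triangles() turns an N-gon into N-2 triangles that
# all share vertex 0; this is only a valid tessellation for convex polygons.
def _fan_indices(npts):
    """Editor's sketch: the vertex-index triples produced by the fanning method above."""
    return [(0, i + 1, i + 2) for i in range(npts - 2)]
# _fan_indices(5) -> [(0, 1, 2), (0, 2, 3), (0, 3, 4)]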
class Polylist(primitive.Primitive):
"""Class containing the data COLLADA puts in a <polylist> tag, a collection of
polygons. The Polylist object is read-only. To modify a Polylist, create a new
instance using :meth:`collada.geometry.Geometry.createPolylist`.
* If ``P`` is an instance of :class:`collada.polylist.Polylist`, then ``len(P)``
returns the number of polygons in the set. ``P[i]`` returns the i\ :sup:`th`
polygon in the set.
"""
def __init__(self, sources, material, index, vcounts, xmlnode=None):
"""A Polylist should not be created manually. Instead, call the
:meth:`collada.geometry.Geometry.createPolylist` method after
creating a geometry instance.
"""
if len(sources) == 0: raise DaeIncompleteError('A polylist set needs at least one input for vertex positions')
if not 'VERTEX' in sources: raise DaeIncompleteError('Polylist requires vertex input')
#find max offset
max_offset = max([ max([input[0] for input in input_type_array])
for input_type_array in sources.values() if len(input_type_array) > 0])
self.material = material
self.index = index
self.indices = self.index
self.nindices = max_offset + 1
self.vcounts = vcounts
self.sources = sources
self.index.shape = (-1, self.nindices)
self.npolygons = len(self.vcounts)
self.nvertices = numpy.sum(self.vcounts) if len(self.index) > 0 else 0
self.polyends = numpy.cumsum(self.vcounts)
self.polystarts = self.polyends - self.vcounts
self.polyindex = numpy.dstack((self.polystarts, self.polyends))[0]
if len(self.index) > 0:
self._vertex = sources['VERTEX'][0][4].data
self._vertex_index = self.index[:,sources['VERTEX'][0][0]]
self.maxvertexindex = numpy.max( self._vertex_index )
checkSource(sources['VERTEX'][0][4], ('X', 'Y', 'Z'), self.maxvertexindex)
else:
self._vertex = None
self._vertex_index = None
self.maxvertexindex = -1
if 'NORMAL' in sources and len(sources['NORMAL']) > 0 and len(self.index) > 0:
self._normal = sources['NORMAL'][0][4].data
self._normal_index = self.index[:,sources['NORMAL'][0][0]]
self.maxnormalindex = numpy.max( self._normal_index )
checkSource(sources['NORMAL'][0][4], ('X', 'Y', 'Z'), self.maxnormalindex)
else:
self._normal = None
self._normal_index = None
self.maxnormalindex = -1
if 'TEXCOORD' in sources and len(sources['TEXCOORD']) > 0 \
and len(self.index) > 0:
self._texcoordset = tuple([texinput[4].data
for texinput in sources['TEXCOORD']])
self._texcoord_indexset = tuple([ self.index[:,sources['TEXCOORD'][i][0]]
for i in xrange(len(sources['TEXCOORD'])) ])
self.maxtexcoordsetindex = [numpy.max(each)
for each in self._texcoord_indexset]
for i, texinput in enumerate(sources['TEXCOORD']):
checkSource(texinput[4], ('S', 'T'), self.maxtexcoordsetindex[i])
else:
self._texcoordset = tuple()
self._texcoord_indexset = tuple()
self.maxtexcoordsetindex = -1
if xmlnode is not None:
self.xmlnode = xmlnode
"""ElementTree representation of the line set."""
else:
txtindices = ' '.join(map(str, self.indices.flatten().tolist()))
acclen = len(self.indices)
self.xmlnode = E.polylist(count=str(self.npolygons),
material=self.material)
all_inputs = []
for semantic_list in self.sources.values():
all_inputs.extend(semantic_list)
for offset, semantic, sourceid, set, src in all_inputs:
inpnode = E.input(offset=str(offset), semantic=semantic,
source=sourceid)
if set is not None:
inpnode.set('set', str(set))
self.xmlnode.append(inpnode)
vcountnode = E.vcount(' '.join(map(str, self.vcounts)))
self.xmlnode.append(vcountnode)
self.xmlnode.append(E.p(txtindices))
def __len__(self):
return self.npolygons
def __getitem__(self, i):
polyrange = self.polyindex[i]
vertindex = self._vertex_index[polyrange[0]:polyrange[1]]
v = self._vertex[vertindex]
normalindex = None
if self.normal is None:
n = None
else:
normalindex = self._normal_index[polyrange[0]:polyrange[1]]
n = self._normal[normalindex]
uvindices = []
uv = []
for j, uvindex in enumerate(self._texcoord_indexset):
uvindices.append( uvindex[polyrange[0]:polyrange[1]] )
uv.append( self._texcoordset[j][ uvindex[polyrange[0]:polyrange[1]] ] )
return Polygon(vertindex, v, normalindex, n, uvindices, uv, self.material)
_triangleset = None
def triangleset(self):
"""This performs a simple triangulation of the polylist using the fanning method.
:rtype: :class:`collada.triangleset.TriangleSet`
"""
if self._triangleset is None:
indexselector = numpy.zeros(self.nvertices) == 0
indexselector[self.polyindex[:,1]-1] = False
indexselector[self.polyindex[:,1]-2] = False
indexselector = numpy.arange(self.nvertices)[indexselector]
firstpolyindex = numpy.arange(self.nvertices)
firstpolyindex = firstpolyindex - numpy.repeat(self.polyends - self.vcounts, self.vcounts)
firstpolyindex = firstpolyindex[indexselector]
if len(self.index) > 0:
triindex = numpy.dstack( (self.index[indexselector-firstpolyindex],
self.index[indexselector+1],
self.index[indexselector+2]) )
triindex = numpy.swapaxes(triindex, 1,2).flatten()
else:
triindex =
|
numpy.array([], dtype=self.index.dtype)
|
numpy.array
|
class PondPicker:
def __init__(self, pond):
'get pond data and initialize plot'
# get pond data from Open Altimetry
import numpy as np
import matplotlib.pylab as plt
import json
import requests
self.pond = pond
if pond == 1:
self.latlims = [-72.9969, -72.9890]
self.lonlims = [67.2559, 67.2597]
self.hlims = [217, 224]
#self.has2parts = True
elif pond == 2:
self.latlims = [-72.8937, -72.8757]
self.lonlims = [67.3046, 67.3131]
self.hlims = [204, 212]
#self.has2parts = True
elif pond == 3:
self.latlims = [-71.8767, -71.8669]
self.lonlims = [67.7598, 67.7640]
self.hlims = [89, 98]
#self.has2parts = True
elif pond == 4:
self.latlims = [-71.6481, -71.6376]
self.lonlims = [67.8563, 67.8608]
self.hlims = [76, 88]
#self.has2parts = False
self.url = 'https://openaltimetry.org/data/api/icesat2/atl03?minx={minx}&miny={miny}&maxx={maxx}&maxy={maxy}&trackId=81&beamName=gt2l&outputFormat=json&date=2019-01-02&client=jupyter'
self.url = self.url.format(minx=self.lonlims[0],miny=self.latlims[0],maxx=self.lonlims[1],maxy=self.latlims[1])
print('requesting data: ', self.url)
self.conf_ph = ['Buffer', 'Low', 'Medium', 'High']
r = requests.get(self.url)
self.data = r.json()
self.lat_ph = []
self.lon_ph = []
self.h_ph = []
for beam in self.data:
for photons in beam['series']:
if any(word in photons['name'] for word in self.conf_ph):
for p in photons['data']:
self.lat_ph.append(p[0])
self.lon_ph.append(p[1])
self.h_ph.append(p[2])
# plot the data
self.fig = plt.figure(figsize=[9, 6.5])
self.ax = self.fig.add_subplot(111)
self.colph = np.array([[0.25, 0.25, 0.25]])
self.cols = 'b'
self.colb = np.array([252, 3, 73]) / 255
self.ax.set_title("EDITING THE POND SURFACE: click to select points\npress '2' to edit the lake bed\npress 'backspace' to delete last point or 'n' to start a new line segment",color=self.cols)
self.ax.set_xlabel('latitude')
self.ax.set_ylabel('elevation [m]')
self.ax.spines['bottom'].set_color(self.cols)
self.ax.spines['left'].set_color(self.cols)
self.ax.spines['top'].set_color(self.cols)
self.ax.spines['right'].set_color(self.cols)
self.ax.xaxis.label.set_color(self.cols)
self.ax.yaxis.label.set_color(self.cols)
self.ax.tick_params(axis='x', colors=self.cols)
self.ax.tick_params(axis='y', colors=self.cols)
self.phscat = self.ax.scatter(self.lat_ph,self.h_ph,s=30,c=self.colph,alpha=0.2,edgecolors='none')
self.sline, = self.ax.plot([],[],c=self.cols,ls='-',marker='o',ms=3,mfc='w',mec=self.cols)
self.bline, = self.ax.plot([],[],c=self.colb,ls='-',marker='o',ms=3,mfc='w',mec=self.colb)
self.ax.set_xlim((self.latlims[0], self.latlims[1]))
self.ax.set_ylim((self.hlims[0], self.hlims[1]))
self.xs = list(self.sline.get_xdata())
self.ys = list(self.sline.get_ydata())
self.xb = list(self.bline.get_xdata())
self.yb = list(self.bline.get_ydata())
self.lastkey = '1'
# connect to all the events we need
self.cidpress = self.fig.canvas.mpl_connect('button_press_event', self.on_press)
self.cidback = self.fig.canvas.mpl_connect('key_press_event', self.on_key)
def on_press(self, event):
'add a point on mouse click and update line'
if self.lastkey == '1':
self.xs.append(event.xdata)
self.ys.append(event.ydata)
self.sline.set_data(self.xs, self.ys)
self.sline.figure.canvas.draw()
elif self.lastkey == '2':
self.xb.append(event.xdata)
self.yb.append(event.ydata)
self.bline.set_data(self.xb, self.yb)
self.bline.figure.canvas.draw()
def on_key(self, event):
'delete the last point on backspace and update plot'
if event.key == 'backspace':
if self.lastkey == '1':
del self.xs[-1]
del self.ys[-1]
self.sline.set_data(self.xs, self.ys)
self.sline.figure.canvas.draw()
elif self.lastkey == '2':
del self.xb[-1]
del self.yb[-1]
self.bline.set_data(self.xb, self.yb)
self.bline.figure.canvas.draw()
elif event.key == 'n':
if self.lastkey == '1':
self.xs.append('nan')
self.ys.append('nan')
self.sline.set_data(self.xs, self.ys)
self.sline.figure.canvas.draw()
elif self.lastkey == '2':
self.xb.append('nan')
self.yb.append('nan')
self.bline.set_data(self.xb, self.yb)
self.bline.figure.canvas.draw()
else:
self.lastkey = event.key
if event.key == '1':
col = self.cols
tit = "EDITING THE POND SURFACE: click to select points\npress '2' to edit the lake bed\npress 'backspace' to delete last point or 'n' to start a new line segment"
elif event.key == '2':
col = self.colb
tit = "EDITING THE LAKE BED: click to select points\npress '1' to edit the surface\npress 'backspace' to delete last point or 'n' to start a new line segment"
else:
col = 'k'
tit = "invalid keyboard input\npress '1' to edit the surface or '2' to edit the lake bed"
self.ax.set_title(tit,color=col)
self.ax.spines['bottom'].set_color(col)
self.ax.spines['left'].set_color(col)
self.ax.spines['top'].set_color(col)
self.ax.spines['right'].set_color(col)
self.ax.xaxis.label.set_color(col)
self.ax.yaxis.label.set_color(col)
self.ax.tick_params(axis='x', colors=col)
self.ax.tick_params(axis='y', colors=col)
event.canvas.draw()
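# Hypothetical usage sketch (added for illustration, not part of the original class):
# PondPicker is interactive, so it needs a matplotlib backend that delivers mouse and
# keyboard events (e.g. `%matplotlib widget` in Jupyter). The pond number (1-4) selects
# one of the hard-coded lat/lon/elevation windows above.
#
#   picker = PondPicker(1)  # fetch ATL03 photons from OpenAltimetry and open the editor
#   # click to digitize the surface, press '2' and click to digitize the lake bed,
#   # 'backspace' removes the last point, 'n' starts a new disconnected segment;
#   # the picked points end up in picker.xs/ys (surface) and picker.xb/yb (bed)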
def getDataDownload(pond1,pond2,pond3,pond4,YOUR_NAME):
import pandas as pd
import os
import matplotlib.pylab as plt
import numpy as np
lat = np.array([])
h = np.array([])
pondid = np.array([])
typeid = np.array([])
ponddata = [pond1, pond2, pond3, pond4]
for ipond,p in enumerate(ponddata):
s1 = [p.xs, p.ys]
s2 = [p.xb, p.yb]
for itype,s in enumerate([s1, s2]):
lat =
|
np.append(lat,s[0])
|
numpy.append
|
#
# Tests for the jacobian methods
#
import pybamm
import numpy as np
import unittest
from scipy.sparse import eye
from tests import get_mesh_for_testing
def test_multi_var_function(arg1, arg2):
return arg1 + arg2
class TestJacobian(unittest.TestCase):
def test_variable_is_statevector(self):
a = pybamm.Symbol("a")
with self.assertRaisesRegex(
TypeError, "Jacobian can only be taken with respect to a 'StateVector'"
):
a.jac(a)
def test_linear(self):
y = pybamm.StateVector(slice(0, 4))
u = pybamm.StateVector(slice(0, 2))
v = pybamm.StateVector(slice(2, 4))
y0 = np.ones(4)
func = u
jacobian = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
dfunc_dy = func.jac(y).evaluate(y=y0)
np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())
func = -v
jacobian = np.array([[0, 0, -1, 0], [0, 0, 0, -1]])
dfunc_dy = func.jac(y).evaluate(y=y0)
np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())
func = 3 * u + 4 * v
jacobian = np.array([[3, 0, 4, 0], [0, 3, 0, 4]])
dfunc_dy = func.jac(y).evaluate(y=y0)
np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())
func = 7 * u - v * 9
jacobian = np.array([[7, 0, -9, 0], [0, 7, 0, -9]])
dfunc_dy = func.jac(y).evaluate(y=y0)
np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())
A = pybamm.Matrix(2 * eye(2))
func = A @ u
jacobian = np.array([[2, 0, 0, 0], [0, 2, 0, 0]])
dfunc_dy = func.jac(y).evaluate(y=y0)
np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())
func = u @ pybamm.StateVector(slice(0, 1))
with self.assertRaises(NotImplementedError):
func.jac(y)
# when differentiating by independent part of the state vector
jacobian = np.array([[0, 0], [0, 0]])
du_dv = u.jac(v).evaluate().toarray()
np.testing.assert_array_equal(du_dv, jacobian)
def test_nonlinear(self):
y = pybamm.StateVector(slice(0, 4))
u = pybamm.StateVector(slice(0, 2))
v = pybamm.StateVector(slice(2, 4))
y0 = np.array([1, 2, 3, 4])
func = v ** 2
jacobian = np.array([[0, 0, 6, 0], [0, 0, 0, 8]])
dfunc_dy = func.jac(y).evaluate(y=y0)
np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())
func = 2 ** v
jacobian = np.array(
[[0, 0, 2 ** 3 * np.log(2), 0], [0, 0, 0, 2 ** 4 * np.log(2)]]
)
dfunc_dy = func.jac(y).evaluate(y=y0)
np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())
func = v ** v
jacobian = [[0, 0, 27 * (1 + np.log(3)), 0], [0, 0, 0, 256 * (1 + np.log(4))]]
dfunc_dy = func.jac(y).evaluate(y=y0)
np.testing.assert_array_almost_equal(jacobian, dfunc_dy.toarray())
func = u * v
jacobian = np.array([[3, 0, 1, 0], [0, 4, 0, 2]])
dfunc_dy = func.jac(y).evaluate(y=y0)
np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())
func = u * (u + v)
jacobian = np.array([[5, 0, 1, 0], [0, 8, 0, 2]])
dfunc_dy = func.jac(y).evaluate(y=y0)
np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())
func = 1 / u + v / 3
jacobian = np.array([[-1, 0, 1 / 3, 0], [0, -1 / 4, 0, 1 / 3]])
dfunc_dy = func.jac(y).evaluate(y=y0)
np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())
func = u / v
jacobian = np.array([[1 / 3, 0, -1 / 9, 0], [0, 1 / 4, 0, -1 / 8]])
dfunc_dy = func.jac(y).evaluate(y=y0)
np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())
func = v / (1 + v)
jacobian = np.array([[0, 0, 1 / 16, 0], [0, 0, 0, 1 / 25]])
dfunc_dy = func.jac(y).evaluate(y=y0)
np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())
def test_multislice_raises(self):
y1 = pybamm.StateVector(slice(0, 4), slice(7, 8))
y_dot1 = pybamm.StateVectorDot(slice(0, 4), slice(7, 8))
y2 = pybamm.StateVector(slice(4, 7))
with self.assertRaises(NotImplementedError):
y1.jac(y1)
with self.assertRaises(NotImplementedError):
y2.jac(y1)
with self.assertRaises(NotImplementedError):
y_dot1.jac(y1)
def test_linear_ydot(self):
y = pybamm.StateVector(slice(0, 4))
y_dot = pybamm.StateVectorDot(slice(0, 4))
u = pybamm.StateVector(slice(0, 2))
v = pybamm.StateVector(slice(2, 4))
u_dot = pybamm.StateVectorDot(slice(0, 2))
v_dot = pybamm.StateVectorDot(slice(2, 4))
y0 = np.ones(4)
y_dot0 = np.ones(4)
func = u_dot
jacobian = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
dfunc_dy = func.jac(y_dot).evaluate(y=y0, y_dot=y_dot0)
np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())
func = -v_dot
jacobian = np.array([[0, 0, -1, 0], [0, 0, 0, -1]])
dfunc_dy = func.jac(y_dot).evaluate(y=y0, y_dot=y_dot0)
np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())
func = u_dot
jacobian = np.array([[0, 0, 0, 0], [0, 0, 0, 0]])
dfunc_dy = func.jac(y).evaluate(y=y0, y_dot=y_dot0)
np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())
func = -v_dot
jacobian = np.array([[0, 0, 0, 0], [0, 0, 0, 0]])
dfunc_dy = func.jac(y).evaluate(y=y0, y_dot=y_dot0)
np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())
func = u
jacobian = np.array([[0, 0, 0, 0], [0, 0, 0, 0]])
dfunc_dy = func.jac(y_dot).evaluate(y=y0, y_dot=y_dot0)
np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())
func = -v
jacobian = np.array([[0, 0, 0, 0], [0, 0, 0, 0]])
dfunc_dy = func.jac(y_dot).evaluate(y=y0, y_dot=y_dot0)
np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())
def test_functions(self):
y = pybamm.StateVector(slice(0, 4))
u = pybamm.StateVector(slice(0, 2))
v = pybamm.StateVector(slice(2, 4))
const = pybamm.Scalar(1)
y0 = np.array([1.0, 2.0, 3.0, 4.0])
func = pybamm.sin(u)
jacobian = np.array([[np.cos(1), 0, 0, 0], [0, np.cos(2), 0, 0]])
dfunc_dy = func.jac(y).evaluate(y=y0)
np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())
func = pybamm.cos(v)
jacobian = np.array([[0, 0, -np.sin(3), 0], [0, 0, 0, -np.sin(4)]])
dfunc_dy = func.jac(y).evaluate(y=y0)
np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())
func = pybamm.sin(3 * u * v)
jacobian = np.array(
[
[9 * np.cos(9), 0, 3 * np.cos(9), 0],
[0, 12 * np.cos(24), 0, 6 * np.cos(24)],
]
)
dfunc_dy = func.jac(y).evaluate(y=y0)
np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())
func = pybamm.cos(5 * pybamm.exp(u + v))
jacobian = np.array(
[
[
-5 * np.exp(4) * np.sin(5 * np.exp(4)),
0,
-5 * np.exp(4) * np.sin(5 * np.exp(4)),
0,
],
[
0,
-5 * np.exp(6) * np.sin(5 * np.exp(6)),
0,
-5 * np.exp(6) * np.sin(5 *
|
np.exp(6)
|
numpy.exp
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#xyz2tab
import sys #stdout
import argparse #argument parser
import re #regex
import itertools #for r-length tuples, in sorted order, no repeated elements
import pandas as pd #pandas tables
import numpy as np #for calculations
from scipy.spatial.distance import pdist, squareform, cosine #for the calculations of the distance matrix and angles (cosine)
from tabulate import tabulate #nice table output
import matplotlib.pyplot as plt #for molecule display
from mpl_toolkits.mplot3d import Axes3D #for molecule display
from matplotlib.patches import FancyArrowPatch #for fancy arrows in xyz
from mpl_toolkits.mplot3d import proj3d #for fancy arrows in xyz
#for windows console
sys.stdout.reconfigure(encoding='utf-8')
#pd.set_option("display.max_rows", None, "display.max_columns", None)
#+x % to covalence radius
#radii_ext = 8 / 100
#covalent radii from Alvarez (2008)
#DOI: 10.1039/b801115j
covalent_radii = {
'H': 0.31, 'He': 0.28, 'Li': 1.28,
'Be': 0.96, 'B': 0.84, 'C': 0.76,
'N': 0.71, 'O': 0.66, 'F': 0.57, 'Ne': 0.58,
'Na': 1.66, 'Mg': 1.41, 'Al': 1.21, 'Si': 1.11,
'P': 1.07, 'S': 1.05, 'Cl': 1.02, 'Ar': 1.06,
'K': 2.03, 'Ca': 1.76, 'Sc': 1.70, 'Ti': 1.60,
'V': 1.53, 'Cr': 1.39, 'Mn': 1.61, 'Fe': 1.52,
'Co': 1.50, 'Ni': 1.24, 'Cu': 1.32, 'Zn': 1.22,
'Ga': 1.22, 'Ge': 1.20, 'As': 1.19, 'Se': 1.20,
'Br': 1.20, 'Kr': 1.16, 'Rb': 2.20, 'Sr': 1.95,
'Y': 1.90, 'Zr': 1.75, 'Nb': 1.64, 'Mo': 1.54,
'Tc': 1.47, 'Ru': 1.46, 'Rh': 1.42, 'Pd': 1.39,
'Ag': 1.45, 'Cd': 1.44, 'In': 1.42, 'Sn': 1.39,
'Sb': 1.39, 'Te': 1.38, 'I': 1.39, 'Xe': 1.40,
'Cs': 2.44, 'Ba': 2.15, 'La': 2.07, 'Ce': 2.04,
'Pr': 2.03, 'Nd': 2.01, 'Pm': 1.99, 'Sm': 1.98,
'Eu': 1.98, 'Gd': 1.96, 'Tb': 1.94, 'Dy': 1.92,
'Ho': 1.92, 'Er': 1.89, 'Tm': 1.90, 'Yb': 1.87,
'Lu': 1.87, 'Hf': 1.75, 'Ta': 1.70, 'W': 1.62,
'Re': 1.51, 'Os': 1.44, 'Ir': 1.41, 'Pt': 1.36,
'Au': 1.36, 'Hg': 1.32, 'Tl': 1.45, 'Pb': 1.46,
'Bi': 1.48, 'Po': 1.40, 'At': 1.50, 'Rn': 1.50,
'Fr': 2.60, 'Ra': 2.21, 'Ac': 2.15, 'Th': 2.06,
'Pa': 2.00, 'U': 1.96, 'Np': 1.90, 'Pu': 1.87,
'Am': 1.80, 'Cm': 1.69
}
#atomic weights
atomic_weights = {
'H' : 1.008,'He' : 4.003, 'Li' : 6.941, 'Be' : 9.012,
'B' : 10.811, 'C' : 12.011, 'N' : 14.007, 'O' : 15.999,
'F' : 18.998, 'Ne' : 20.180, 'Na' : 22.990, 'Mg' : 24.305,
'Al' : 26.982, 'Si' : 28.086, 'P' : 30.974, 'S' : 32.066,
'Cl' : 35.453, 'Ar' : 39.948, 'K' : 39.098, 'Ca' : 40.078,
'Sc' : 44.956, 'Ti' : 47.867, 'V' : 50.942, 'Cr' : 51.996,
'Mn' : 54.938, 'Fe' : 55.845, 'Co' : 58.933, 'Ni' : 58.693,
'Cu' : 63.546, 'Zn' : 65.38, 'Ga' : 69.723, 'Ge' : 72.631,
	'As' : 74.922, 'Se' : 78.971, 'Br' : 79.904, 'Kr' : 83.798,
	'Rb' : 85.468, 'Sr' : 87.62, 'Y' : 88.906, 'Zr' : 91.224,
'Nb' : 92.906, 'Mo' : 95.95, 'Tc' : 98.907, 'Ru' : 101.07,
'Rh' : 102.906, 'Pd' : 106.42, 'Ag' : 107.868, 'Cd' : 112.414,
	'In' : 114.818, 'Sn' : 118.711, 'Sb' : 121.760, 'Te' : 127.60,
'I' : 126.904, 'Xe' : 131.294, 'Cs' : 132.905, 'Ba' : 137.328,
'La' : 138.905, 'Ce' : 140.116, 'Pr' : 140.908, 'Nd' : 144.243,
'Pm' : 144.913, 'Sm' : 150.36, 'Eu' : 151.964, 'Gd' : 157.25,
'Tb' : 158.925, 'Dy': 162.500, 'Ho' : 164.930, 'Er' : 167.259,
'Tm' : 168.934, 'Yb' : 173.055, 'Lu' : 174.967, 'Hf' : 178.49,
'Ta' : 180.948, 'W' : 183.84, 'Re' : 186.207, 'Os' : 190.23,
'Ir' : 192.217, 'Pt' : 195.085, 'Au' : 196.967, 'Hg' : 200.592,
'Tl' : 204.383, 'Pb' : 207.2, 'Bi' : 208.980, 'Po' : 208.982,
'At' : 209.987, 'Rn' : 222.081, 'Fr' : 223.020, 'Ra' : 226.025,
'Ac' : 227.028, 'Th' : 232.038, 'Pa' : 231.036, 'U' : 238.029,
'Np' : 237, 'Pu' : 244, 'Am' : 243, 'Cm' : 247
}
#dict for numbers to subscript numbers
utf_sub_dict = {
"0" : "₀",
"1" : "₁",
"2" : "₂",
"3" : "₃",
"4" : "₄",
"5" : "₅",
"6" : "₆",
"7" : "₇",
"8" : "₈",
"9" : "₉",
}
#numbers to subscript (utf8) numbers
def num_to_subnum(number):
utf_number=''
for letter in str(number):
utf_letter=utf_sub_dict[letter]
utf_number=utf_number+utf_letter
return(utf_number)
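# quick illustrative example (comment only): num_to_subnum(12) returns '₁₂',
# e.g. for building subscripted formula strings such as C₆H₆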
#removes the upper triangle of the distance matrix and zeros
#e.g., from d(A-B) = 1.234 Å = d(B-A) =1.234 Å, d(B-A) will be removed
#d(A-B) = 0 Å will be removed as well
def dm_to_series1(df):
df = df.astype(float) # do not comment this, angle list will be incomplete
df.values[np.triu_indices_from(df, k=1)] = np.nan
#replace zeros with nan
df = df.replace(0, np.nan)
#return and drop all nan
return df.unstack().dropna()
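# illustrative example (made-up atom labels, comment only):
#   dm = pd.DataFrame([[0.0, 1.52], [1.52, 0.0]], index=['C0', 'O1'], columns=['C0', 'O1'])
#   dm_to_series1(dm) -> Series with the single entry ('C0', 'O1') = 1.52,
#   i.e. each pair distance is kept exactly once and the zero diagonal is dropped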
#calculate angle from 3 vectors / atomic coordinates: i(x,y,z); j(x,y,z); k(x,y,z)
#xyzarr is the array of all atomic coordinates
def calc_angle(xyzarr, i, j, k):
rij = xyzarr[i] - xyzarr[j]
rkj = xyzarr[k] - xyzarr[j]
#remove if cosine fails
#cos_theta = np.dot(rij, rkj)
#sin_theta = np.linalg.norm(np.cross(rij, rkj))
#theta = np.arctan2(sin_theta, cos_theta)
#scipy pdist cosine instead of the 3 lines above
theta = cosine(rij,rkj)
theta = np.arccos(1-theta)
return np.degrees(theta)
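# sanity-check example (comment only): three atoms at (1,0,0), (0,0,0), (0,1,0)
#   xyzarr = np.array([[1., 0., 0.], [0., 0., 0.], [0., 1., 0.]])
#   calc_angle(xyzarr, 0, 1, 2) -> 90.0 (angle in degrees at the central atom j)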
#calculate the dihedral angle from 4 vectors / atomic coordinates: i(x,y,z); j(x,y,z); k(x,y,z); l(x,y,z)
def calc_d_angle(xyzarr, i, j, k, l):
#no warning if division by zero
np.seterr(invalid='ignore')
rji = -1*(xyzarr[j] - xyzarr[i])
rkj = xyzarr[k] - xyzarr[j]
rlk = xyzarr[l] - xyzarr[k]
rkj /= np.linalg.norm(rkj)
v = rji - np.dot(rji, rkj)*rkj
w = rlk - np.dot(rlk, rkj)*rkj
x = np.dot(v, w)
y = np.dot(np.cross(rkj, v), w)
return np.degrees(np.arctan2(y,x))
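# sanity-check example (comment only): j-k along x, i offset in y, l offset in z
#   xyzarr = np.array([[0., 1., 0.], [0., 0., 0.], [1., 0., 0.], [1., 0., 1.]])
#   calc_d_angle(xyzarr, 0, 1, 2, 3) -> 90.0 (dihedral angle in degrees)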
#calculation of the best-fit plane
#https://gist.github.com/bdrown/a2bc1da0123b142916c2f343a20784b4
def svd_fit(X):
C = np.average(X, axis=0)
# Create CX vector (centroid to point) matrix
CX = X - C
# Singular value decomposition
U, S, V = np.linalg.svd(CX)
# The last row of V matrix indicate the eigenvectors of
# smallest eigenvalues (singular values).
N = V[-1]
return C, N
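# sanity-check example (comment only): four points lying in the z = 0 plane
#   X = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [1., 1., 0.]])
#   C, N = svd_fit(X) -> C = [0.5, 0.5, 0.], N parallel to [0., 0., 1.] (the plane normal)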
#https://stackoverflow.com/questions/13685386/matplotlib-equal-unit-length-with-equal-aspect-ratio-z-axis-is-not-equal-to
def set_axes_equal(ax):
'''Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc.. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
'''
x_limits = ax.get_xlim3d()
y_limits = ax.get_ylim3d()
z_limits = ax.get_zlim3d()
x_range = abs(x_limits[1] - x_limits[0])
x_middle =
|
np.mean(x_limits)
|
numpy.mean
|
"""
File: psf2otf.py
Author: Nrupatunga
Email: <EMAIL>
Github: https://github.com/nrupatunga
Description: Implementation of matlab's psf2otf
Notes: In order to understand psf2otf:
FFT does cyclic convolution. To understand what cyclic convolution is
please refer to the document below (also in the docs)
https://www.docdroid.net/YSKkZ5Y/fft-based-2d-cyclic-convolution-pdf#page=5
"""
from typing import Optional
import matplotlib.pyplot as plt
import numpy as np
def circshift(psf: np.ndarray, shift: np.ndarray) -> np.ndarray:
"""Circular shifts
@psf: input psf
@shift: shifts correspoinding to each dimension
@returns: TODO
"""
shift = np.int32(shift)
for i in range(shift.size):
psf = np.roll(psf, shift[i], axis=i)
return psf
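# illustrative example (comment only): shift rows by 1 and columns by 2
#   a = np.arange(6).reshape(2, 3)    # [[0 1 2], [3 4 5]]
#   circshift(a, np.array([1, 2]))    # [[4 5 3], [1 2 0]]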
def surf_plot(data: np.ndarray):
x = np.linspace(0, data.shape[1], data.shape[1])
y =
|
np.linspace(0, data.shape[0], data.shape[0])
|
numpy.linspace
|
import matplotlib.pyplot as plt
import numpy as np
from scipy import interpolate
import obspy
from PIL import Image
from matplotlib.lines import Line2D
import os
from matplotlib.patches import Circle
from obspy.imaging.beachball import beach
import matplotlib.image as mpimg
import io
from sklearn import preprocessing
import pandas as pd
import matplotlib.cm as cm
import glob
from os.path import join as pjoin
from typing import List as _List, Union as _Union
import instaseis
from obspy import UTCDateTime as utct
from obspy.imaging.beachball import aux_plane
from sklearn.cluster import KMeans
from pyrocko import moment_tensor as mtm
import matplotlib.patches as mpatches
import glob
pyproj_datadir = os.environ["PROJ_LIB"]
from mpl_toolkits.basemap import Basemap
import re
from SS_MTI import Read_H5 as _ReadH5
from SS_MTI import MTDecompose as _MTDecompose
from SS_MTI import Forward as _Forward
from SS_MTI import PreProcess as _PreProcess
from SS_MTI import GreensFunctions as _GreensFunctions
from SS_MTI import RadiationPattern as _RadiationPattern
def Plot_veloc_models(Taup_model, depth_event=None, depth_syn=None):
depth = np.array([])
Vp = np.array([])
Vs = np.array([])
dens = np.array([])
for i, values in enumerate(Taup_model.model.s_mod.v_mod.layers):
depth = np.append(depth, values[0])
depth = np.append(depth, values[1])
Vp = np.append(Vp, values[2])
Vp = np.append(Vp, values[3])
Vs = np.append(Vs, values[4])
Vs = np.append(Vs, values[5])
dens = np.append(dens, values[6])
dens = np.append(dens, values[7])
fig, ax = plt.subplots(1, 3, sharey="all", sharex="all", figsize=(8, 6))
ax[0].plot(Vp, depth)
if depth_event is not None:
int_vp = interpolate.interp1d(depth, Vp)
event_vp = int_vp(depth_event)
ax[0].plot(event_vp, depth_event, "g*", markersize=15, label="Event Depth")
if depth_syn is not None:
for i in range(len(depth_syn)):
event_vp = int_vp(depth_syn[i])
if i == 0:
ax[0].plot(
event_vp, depth_syn[i], "r*", markersize=15, label="Synthetic Depth",
)
else:
ax[0].plot(event_vp, depth_syn[i], "r*", markersize=15, label="_hidden")
ax[0].set_title("VP", color="b", fontsize=20)
ax[0].set_ylabel("Depth [km]", fontsize=20)
ax[0].tick_params(axis="x", labelsize=18)
ax[0].tick_params(axis="y", labelsize=18)
# ax[0].ticklabel_format(style="sci", axis='y', scilimits=(-2, 2))
ax[0].grid(True)
# ax[0].set_ylim([500,0])
ax[0].set_xlim([0, 8])
ax[1].plot(Vs, depth, label="Shallow")
if depth_event is not None:
int_vs = interpolate.interp1d(depth, Vs)
event_vs = int_vs(depth_event)
ax[1].plot(event_vs, depth_event, "g*", markersize=15, label="Event Depth")
if depth_syn is not None:
for i in range(len(depth_syn)):
event_vs = int_vs(depth_syn[i])
if i == 0:
ax[1].plot(
event_vs, depth_syn[i], "r*", markersize=15, label="Synthetic Depth",
)
else:
ax[1].plot(event_vs, depth_syn[i], "r*", markersize=15, label="_hidden")
# ax[1].legend()
ax[1].set_title("VS", color="b", fontsize=20)
ax[1].tick_params(axis="x", labelsize=18)
ax[1].tick_params(axis="y", labelsize=18)
# ax[1].ticklabel_format(style="sci", axis='y', scilimits=(-2, 2))
ax[1].grid(True)
# ax[0].set_ylim([0,100])
ax[2].plot(dens, depth)
if depth_event is not None:
int_dens = interpolate.interp1d(depth, dens)
event_dens = int_dens(depth_event)
ax[2].plot(event_dens, depth_event, "g*", markersize=15, label="Event Depth")
if depth_syn is not None:
for i in range(len(depth_syn)):
event_dens = int_dens(depth_syn[i])
if i == 0:
ax[2].plot(
event_dens, depth_syn[i], "r*", markersize=15, label="Synthetic Depth",
)
else:
ax[2].plot(event_dens, depth_syn[i], "r*", markersize=15, label="_hidden")
ax[2].legend()
ax[2].set_title("Density", color="b", fontsize=20)
ax[2].tick_params(axis="x", labelsize=18)
ax[2].tick_params(axis="y", labelsize=18)
# ax[2].ticklabel_format(style="sci", axis='y', scilimits=(-2, 2))
ax[2].grid(True)
ax[2].set_ylim([0, 100])
ax[0].set_ylim(ax[0].get_ylim()[::-1])
return fig
def Plot_trace_vs_depth_copy(
stream: obspy.Stream,
depth: float,
total_depths: int,
Ytick: float,
phase: str,
phase_arr: float,
t_pre: float = 10.0,
t_post: float = 50.0,
fig: plt.figure = None,
ax: plt.axes = None,
extra_phases: [str] = None,
extra_arrs: [float] = None,
phase_colors: [str] = None,
phase_labels: dict = None,
):
if fig is None and ax is None:
fig, ax = plt.subplots(
nrows=1,
ncols=len(stream),
figsize=(5 * len(stream), 2 * total_depths),
sharex="col",
sharey="all",
)
st = stream.copy()
global_max = max([tr.data.max() for tr in st])
global_min = min([tr.data.min() for tr in st])
y = global_max * 0.9 + Ytick
ymin = global_min + Ytick
ymax = global_max + Ytick
for i in range(len(stream)):
ax[i].plot(
st[i].times() - t_pre, st[i].data + Ytick, "k",
)
ax[i].plot(
[0, 0], [ymin, ymax], "grey",
)
ax[i].text(0, y, phase, verticalalignment="center", color="grey", fontsize=6)
if extra_phases is not None:
for k in range(len(extra_phases)):
if extra_arrs[k] is None:
continue
phase_t = extra_arrs[k]
if phase_colors is None:
y = global_max * 0.9 + Ytick
c = "grey"
else:
y = global_max * 0.9 + Ytick
ind = re.findall(r"\d+", extra_phases[k])
if ind:
if len(ind) == 2:
if int(ind[0]) < depth:
c = "blue"
y = global_min * 0.4 + Ytick
else:
c = "red"
y = global_min * 0.4 + Ytick
else:
c = phase_colors[k]
else:
c = phase_colors[k]
y = global_max * 0.9 + Ytick
ax[i].plot(
[phase_t, phase_t], [ymin, ymax], c,
)
ax[i].text(
phase_t + 0.1,
y,
extra_phases[k],
verticalalignment="center",
color=c,
fontsize=6,
rotation=90,
)
ax[i].set_xlim(-t_pre, t_post)
ax[i].set_title(f"{phase}-Phase channel:{st[i].stats.channel}")
if phase_colors is not None:
unique_colors = list(set(phase_colors))
# unique_list = [mpatches.Patch(color=c, label=phase_labels[c]) for c in phase_labels]
unique_list = [
Line2D([0], [0], color=c, linewidth=3, label=phase_labels[c]) for c in phase_labels
]
ax[0].legend(
handles=unique_list, prop={"size": 6}, loc="upper left", bbox_to_anchor=(0.0, 1.07),
)
# fig.legend(handles=unique_list, prop={"size": 6}, loc="upper left")
fig.text(0.04, 0.5, "Source Depth (km)", va="center", rotation="vertical")
fig.text(0.5, 0.04, "Time after arrival (s)", va="center")
return fig, ax
def Plot_trace_vs_depth(
stream: obspy.Stream,
phase: str,
total_depths: int,
Ytick: float,
t_pre: float = 10.0,
t_post: float = 50.0,
fig: plt.figure = None,
ax: plt.axes = None,
):
if fig is None and ax is None:
fig, ax = plt.subplots(
nrows=1,
ncols=len(stream),
figsize=(5 * len(stream), 2 * total_depths),
sharex="col",
sharey="all",
)
st = stream.copy()
global_max = max([tr.data.max() for tr in st])
global_min = min([tr.data.min() for tr in st])
y = global_max * 0.9 + Ytick
ymin = global_min + Ytick
ymax = global_max + Ytick
for i in range(len(stream)):
ax[i].plot(
st[i].times() - t_pre, st[i].data + Ytick, "k",
)
ax[i].set_xlim(-t_pre, t_post)
ax[i].set_title(f"{phase}-Phase channel:{st[i].stats.channel}")
fig.text(0.04, 0.5, "Source Depth (km)", va="center", rotation="vertical")
fig.text(0.5, 0.04, "Time after arrival (s)", va="center")
return fig, ax
def Plot_phases_vs_comp(
stream: obspy.Stream,
phase_cuts: [str],
phase_arrs: [float],
t_pre: float = 20.0,
t_post: float = 60.0,
extra_phases: [str] = None,
extra_arrs: [float] = None,
phase_colors: [str] = None,
phase_labels: dict = None,
):
""" Plotting function that cuts the stream the phases in phase_cuts"""
if not len(phase_cuts) == len(phase_arrs):
raise ValueError("phase_cut and phase_arrs should have same length")
if extra_phases is not None:
if not len(extra_phases) == len(extra_arrs):
raise ValueError("extra_phases and extra_arrs should have same length")
fig, ax = plt.subplots(
nrows=len(stream), ncols=len(phase_cuts), figsize=(18, 8), sharex="col", sharey="all",
)
for j in range(len(phase_cuts)):
st = stream.copy()
st.trim(
starttime=st[0].stats.starttime + phase_arrs[j] - t_pre,
endtime=st[0].stats.starttime + phase_arrs[j] + t_post,
)
for i in range(len(stream)):
ax[i, j].plot(
st[i].times() - t_pre, st[i].data, "k",
)
y = ax[i, j].get_ylim()[1] * 0.8
ax[i, j].axvline(x=0, c="grey")
ax[i, j].text(
0, y, phase_cuts[j], verticalalignment="center", color="grey", fontsize=6,
)
if extra_phases is not None:
for k in range(len(extra_phases)):
if extra_arrs[k] is None:
continue
if phase_colors is None:
c = "grey"
else:
c = phase_colors[k]
phase_t = extra_arrs[k] - phase_arrs[j]
ax[i, j].axvline(x=phase_t, c=c)
ax[i, j].text(
phase_t + 0.1,
y,
extra_phases[k],
verticalalignment="center",
color=c,
fontsize=6,
rotation=90,
)
ax[i, j].set_xlim(-t_pre, t_post)
if i == 0:
ax[i, j].set_title(f"{phase_cuts[j]}-phase")
if j == 0:
ax[i, j].set_ylabel(st[i].stats.channel)
ax[0, 0].set_ylim(-1, 1)
if phase_colors is not None:
unique_colors = list(set(phase_colors))
# unique_list = [mpatches.Patch(color=c, label=phase_labels[c]) for c in phase_labels]
unique_list = [
Line2D([0], [0], color=c, linewidth=3, label=phase_labels[c]) for c in phase_labels
]
ax[0, 0].legend(
handles=unique_list, prop={"size": 6}, loc="upper left", bbox_to_anchor=(0.0, 1.4),
)
fig.text(0.04, 0.5, "Displacement (m)", va="center", rotation="vertical")
fig.text(0.5, 0.04, "Time after arrival (s)", va="center")
return fig, ax
def Plot_event_location(
la_s: float, lo_s: float, la_r: float, lo_r: float, name: str = "test event"
):
# la_s = event.latitude
# lo_s = event.longitude
mars_dir = "/home/nienke/Documents/Research/Data/mars_pictures/Mars_lightgray.jpg"
fig = plt.figure(figsize=(10, 8))
# m = Basemap(projection='moll', lon_0=round(0.0))
m = Basemap(
projection="merc", llcrnrlat=-80, urcrnrlat=80, llcrnrlon=0, urcrnrlon=200, resolution="c",
)
# draw parallels and meridians.
par = np.arange(-90, 90, 30)
label_par = np.full(len(par), True, dtype=bool)
meridians = np.arange(-180, 180, 30)
label_meri = np.full(len(meridians), True, dtype=bool)
m.drawmeridians(np.arange(-180, 180, 30), labels=label_meri)
    m.drawparallels(np.arange(-90, 90, 30), labels=label_par)
m.warpimage(mars_dir)
mstatlon, mstatlat = m(lo_r, la_r)
m.plot(mstatlon, mstatlat, "k^", markersize=20, label="InSight")
EQlonA, EQlatA = m(lo_s, la_s)
# EQlonB, EQlatB = m(lo_sB, la_sB) # 235b
# EQlonC, EQlatC = m(lo_sC, la_sC)
# EQlonD, EQlatD = m(lo_sC, la_sC)
m.plot(EQlonA, EQlatA, "r*", markersize=20, zorder=10, label=name)
# m.plot(EQlonB, EQlatB, 'g*', markersize=20, zorder=10, label = event_B.name)
# m.plot(EQlonC, EQlatC, 'b*', markersize=20, zorder=10, label=event_C.name)
plt.legend(fontsize=20)
plt.tight_layout()
# plt.show()
# plt.savefig('Location_Event.pdf')
return fig
""" Plot beachballs """
def Get_bb_img(MT, color, alpha=1.0):
### FULL MOMENT TENSOR
img = None
buf = io.BytesIO()
fig_bb = plt.figure(figsize=(5, 5), dpi=200)
ax_bb_1 = fig_bb.add_axes([0.0, 0.0, 1.0, 1.0])
ax_bb_1.set_xticks([])
ax_bb_1.set_yticks([])
ax_bb_1.axis("off")
if np.count_nonzero(MT) < 6 and len(MT) == 6:
pass
else:
b = beach(
fm=MT, width=990, linewidth=0, facecolor=color, xy=(0, 0), axes=ax_bb_1, alpha=alpha,
)
ax_bb_1.add_collection(b)
ax_bb_1.set_xlim((-1, 1))
ax_bb_1.set_ylim((-1, 1))
buf.seek(0)
fig_bb.savefig(buf, format="png", dpi=200)
buf.seek(0)
if img is None:
img = mpimg.imread(buf)
else:
img += mpimg.imread(buf)
plt.close(fig_bb)
return img, buf
def Plot_Direct_BB(
MT_Full,
Eps,
MT_DC,
M0_DC,
MT_CLVD,
M0_CLVD,
azimuths,
inc_angles,
phase_names,
color,
height=None,
horizontal=False,
):
if horizontal:
width = 15.0
height = 6.0
axis_height = 5.0 / height
        resid_height = 1.0 - axis_height
        title_height = resid_height
axis_width = 5.0 / width
else:
if height == None:
height = 19.0
axis_height = 5.0 / height
resid_height = 1.0 - 3.0 * axis_height
title_height = resid_height / 3.0
DC_scal = np.sqrt(1 - Eps / 0.5)
CLVD_scal = np.sqrt(1 - (1 - Eps / 0.5))
## Full moment tensor:
img1, buf1 = Get_bb_img(MT_Full, color)
if horizontal:
fig = plt.figure(figsize=(width, height), dpi=200)
ax_1 = fig.add_axes([0.0, 0.0, axis_width, axis_height])
else:
fig = plt.figure(figsize=(5, height), dpi=200)
ax_1 = fig.add_axes([0.0, 2 * (axis_height + title_height), 1.0, axis_height])
if img1 is not None:
ax_1.imshow(img1 / np.max(img1.flatten()))
if horizontal:
ax_X = fig.add_axes([0.0, 0.0, axis_width, axis_height], label="Circle_ray")
else:
ax_X = fig.add_axes(
[0.0, 2 * (axis_height + title_height), 1.0, axis_height], label="Circle_ray",
)
ax_X.set_xlim((-1, 1))
ax_X.set_ylim((-1, 1))
p = Circle((0.0, 0,), 0.99, linewidth=2, edgecolor="k", zorder=0, fill=False)
ax_X.add_patch(p)
if azimuths is not None and inc_angles is not None:
for a, i, phase in zip(azimuths, inc_angles, phase_names):
if i > 90.0:
x = np.sin(np.deg2rad(a + 180)) * (180.0 - i) / 90.0
y = np.cos(np.deg2rad(a + 180)) * (180.0 - i) / 90.0
else:
x = np.sin(np.deg2rad(a)) * i / 90.0
y = np.cos(np.deg2rad(a)) * i / 90.0
p = Circle(
(x, y), 0.015, linewidth=2, edgecolor="k", zorder=0, facecolor="k", fill=True,
)
ax_X.add_patch(p)
ax_X.text(x - 0.005, y + 0.03, s=phase, fontsize=40)
for a in [ax_1, ax_X]:
a.set_xticks([])
a.set_yticks([])
a.axis("off")
#
if horizontal:
title_1 = fig.add_axes([0.0, axis_height, axis_width, title_height])
else:
title_1 = fig.add_axes([0.0, 3 * axis_height + 2 * title_height, 1.0, title_height])
title_1.set_xticks([])
title_1.set_yticks([])
title_1.axis("off")
# title_1.text(
# 0.5,
# 0.2,
# "Full moment\n" r"$\epsilon=%.2f$" % Eps,
# ha="center",
# va="bottom",
# size="x-large",
# fontsize=50,
# )
title_1.text(
0.5, 0.2, "$\epsilon=%.2f$" % Eps, ha="center", va="bottom", size="x-large", fontsize=50,
)
########################
## DC moment tensor:
img2, buf2 = Get_bb_img(MT_DC, color)
if horizontal:
ax_2 = fig.add_axes(
[
axis_width + ((axis_width - (axis_width * DC_scal)) / 2),
0.0 + ((axis_height - (axis_height * DC_scal)) / 2),
axis_width * DC_scal,
axis_height * DC_scal,
]
)
else:
ax_2 = fig.add_axes([0.0, axis_height + title_height, 1.0, axis_height])
if img2 is not None:
ax_2.imshow(img2 / np.max(img2.flatten()))
if horizontal:
ax_X = fig.add_axes(
[
axis_width + ((axis_width - (axis_width * DC_scal)) / 2),
0.0 + ((axis_height - (axis_height * DC_scal)) / 2),
axis_width * DC_scal,
axis_height * DC_scal,
],
label="Circle_ray",
)
else:
ax_X = fig.add_axes(
[0.0, axis_height + title_height, 1.0, axis_height], label="Circle_ray"
)
ax_X.set_xlim((-1, 1))
ax_X.set_ylim((-1, 1))
p = Circle((0.0, 0,), 0.99, linewidth=2, edgecolor="k", zorder=0, fill=False)
ax_X.add_patch(p)
if azimuths is not None and inc_angles is not None:
for a, i, phase in zip(azimuths, inc_angles, phase_names):
if i > 90.0:
x = np.sin(np.deg2rad(a + 180)) * (180.0 - i) / 90.0
y = np.cos(np.deg2rad(a + 180)) * (180.0 - i) / 90.0
else:
x = np.sin(np.deg2rad(a)) * i / 90.0
y = np.cos(np.deg2rad(a)) * i / 90.0
p = Circle(
(x, y), 0.015, linewidth=2, edgecolor="k", zorder=0, facecolor="k", fill=True,
)
ax_X.add_patch(p)
ax_X.text(x - 0.005, y + 0.03, s=phase, fontsize=40)
for a in [ax_2, ax_X]:
a.set_xticks([])
a.set_yticks([])
a.axis("off")
if horizontal:
title_2 = fig.add_axes([axis_width, axis_height, axis_width, title_height])
else:
title_2 = fig.add_axes([0.0, 2 * axis_height + title_height, 1.0, title_height])
title_2.set_xticks([])
title_2.set_yticks([])
title_2.axis("off")
# title_2.text(
# 0.5,
# 0.2,
# "Double-Couple \n M0: %.2e" % M0_DC,
# ha="center",
# va="bottom",
# size="x-large",
# fontsize=25,
# )
# title_2.text(0.5, 0.2, "Direct", ha="center", va="bottom", size="x-large", fontsize=40)
### CLVD
img3, buf3 = Get_bb_img(MT_CLVD, color)
if horizontal:
ax_3 = fig.add_axes(
[
2 * (axis_width) + ((axis_width - (axis_width * CLVD_scal)) / 2),
0.0 + ((axis_height - (axis_height * CLVD_scal)) / 2),
axis_width * CLVD_scal,
axis_height * CLVD_scal,
]
)
else:
ax_3 = fig.add_axes([0.0, 0.0, 1.0, axis_height])
if img3 is not None:
ax_3.imshow(img3 / np.max(img3.flatten()))
if horizontal:
ax_X = fig.add_axes(
[
2 * (axis_width) + ((axis_width - (axis_width * CLVD_scal)) / 2),
0.0 + ((axis_height - (axis_height * CLVD_scal)) / 2),
axis_width * CLVD_scal,
axis_height * CLVD_scal,
],
label="Circle_ray",
)
else:
ax_X = fig.add_axes([0.0, 0.0, 1.0, axis_height], label="Circle_ray")
ax_X.set_xlim((-1, 1))
ax_X.set_ylim((-1, 1))
p = Circle((0.0, 0,), 0.99, linewidth=2, edgecolor="k", zorder=0, fill=False)
ax_X.add_patch(p)
if azimuths is not None and inc_angles is not None:
for a, i, phase in zip(azimuths, inc_angles, phase_names):
if i > 90.0:
x = np.sin(np.deg2rad(a + 180)) * (180.0 - i) / 90.0
y = np.cos(np.deg2rad(a + 180)) * (180.0 - i) / 90.0
else:
x = np.sin(np.deg2rad(a)) * i / 90.0
y = np.cos(np.deg2rad(a)) * i / 90.0
p = Circle(
(x, y), 0.015, linewidth=2, edgecolor="k", zorder=0, facecolor="k", fill=True,
)
ax_X.add_patch(p)
ax_X.text(x - 0.005, y + 0.03, s=phase, fontsize=40)
for a in [ax_3, ax_X]:
a.set_xticks([])
a.set_yticks([])
a.axis("off")
if horizontal:
title_3 = fig.add_axes([2 * axis_width, axis_height, axis_width, title_height])
else:
title_3 = fig.add_axes([0.0, axis_height, 1.0, title_height])
title_3.set_xticks([])
title_3.set_yticks([])
title_3.axis("off")
# title_3.text(
# 0.5,
# 0.2,
# "CLVD \n M0: %.2e" % M0_CLVD,
# ha="center",
# va="bottom",
# size="x-large",
# fontsize=25,
# )
return fig
def Plot_GS_BB(
strikes, dips, rakes, azimuths, inc_angles, phase_names, color, height=None, horizontal=True,
):
if horizontal:
width = 5.0
height = 6.0
axis_height = 5.0 / height
        resid_height = 1.0 - axis_height
        title_height = resid_height
axis_width = 5.0 / width
else:
if height == None:
height = 19.0
axis_height = 5.0 / height
resid_height = 1.0 - 3.0 * axis_height
title_height = resid_height / 3.0
fig_bb = plt.figure(figsize=(5, 5), dpi=200)
ax_bb = fig_bb.add_axes([0.0, 0.0, 1.0, 1.0])
ax_bb.set_xticks([])
ax_bb.set_yticks([])
ax_bb.axis("off")
img = None
buf = io.BytesIO()
i = 0
for strike, dip, rake in zip(strikes, dips, rakes):
i += 1
b = beach(
fm=[strike, dip, rake],
width=990,
linewidth=0,
facecolor=color,
xy=(0, 0),
axes=ax_bb,
alpha=1,
zorder=i,
)
ax_bb.add_collection(b)
ax_bb.set_xlim((-1, 1))
ax_bb.set_ylim((-1, 1))
buf.seek(0)
fig_bb.savefig(buf, format="png", dpi=200)
buf.seek(0)
if img is None:
img = mpimg.imread(buf)
else:
img += mpimg.imread(buf)
plt.close(fig_bb)
if horizontal:
fig = plt.figure(figsize=(width, height), dpi=200)
ax_1 = fig.add_axes([0.0, 0.0, axis_width, axis_height])
else:
fig = plt.figure(figsize=(5, height), dpi=200)
ax_1 = fig.add_axes([0.0, 2 * (axis_height + title_height), 1.0, axis_height])
if img is not None:
ax_1.imshow(img / np.max(img.flatten()))
if horizontal:
ax_X = fig.add_axes([0.0, 0.0, axis_width, axis_height], label="Circle_ray")
else:
ax_X = fig.add_axes(
[0.0, 2 * (axis_height + title_height), 1.0, axis_height], label="Circle_ray",
)
ax_X.set_xlim((-1, 1))
ax_X.set_ylim((-1, 1))
p = Circle((0.0, 0,), 0.99, linewidth=2, edgecolor="k", zorder=0, fill=False)
ax_X.add_patch(p)
if azimuths is not None and inc_angles is not None:
for a, i, phase in zip(azimuths, inc_angles, phase_names):
if i > 90.0:
x = np.sin(np.deg2rad(a + 180)) * (180.0 - i) / 90.0
y = np.cos(np.deg2rad(a + 180)) * (180.0 - i) / 90.0
else:
x = np.sin(
|
np.deg2rad(a)
|
numpy.deg2rad
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpathes
# var
L = 5
W = 5
N = 4
# const var
pi = 3.1415926
# wave
A = 0.4
u = 343
v = 40000
_lambda = u / v
w = 2 * pi * v
T = 2 * pi / w
rho = 1.293
# y = Acos(wt-2pi(sqrt(x^2+y^2+z^2))/lambda)
# v = dy/dt = -Awsin(wt-2pi(sqrt(x^2+y^2+z^2))/lambda)
# cos 2x = (cos x)^2 - (sin x)^2 = 1 - 2(sin x)^2
# (sin x)^2 = (1 - cos 2x)/2
# v^2 = A^2*w^2*(1-cos(2wt - 4pi(r)/lambda))/2
# integral of v^2 over one period T
# @v^2 = (1/2)A^2*w^2*T - A^2*w*sin(4pi-4pi(r)/lambda)/4 + A^2*w*sin(-4pi(r)/lambda)/4
#      = pi*A^2*w + A^2*w*sin(4pi(r)/lambda)/4 - A^2*w*sin(4pi(r)/lambda)/4
#      = pi*A^2*w
# a = -Aw^2cos(wt-2pi(sqrt(x^2+y^2+z^2))/lambda)
# r = sqrt(x^2+y^2+z^2)
# dv/dx = 2piAwcos(wt-2pi(r)/lambda)/lambda
# -dp/dx = rho(-Aw^2cos(wt-2pi(r)/lambda)-Awsin(wt-2pi(r)/lambda)*2piAwcos(wt-2pi(r)/lambda)/lambda)
# = rho(-Aw^2cos(wt-2pi(r)/lambda)-piA^2*w^2sin(2wt-4pi(r)/lambda))
# integral by time(1T)
# @-dp/dx = rho(-Awsin(wt-2pi(r)/lambda)+(1/2)*piA^2*wcos(2wt-4pi(r)/lambda))
# = rho(-Awsin(-2pi(r)/lambda)+(1/2)*piA^2*wcos(4pi(r)/lambda)+Awsin(-2pi(r)/lambda)+piA^2*wcos(4pi(r)/lambda)/2)
# = rho(piA^2*wcos(4pi(r)/lambda))
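# Note (added for clarity): the grid computation below evaluates this last expression
# directly: wave_f() sums cos(4*pi*r_i/lambda (+ pi for phase-inverted sounders)) over
# the sample points of each transducer arc, and the constant prefactor rho*pi*A^2*w
# (the same pi*A^2*w obtained from the time-integral of v^2 above) is multiplied in
# after the loops, see `array = array * rho * (pi * A ** 2 * w)`.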
def wave_f(x1, y1, f):
if f.fi:
_f = pi
else:
_f = 0
theta = 0
for x, y in f.points:
theta += np.cos(4 * pi * np.sqrt((x1 - x) ** 2 + (y1 - y) ** 2) / _lambda + _f)
return theta
# length
_l = L * _lambda / 2
_w = W * _lambda / 2
# degree
_N, _M = 50, 50
# be in real coordinate
def coordinate(x, y):
x = x * _w / _M
y = y * _l / _N
return x, y
# create zero array
array = np.zeros((_M, _N))
array_v = np.zeros((_M, _N))
# non-liner sounder
# x = acos(t) + b
# y = csin(t) + d
# e < t < f
class F:
def __init__(self, a, b, c, d, e, f, fi=False):
self.fi = fi
self.points = []
divide = np.maximum(_M, _N)
mini = (f - e) / divide
for i in range(divide):
t = mini * i + e
self.points.append([a * np.cos(t) + b, c * np.sin(t) + d])
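# illustrative example (comment only): F(1, 0, 1, 0, 0, pi) samples max(_M, _N) points on
# the upper half of the unit circle; fi=True shifts the phase of that sounder by pi in wave_f()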
# wave sounders f0..f3: quarter arcs of a circle of radius N*lambda/4 centred at (_w/2, _l/2)
# f0..f3: x = (N*lambda/4)cos(t) + _w/2
#         y = (N*lambda/4)sin(t) + _l/2
#         f0: 0<t<pi/2, f1: pi/2<t<pi, f2: pi<t<3pi/2, f3: 3pi/2<t<2pi (f2, f3 phase-shifted by pi)
f0 = F(N * _lambda / 4, _w / 2, N * _lambda / 4, _l / 2, 0, pi/2)
f1 = F(N * _lambda / 4, _w / 2, N * _lambda / 4, _l / 2, pi/2, pi)
f2 = F(N * _lambda / 4, _w / 2, N * _lambda / 4, _l / 2, pi, 3*pi/2, True)
f3 = F(N * _lambda / 4, _w / 2, N * _lambda / 4, _l / 2, 3*pi/2, 2 * pi, True)
# simulation
for i in range(_M):
for j in range(_N):
_x, _y = coordinate(i, j)
array[_M - j - 1][_N - i - 1] += wave_f(_x, _y, f0) / _M ** 2
array[_M - j - 1][_N - i - 1] += wave_f(_x, _y, f1) / _M ** 2
array[_M - j - 1][_N - i - 1] += wave_f(_x, _y, f2) / _M ** 2
array[_M - j - 1][_N - i - 1] += wave_f(_x, _y, f3) / _M ** 2
# simulation
for i in range(_N):
for j in range(_M):
_x, _y = coordinate(i, j)
array[_M - j - 1][_N - i - 1] += wave_f(_x, _y, f0) / _M ** 2
array = array * rho * (pi * A ** 2 * w)
# U = 2 * pi * R^3 (_p/3rhoc^2 - rho*v^2/2)
array = 2 * pi * (
|
np.abs(array)
|
numpy.abs
|
"""
Numerical models
Models are defined by (constant) parameters and variables the model is
evaluated for. The variables can be thought of as the axes values of the
resulting (calculated) dataset.
As a simple example, consider a polynomial defined by its (constant)
coefficients. The model will evaluate the polynomial for the values,
and the result will be a :obj:`aspecd.dataset.CalculatedDataset` object
containing the values of the evaluated model in its data, and the
variables as its axes values.
Models can be seen as an abstraction of simulations in some regard. In this
respect, they will play a central role in conjunction with fitting
models to data by adjusting their respective parameters, a quite general
approach in science and particularly in spectroscopy.
A bit of terminology
====================
parameters :
constant parameters (sometimes termed coefficients) characterising the model
Example: In case of a polynomial, the coefficients would be the
parameters of the model.
variables :
values to evaluate the model for
Example: In case of a polynomial, the *x* values the model is
evaluated for would be the variables, with the *y* values being the
corresponding dependent values dictated by the model and its parameters.
Models provided within this module
==================================
Besides providing the basis for models for the ASpecD framework, this module
comes with a (growing) number of general-purpose models useful for basically
all kinds of spectroscopic data.
Here is a list as a first overview. For details, see the detailed
documentation of each of the classes, readily accessible by the link.
Primitive models
----------------
Primitive models are mainly used to create test datasets that can be
operated on afterwards. The particular strength and beauty of wrapping
essential one-liners of code with a full-fledged model class is twofold:
These classes return ASpecD datasets, and you can work completely in context
of recipe-driven data analysis, requiring no actual programming skills.
If nothing else, these primitive models can serve as a way to create
datasets with fixed data dimensions. Those datasets may be used as templates
for more advanced models, by using the :meth:`aspecd.model.Model.from_dataset`
method.
Having that said, here you go with a list of primitive models:
* :class:`aspecd.model.Zeros`
Dataset consisting entirely of zeros (in N dimensions)
* :class:`aspecd.model.Ones`
Dataset consisting entirely of ones (in N dimensions)
Mathematical models
-------------------
Besides the primitive models listed above, there is a growing number of
mathematical models implementing comparably simple mathematical equations
that are often used. Packages derived from the ASpecD framework may well
define more specific models as well.
* :class:`aspecd.model.Polynomial`
Polynomial (of arbitrary degree/order, depending on the number of
coefficients)
* :class:`aspecd.model.Gaussian`
Generalised Gaussian where amplitude, position, and width can be
set explicitly. Hence, this is usually *not* identical to the probability
density function (PDF) of a normally distributed random variable.
* :class:`aspecd.model.NormalisedGaussian`
Normalised Gaussian with an integral of one, identical to the probability
density function (PDF) of a normally distributed random variable.
* :class:`aspecd.model.Lorentzian`
Generalised Lorentzian where amplitude, position, and width can be
set explicitly. Hence, this is usually *not* identical to the probability
density function (PDF) of the Cauchy distribution.
* :class:`aspecd.model.NormalisedLorentzian`
Normalised Lorentzian with an integral of one, identical to the probability
density function (PDF) of the Cauchy distribution.
* :class:`aspecd.model.Sine`
Sine wave with adjustable amplitude, frequency, and phase.
* :class:`aspecd.model.Exponential`
Exponential function with adjustable prefactor and rate.
Composite models consisting of a sum of individual models
---------------------------------------------------------
Often you encounter situations where a model consists of a (weighted) sum of
individual models. A simple example would be a damped oscillation. Or think
of a spectral line consisting of several overlapping individual lines
(Lorentzian or Gaussian).
All this can be easily set up using the :class:`aspecd.model.CompositeModel`
class that lets you conveniently specify a list of models, their individual
parameters, and optional weights.
Family of curves
----------------
Systematically varying one parameter at a time for a given model is key
to understanding the impact this parameter has. Therefore, automatically
creating a family of curves with one parameter varied is quite convenient.
To achieve this, use the class :class:`aspecd.model.FamilyOfCurves` that will
take the name of a model (needs to be the name of an existing model class)
and create a family of curves for this model, adding the name of the
parameter as quantity to the additional axis.
Writing your own models
=======================
All models should inherit from the :class:`aspecd.model.Model` class.
Furthermore, they should conform to a series of requirements:
* Parameters are stored in the :attr:`aspecd.model.Model.parameters` dict.
Note that this is a :class:`dict`. In the simplest case, you may name the
corresponding key "coefficients", as in case of a polynomial. In other
cases, there are common names for parameters, such as "mu" and "sigma" for
a Gaussian. Whether the keys should be named this way or describe the
actual meaning of the parameter is partly a matter of personal taste. Use
whatever is more common in the given context, but tend to be descriptive.
Usually, implementing mathematical equations by simply naming every
variable according to the mathematical notation is a bad idea, as the
programmer will not know what these variables represent.
* Models create calculated datasets of class
:class:`aspecd.dataset.CalculatedDataset`.
The data of these datasets need to have dimensions corresponding to the
variables set for the model. Think of the variables as being the axes
values of the resulting dataset.
The ``_origdata`` property of the dataset is automatically set accordingly
(see below for details). This is crucially important to have the resulting
dataset work as expected, including undo and redo functionality within the
ASpecD framework. Remember: A calculated dataset is a regular dataset,
and you can perform all the tasks you would do with other datasets,
including processing, analysis, and the like.
* Model creation takes place entirely in the non-public ``_perform_task``
method of the model.
This method gets called from :meth:`aspecd.model.Model.create`, but not
before some background checks have been performed, including preparing the
metadata of the :obj:`aspecd.dataset.CalculatedDataset` object returned by
:meth:`aspecd.model.Model.create`.
After calling out to ``_perform_task``, the axes of the
:obj:`aspecd.dataset.CalculatedDataset` object returned by
:meth:`aspecd.model.Model.create` are set accordingly, *i.e.* fitting to
the shape of the data.
On the other hand, a series of things will be automatically taken care of
for you:
* Metadata of the resulting :obj:`aspecd.dataset.CalculatedDataset` object
are automatically set, including ``type`` (set to the full class name of
the model) and ``parameters`` (copied over from the parameters attribute
of the model).
* Axes of the resulting :obj:`aspecd.dataset.CalculatedDataset` object are
automatically adjusted according to the size and content of
the :attr:`aspecd.model.Model.variables` attribute.
In case you used :meth:`aspecd.model.Model.from_dataset`, the axes from
the dataset will be copied over from there.
* The ``_origdata`` property of the dataset is automatically set accordingly.
This is crucially important to have the resulting dataset work as
expected, including undo and redo functionality within the ASpecD framework.
Make sure your models do not raise errors such as :class:`ZeroDivisionError`
depending on the parameters set. Use the :func:`aspecd.utils.not_zero`
function where appropriate. This is particularly important in light of using
models in the context of automated fitting.
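To tie these requirements together, a rough, minimal sketch of a custom model is
given below (the class and parameter names are invented for the example):

.. code-block:: python

    import aspecd.model


    class StraightLine(aspecd.model.Model):
        # Straight line: y = slope * x + intercept

        def __init__(self):
            super().__init__()
            self.description = 'Straight line'
            self.parameters['slope'] = 1.
            self.parameters['intercept'] = 0.

        def _perform_task(self):
            x = self.variables[0]
            self._dataset.data.data = \
                self.parameters['slope'] * x + self.parameters['intercept']

Axes, calculation metadata, and the ``_origdata`` property of the resulting
calculated dataset are taken care of automatically, as described above.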
Module documentation
====================
"""
import copy
import numpy as np
import aspecd.dataset
import aspecd.exceptions
import aspecd.utils
from aspecd.utils import not_zero, isiterable
class Model:
"""
Base class for numerical models.
Models are defined by (constant) parameters and variables the model is
evaluated for. The variables can be thought of as the axes values of the
resulting (calculated) dataset.
As a simple example, consider a polynomial defined by its (constant)
coefficients. The model will evaluate the polynomial for the values,
and the result will be a :obj:`aspecd.dataset.CalculatedDataset` object
containing the values of the evaluated model in its data, and the
variables as its axes values.
    Models can be seen as an abstraction of simulations in some regard. In this
respect, they will play a central role in conjunction with fitting
models to data by adjusting their respective parameters, a quite general
approach in science and particularly in spectroscopy.
Attributes
----------
name : :class:`str`
Name of the model.
Defaults to the lower-case class name, don't change!
parameters : :class:`dict`
constant parameters characterising the model
variables : :class:`list`
values to evaluate the model for
Usually :class:`numpy.ndarray` arrays, one for each variable
The variables will become the values of the respective axes.
description : :class:`str`
Short description, to be set in class definition
references : :class:`list`
List of references with relevance for the implementation of the
processing step.
Use appropriate record types from the `bibrecord package
<https://bibrecord.docs.till-biskup.de/>`_.
.. versionchanged:: 0.3
New attribute :attr:`description`
.. versionchanged:: 0.3
New non-public method :meth:`_sanitise_parameters`
.. versionchanged:: 0.4
New attribute :attr:`references`
"""
def __init__(self):
self.name = aspecd.utils.full_class_name(self)
self.parameters = dict()
self.variables = []
self.description = 'Abstract model'
self.references = []
self._dataset = aspecd.dataset.CalculatedDataset()
self._axes_from_dataset = []
def create(self):
"""
Create dataset containing the evaluated model as data
The actual model creation should be implemented within the non-public
method :meth:`_perform_task`. Furthermore, you should make sure your
model will be evaluated for the values given in
:attr:`aspecd.model.Model.values` and the resulting dataset having set
the axes appropriately.
Furthermore, don't forget to set the ``_origdata`` property of the
dataset, usually simply by copying the ``data`` property over there
after it has been filled with content. This is crucially important
to have the resulting dataset work as expected, including undo and
redo functionality within the ASpecD framework. Remember: A
calculated dataset is a regular dataset, and you can perform all the
        tasks you would do with other datasets, including processing,
        analysis, and the like.
Returns
-------
dataset : :class:`aspecd.dataset.CalculatedDataset`
Calculated dataset containing the evaluated model as data
Raises
------
aspecd.exceptions.MissingParameterError
Raised if either parameters or values are not set
"""
self._sanitise_parameters()
self._check_prerequisites()
self._set_dataset_metadata()
self._perform_task()
self._set_dataset_axes()
self._set_dataset_origdata()
return self._dataset
def from_dataset(self, dataset=None):
"""
Obtain crucial information from an existing dataset.
Often, models should be calculated for the same values as an
existing dataset. Therefore, you can set the
        :attr:`aspecd.model.Model.variables` property from a given dataset.
If you get the variables from an existing dataset, the calculated
dataset containing the evaluated model will have the same axes
settings. Thus, it is pretty convenient to get a model with
identical axes, including quantity etcetera. This helps a lot with
plotting both, an (experimental) dataset and the model, in one plot.
Parameters
----------
dataset : :class:`aspecd.dataset.Dataset`
Dataset to obtain crucial information for building the model from
Raises
------
aspecd.exceptions.MissingDatasetError
Raised if no dataset is provided
"""
if not dataset:
raise aspecd.exceptions.MissingDatasetError
for index in range(len(dataset.data.axes)):
self.variables.append(dataset.data.axes[index].values)
self._axes_from_dataset = dataset.data.axes
def from_dict(self, dict_=None):
"""
Set attributes from dictionary.
Parameters
----------
dict_ : :class:`dict`
Dictionary containing information of a task.
Raises
------
aspecd.plotting.MissingDictError
Raised if no dict is provided.
"""
if not dict_:
raise aspecd.exceptions.MissingDictError(
'Need a dict to read from, but none given')
for key, value in dict_.items():
if hasattr(self, key):
setattr(self, key, value)
def _check_prerequisites(self):
if not self.parameters:
raise aspecd.exceptions.MissingParameterError(
'No parameters for model provided')
if len(self.variables) == 0:
raise aspecd.exceptions.MissingParameterError(
'No variables to evaluate model for provided')
def _sanitise_parameters(self):
"""Ensure parameters provided for model are correct.
Needs to be implemented in classes inheriting from Model
according to their needs. Most probably, you want to check for
correct types of all parameters as well as values within sensible
borders.
"""
def _perform_task(self):
"""Create the actual model and evaluate it for the given values.
The implementation of the actual model goes in here in all
classes inheriting from Model. This method is automatically
called by :meth:`self.create` after some background checks.
"""
# dummy to get tests to run
self._dataset.data.data = self.variables[0]
def _set_dataset_axes(self):
"""
Set axes of calculated dataset
In case a dataset has been used to obtain the variables for,
the axes get set from this dataset.
"""
if self._axes_from_dataset:
self._dataset.data.axes = self._axes_from_dataset
elif isinstance(self.variables[0], (list, np.ndarray)):
for index in range(len(self.variables)):
self._dataset.data.axes[index].values = self.variables[index]
else:
self._dataset.data.axes[0].values = np.asarray(self.variables)
def _set_dataset_metadata(self):
"""
Set calculation metadata of calculated dataset
Calculation type is set to the full class name of the respective
model, and parameters are copied over from the model parameters.
"""
self._dataset.metadata.calculation.type = self.name
self._dataset.metadata.calculation.parameters = self.parameters
def _set_dataset_origdata(self):
# pylint: disable=protected-access
self._dataset._origdata = copy.deepcopy(self._dataset.data)
class CompositeModel(Model):
"""
    Composite model consisting of weighted contributions of individual models.
Individual models can either be added up (default) or multiplied,
depending on which operators are provided. Both situations occur
frequently. If you would like to describe a spectrum as sum of Gaussian
or Lorentzian lines, you need to add the individual contributions. If
you would like to model a damped oscillation, you would need to multiply
the exponential decay onto the oscillation.
Attributes
----------
models : :class:`list`
Names of the models the composite model consists of
Each name needs to be the name of an existing model class.
parameters : :class:`list`
Constant parameters characterising each individual model
For the parameters that can (and need to) be set, consult the
documentation of each of the respective model classes specified in
the :attr:`models` attribute.
weights : :class:`list`
Factors used to weight the individual models.
Default: no weighting
operators : :class:`list`
Operators to be used for the individual models.
Addition ("+", "add", "plus") and multiplication ("*", "multiply",
"times") are supported.
Note that one operator less than models needs to be provided.
Default: add
Raises
------
IndexError
Raised if number of models, parameter sets, operators, and weights are
incompatible
Examples
--------
For convenience, a series of examples in recipe style (for details of
the recipe-driven data analysis, see :mod:`aspecd.tasks`) is given below
for how to make use of this class. The examples focus each on a single
aspect.
Suppose you would want to describe your data with a model consisting of
two Lorentzian line shapes. Starting from scratch, you need to create a
dummy dataset (using, *e.g.*, :class:`aspecd.model.Zeros`) of given
length and axes range. Based on that you can create your model:
.. code-block:: yaml
- kind: model
type: Zeros
properties:
parameters:
shape: 1001
range: [0, 20]
result: dummy
- kind: model
type: CompositeModel
from_dataset: dummy
properties:
models:
- Lorentzian
- Lorentzian
parameters:
- position: 3
- position: 5
result: multiple_lorentzians
Note that you need to provide parameters for each of the individual
models, even if the class for a model would work without explicitly
providing parameters.
Of course, if you start with an existing dataset (*e.g.*, loaded from
some real data), you could use the label to this dataset directly in
``from_dataset``, without needing to create a dummy dataset first.
While adding up the contributions of the individual components works
well for describing spectra, sometimes you need to multiply contributions.
Suppose you would want to create a damped oscillation consisting of a
sine and an exponential. Starting from scratch, you need to create a
dummy dataset (using, *e.g.*, :class:`aspecd.model.Zeros`) of given
length and axes range. Based on that you can create your model:
.. code-block:: yaml
- kind: model
type: Zeros
properties:
parameters:
shape: 1001
range: [0, 20]
result: dummy
- kind: model
type: CompositeModel
from_dataset: dummy
properties:
models:
- Sine
- Exponential
parameters:
- frequency: 1
phase: 1.57
- rate: -1
operators:
- multiply
result: damped_oscillation
Again, you need to provide parameters for each of the individual
models, even if the class for a model would work without explicitly
providing parameters.
.. versionadded:: 0.3
"""
def __init__(self):
super().__init__()
self.description = \
'Composite model consisting of several weighted models'
self.models = []
self.parameters = []
self.weights = []
self.operators = []
def _sanitise_parameters(self):
if not self.weights:
self.weights = np.ones(len(self.models))
if not self.operators:
for _ in self.models:
self.operators.append('+')
else:
self.operators.insert(0, '+')
if len(self.parameters) != len(self.models):
raise IndexError('Models and parameters count differs')
if len(self.weights) != len(self.models):
raise IndexError('Models and weights count differs')
if len(self.operators) != len(self.models):
raise IndexError('Models and operators count differs')
def _perform_task(self):
data = np.zeros(len(self.variables))
for idx, model_name in enumerate(self.models):
model = self._get_model(model_name)
for key in self.parameters[idx]:
# noinspection PyUnresolvedReferences
model.parameters[key] = self.parameters[idx][key]
model.variables = self.variables
# noinspection PyUnresolvedReferences
dataset = model.create()
if self.operators[idx] in ('+', 'plus', 'add'):
data += dataset.data.data * self.weights[idx]
if self.operators[idx] in ('*', 'times', 'multiply'):
data *= dataset.data.data * self.weights[idx]
self._dataset.data.data = data
@staticmethod
def _get_model(model_name):
try:
model = aspecd.utils.object_from_class_name(model_name)
except (ValueError, AttributeError):
model = aspecd.utils.object_from_class_name('aspecd.model.' +
model_name)
return model
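# The recipes in the CompositeModel docstring combine component models either
# additively (two Lorentzians) or multiplicatively (sine times exponential).
# The following is a rough plain-numpy sketch of those two combinations, *not*
# the aspecd API; the Lorentzian, sine, and exponential expressions below are
# generic textbook forms and may differ from the parametrisations used by the
# model classes in this module.
def _composite_model_sketch():
    x = np.linspace(0, 20, 1001)
    lorentzian = lambda pos, width=1.0: 1.0 / (1.0 + ((x - pos) / width) ** 2)
    # additive combination: sum of two Lorentzians at positions 3 and 5
    multiple_lorentzians = lorentzian(3) + lorentzian(5)
    # multiplicative combination: damped oscillation = sine * decaying exponential
    damped_oscillation = np.sin(x + 1.57) * np.exp(-x)
    return multiple_lorentzians, damped_oscillation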
class FamilyOfCurves(Model):
"""
Create a family of curves for a model, varying a single parameter.
Systematically varying one parameter at a time for a given model is key
to understanding the impact this parameter has. Therefore, automatically
creating a family of curves with one parameter varied is quite convenient.
This class will take the name of a model (needs to be the name of an
existing model class) and create a family of curves for this model,
adding the name of the parameter as quantity to the additional axis.
Attributes
----------
model : :class:`str`
Name of the model the family of curves should be calculated for
Needs to be the name of an existing model class.
vary : :class:`dict`
Name and values of the parameter to be varied
parameter : :class:`str`
Name of the parameter that should be varied
values : :class:`list`
Values of the parameter to be varied
Raises
------
ValueError
Raised if no model is provided
Examples
--------
For convenience, a series of examples in recipe style (for details of
the recipe-driven data analysis, see :mod:`aspecd.tasks`) is given below
for how to make use of this class. The examples focus each on a single
aspect.
Suppose you would want to create a family of curves of a Gaussian with
varying the width. Starting from scratch, you need to create a
dummy dataset (using, *e.g.*, :class:`aspecd.model.Zeros`) of given
length and axes range. Based on that you can create your family of curves:
.. code-block:: yaml
- kind: model
type: Zeros
properties:
parameters:
shape: 1001
range: [0, 20]
result: dummy
- kind: model
type: FamilyOfCurves
from_dataset: dummy
properties:
model: Gaussian
vary:
parameter: width
values: [1., 1.5, 2., 2.5, 3]
result: gaussian_with_varied_width
This would create a 2D dataset with a Gaussian with standard values for
amplitude and position and the value for the width varied as given.
Of course, if you start with an existing dataset (*e.g.*, loaded from
some real data), you could use the label to this dataset directly in
``from_dataset``, without needing to create a dummy dataset first.
If you would like to control additional parameters of the Gaussian,
you can do that as well:
.. code-block:: yaml
- kind: model
type: FamilyOfCurves
from_dataset: dummy
properties:
model: Gaussian
parameters:
amplitude: 3.
position: -1
vary:
parameter: width
values: [1., 1.5, 2., 2.5, 3]
result: gaussian_with_varied_width
Note that if you provide a value for the parameter to be varied in the
list of parameters, it will be silently overwritten by the values
provided with ``vary``.
.. versionadded:: 0.3
"""
def __init__(self):
super().__init__()
self.description = 'Family of curves for a model with one parameter ' \
'varied'
self.model = None
self.vary = dict()
def _sanitise_parameters(self):
if not self.model:
raise ValueError('Missing a model')
if not isiterable(self.vary["values"]):
self.vary["values"] = [self.vary["values"]]
if not self.parameters:
self.parameters[self.vary["parameter"]] = self.vary["values"][0]
# noinspection PyUnresolvedReferences
def _perform_task(self):
self._dataset.data.data = \
np.zeros([len(self.variables), len(self.vary["values"])])
model = self._get_model(self.model)
model.variables = self.variables
for key in self.parameters:
model.parameters[key] = self.parameters[key]
for idx, value in enumerate(self.vary["values"]):
model.parameters[self.vary["parameter"]] = value
dataset = model.create()
self._dataset.data.data[:, idx] = dataset.data.data
self._dataset.data.axes[-1].quantity = self.vary["parameter"]
@staticmethod
def _get_model(model_name):
try:
model = aspecd.utils.object_from_class_name(model_name)
except (ValueError, AttributeError):
model = aspecd.utils.object_from_class_name('aspecd.model.' +
model_name)
return model
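# Hedged plain-numpy sketch of what FamilyOfCurves produces: the same model
# evaluated repeatedly while a single parameter (here a Gaussian width) is
# varied, with one column per parameter value. The Gaussian form and its
# centre are illustrative choices, not necessarily aspecd's defaults.
def _family_of_curves_sketch():
    x = np.linspace(0, 20, 1001)
    widths = [1.0, 1.5, 2.0, 2.5, 3.0]
    family = np.zeros([len(x), len(widths)])
    for idx, width in enumerate(widths):
        family[:, idx] = np.exp(-((x - 10.0) ** 2) / (2 * width ** 2))
    return family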
class Zeros(Model):
# noinspection PyUnresolvedReferences
"""
Zeros of given shape.
One of the most primitive models: zeros in N dimensions.
This model is quite helpful for creating test datasets, *e.g.* with
added noise (of different colour). Basically, it can be thought of as a
wrapper for :func:`numpy.zeros`. Its particular strength is that using
this model, creating test datasets becomes straight-forward in context
of recipe-driven data analysis.
Attributes
----------
parameters : :class:`dict`
All parameters necessary for this step.
shape : :class:`list`
shape of the data
Have in mind that ND datasets get huge very fast. Therefore,
it is *not* the best idea to create a 3D dataset with zeros
with 2**12 elements along each dimension.
range : :class:`list`
range of each of the axes
Useful if you want to specify the axes values as well.
If the data are multidimensional, one range for each axis needs
to be provided.
Raises
------
aspecd.exceptions.MissingParameterError
Raised if no shape is given
IndexError
Raised if elements in shape and range are incompatible
Examples
--------
For convenience, a series of examples in recipe style (for details of
the recipe-driven data analysis, see :mod:`aspecd.tasks`) is given below
for how to make use of this class. The examples focus each on a single
aspect.
Creating a dataset consisting of 2**10 zeros is quite simple:
.. code-block:: yaml
- kind: model
type: Zeros
properties:
parameters:
shape: 1024
result: 1d_zeros
Of course, you are not limited to 1D datasets, and you can easily create
ND datasets as well:
.. code-block:: yaml
- kind: model
type: Zeros
properties:
parameters:
shape: [1024, 256, 256]
result: 3d_zeros
Please have in mind that the memory of your computer is usually limited
and that ND datasets become huge very fast. Hence, creating a 3D array
with 2**10 elements along each dimension is most probably *not* the best
idea.
Suppose you not only want to create a dataset with a given shape,
but set the axes values (*i.e.*, their range) as well:
.. code-block:: yaml
- kind: model
type: Zeros
properties:
parameters:
shape: 1024
range: [35, 42]
result: 1d_zeros
This would create a 1D dataset with 1024 values, with the axes
values spanning a range from 35 to 42. Of course, the same can be done
with ND datasets.
Now, let's assume that you would want to play around with the different
types of (coloured) noise. Therefore, you would want to first create a
dataset and afterwards add noise to it:
.. code-block:: yaml
- kind: model
type: Zeros
properties:
parameters:
shape: 8192
result: 1d_zeros
- kind: processing
type: Noise
properties:
parameters:
normalise: True
This would create a dataset consisting of 2**13 zeros and add pink
(1/*f*) noise to it that is normalised (has an amplitude of 1). To check
that the noise is really 1/*f* noise, you may look at its power density.
See :class:`aspecd.analysis.PowerDensitySpectrum` for details, including
how to even plot both, the power density spectrum and a linear fit
together in one figure.
.. versionadded:: 0.3
"""
def __init__(self):
super().__init__()
self.description = "Model containing only zeros"
self.parameters["shape"] = None
self.parameters["range"] = None
def _sanitise_parameters(self):
if not self.variables:
self.variables = [0]
if not self.parameters["shape"]:
raise aspecd.exceptions.MissingParameterError(
message="Parameter 'shape' missing")
if not self.parameters["shape"]:
self.parameters["shape"] = []
if isiterable(self.variables[0]):
for index in range(len(self.variables)):
# noinspection PyTypeChecker
self.parameters["shape"].append(len(self.variables[index]))
else:
self.parameters["shape"] = len(self.variables)
if not isiterable(self.parameters["shape"]):
self.parameters["shape"] = [self.parameters["shape"]]
if self.parameters["range"]:
if not isiterable(self.parameters["range"][0]):
self.parameters["range"] = [self.parameters["range"]]
if len(self.parameters["shape"]) != len(self.parameters["range"]):
raise IndexError('Shape and range must be compatible')
def _perform_task(self):
self._dataset.data.data = np.zeros(self.parameters["shape"])
if self.parameters["range"]:
self._set_variables()
def _set_variables(self):
self.variables = []
shape = self.parameters["shape"]
range_ = self.parameters["range"]
for dim in range(self._dataset.data.data.ndim):
axis_values = \
np.linspace(range_[dim][0], range_[dim][1], shape[dim])
self.variables.append(axis_values)
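# In plain numpy terms (a sketch, not the recipe syntax above), a Zeros model
# with shape 1001 and range [0, 20] boils down to a zero-filled data array
# plus a linearly spaced axis:
def _zeros_model_sketch():
    data = np.zeros(1001)
    axis_values = np.linspace(0, 20, 1001)
    return data, axis_values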
class Ones(Model):
# noinspection PyUnresolvedReferences
"""
Ones of given shape.
One of the most primitive models: ones in N dimensions.
This model is quite helpful for creating test datasets, *e.g.* with
added noise (of different colour). Basically, it can be thought of as a
wrapper for :func:`numpy.ones`. Its particular strength is that using
this model, creating test datasets becomes straight-forward in context
of recipe-driven data analysis.
Attributes
----------
parameters : :class:`dict`
All parameters necessary for this step.
shape : :class:`list`
shape of the data
Have in mind that ND datasets get huge very fast. Therefore,
it is *not* the best idea to create a 3D dataset with ones
with 2**12 elements along each dimension.
range : :class:`list`
range of each of the axes
Useful if you want to specify the axes values as well.
If the data are multidimensional, one range for each axis needs
to be provided.
Raises
------
aspecd.exceptions.MissingParameterError
Raised if no shape is given
IndexError
Raised if elements in shape and range are incompatible
Examples
--------
For convenience, a series of examples in recipe style (for details of
the recipe-driven data analysis, see :mod:`aspecd.tasks`) is given below
for how to make use of this class. The examples focus each on a single
aspect.
Creating a dataset consisting of 2**10 ones is quite simple:
.. code-block:: yaml
- kind: model
type: Ones
properties:
parameters:
shape: 1024
result: 1d_ones
Of course, you are not limited to 1D datasets, and you can easily create
ND datasets as well:
.. code-block:: yaml
- kind: model
type: Ones
properties:
parameters:
shape: [1024, 256, 256]
result: 3d_ones
Please have in mind that the memory of your computer is usually limited
and that ND datasets become huge very fast. Hence, creating a 3D array
with 2**10 elements along each dimension is most probably *not* the best
idea.
Suppose you not only want to create a dataset with a given shape,
but set the axes values (*i.e.*, their range) as well:
.. code-block:: yaml
- kind: model
type: Ones
properties:
parameters:
shape: 1024
range: [35, 42]
result: 1d_ones
This would create a 1D dataset with 1024 values, with the axes
values spanning a range from 35 to 42. Of course, the same can be done
with ND datasets.
Now, let's assume that you would want to play around with the different
types of (coloured) noise. Therefore, you would want to first create a
dataset and afterwards add noise to it:
.. code-block:: yaml
- kind: model
type: Ones
properties:
parameters:
shape: 8192
result: 1d_ones
- kind: processing
type: Noise
properties:
parameters:
normalise: True
This would create a dataset consisting of 2**13 ones and add pink
(1/*f*) noise to it that is normalised (has an amplitude of 1). To check
that the noise is really 1/*f* noise, you may look at its power density.
See :class:`aspecd.analysis.PowerDensitySpectrum` for details, including
how to even plot both, the power density spectrum and a linear fit
together in one figure.
.. versionadded:: 0.3
"""
def __init__(self):
super().__init__()
self.description = "Model containing only ones"
self.parameters["shape"] = None
self.parameters["range"] = None
def _sanitise_parameters(self):
if not self.variables:
self.variables = [0]
if not self.parameters["shape"]:
raise aspecd.exceptions.MissingParameterError(
message="Parameter 'shape' missing")
if not self.parameters["shape"]:
self.parameters["shape"] = []
if isiterable(self.variables[0]):
for index in range(len(self.variables)):
# noinspection PyTypeChecker
self.parameters["shape"].append(len(self.variables[index]))
else:
self.parameters["shape"] = len(self.variables)
if not isiterable(self.parameters["shape"]):
self.parameters["shape"] = [self.parameters["shape"]]
if self.parameters["range"]:
if not isiterable(self.parameters["range"][0]):
self.parameters["range"] = [self.parameters["range"]]
if len(self.parameters["shape"]) != len(self.parameters["range"]):
raise IndexError('Shape and range must be compatible')
def _perform_task(self):
self._dataset.data.data = np.ones(self.parameters["shape"])
if self.parameters["range"]:
self._set_variables()
def _set_variables(self):
self.variables = []
shape = self.parameters["shape"]
range_ = self.parameters["range"]
for dim in range(self._dataset.data.data.ndim):
axis_values = \
|
np.linspace(range_[dim][0], range_[dim][1], shape[dim])
|
numpy.linspace
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
#
# Copyright 2016-2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $Author: frederic $
# $Date: 2016/07/12 13:50:29 $
# $Id: tide_funcs.py,v 1.4 2016/07/12 13:50:29 frederic Exp $
"""Functions for calculating correlations and similar metrics between arrays."""
import logging
import matplotlib.pyplot as plt
import numpy as np
import pyfftw
import pyfftw.interfaces.scipy_fftpack as fftpack
import scipy as sp
from numba import jit
from numpy.fft import irfftn, rfftn
from scipy import signal
from sklearn.metrics import mutual_info_score
import rapidtide.fit as tide_fit
import rapidtide.miscmath as tide_math
import rapidtide.resample as tide_resample
import rapidtide.util as tide_util
pyfftw.interfaces.cache.enable()
LGR = logging.getLogger("GENERAL")
# ---------------------------------------- Global constants -------------------------------------------
defaultbutterorder = 6
MAXLINES = 10000000
donotbeaggressive = True
donotusenumba = True
# ----------------------------------------- Conditional imports ---------------------------------------
def conditionaljit():
"""Wrap functions in jit if numba is enabled."""
def resdec(f):
if donotusenumba:
return f
return jit(f, nopython=False)
return resdec
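# Illustrative use of the decorator (a sketch): wrap a hot inner loop so that
# it is only compiled by numba when numba use is enabled via the module flag.
@conditionaljit()
def _conditionaljit_demo(values):
    total = 0.0
    for value in values:
        total += value
    return total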
def disablenumba():
"""Set a global variable to disable numba."""
global donotusenumba
donotusenumba = True
# --------------------------- Correlation functions -------------------------------------------------
def check_autocorrelation(
corrscale,
thexcorr,
delta=0.1,
acampthresh=0.1,
aclagthresh=10.0,
displayplots=False,
detrendorder=1,
):
"""Check for autocorrelation in an array.
Parameters
----------
corrscale
thexcorr
delta
acampthresh
aclagthresh
displayplots
detrendorder
Returns
-------
sidelobetime
sidelobeamp
"""
lookahead = 2
peaks = tide_fit.peakdetect(thexcorr, x_axis=corrscale, delta=delta, lookahead=lookahead)
maxpeaks = np.asarray(peaks[0], dtype="float64")
if len(peaks[0]) > 0:
LGR.debug(peaks)
zeropkindex = np.argmin(abs(maxpeaks[:, 0]))
for i in range(zeropkindex + 1, maxpeaks.shape[0]):
if maxpeaks[i, 0] > aclagthresh:
return None, None
if maxpeaks[i, 1] > acampthresh:
sidelobetime = maxpeaks[i, 0]
sidelobeindex = tide_util.valtoindex(corrscale, sidelobetime)
sidelobeamp = thexcorr[sidelobeindex]
numbins = 1
while (sidelobeindex + numbins < np.shape(corrscale)[0] - 1) and (
thexcorr[sidelobeindex + numbins] > sidelobeamp / 2.0
):
numbins += 1
sidelobewidth = (
corrscale[sidelobeindex + numbins] - corrscale[sidelobeindex]
) * 2.0
fitstart = sidelobeindex - numbins
fitend = sidelobeindex + numbins
sidelobeamp, sidelobetime, sidelobewidth = tide_fit.gaussfit(
sidelobeamp,
sidelobetime,
sidelobewidth,
corrscale[fitstart : fitend + 1],
thexcorr[fitstart : fitend + 1],
)
if displayplots:
plt.plot(
corrscale[fitstart : fitend + 1],
thexcorr[fitstart : fitend + 1],
"k",
corrscale[fitstart : fitend + 1],
tide_fit.gauss_eval(
corrscale[fitstart : fitend + 1],
[sidelobeamp, sidelobetime, sidelobewidth],
),
"r",
)
plt.show()
return sidelobetime, sidelobeamp
return None, None
def shorttermcorr_1D(
data1, data2, sampletime, windowtime, samplestep=1, detrendorder=0, windowfunc="hamming",
):
"""Calculate short-term sliding-window correlation between two 1D arrays.
Parameters
----------
data1
data2
sampletime
windowtime
samplestep
detrendorder
windowfunc
Returns
-------
times
corrpertime
ppertime
"""
windowsize = int(windowtime // sampletime)
halfwindow = int((windowsize + 1) // 2)
times = []
corrpertime = []
ppertime = []
for i in range(halfwindow, np.shape(data1)[0] - halfwindow, samplestep):
dataseg1 = tide_math.corrnormalize(
data1[i - halfwindow : i + halfwindow],
detrendorder=detrendorder,
windowfunc=windowfunc,
)
dataseg2 = tide_math.corrnormalize(
data2[i - halfwindow : i + halfwindow],
detrendorder=detrendorder,
windowfunc=windowfunc,
)
thepcorr = sp.stats.stats.pearsonr(dataseg1, dataseg2)
times.append(i * sampletime)
corrpertime.append(thepcorr[0])
ppertime.append(thepcorr[1])
return (
np.asarray(times, dtype="float64"),
np.asarray(corrpertime, dtype="float64"),
np.asarray(ppertime, dtype="float64"),
)
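# Minimal self-contained sketch of the same sliding-window idea using only
# numpy (no rapidtide normalisation, detrending, or windowing); names and
# window settings are illustrative only.
def _sliding_pearson_sketch(sig1, sig2, windowsize, samplestep=1):
    halfwindow = int((windowsize + 1) // 2)
    rvals = []
    for i in range(halfwindow, len(sig1) - halfwindow, samplestep):
        seg1 = sig1[i - halfwindow : i + halfwindow]
        seg2 = sig2[i - halfwindow : i + halfwindow]
        rvals.append(np.corrcoef(seg1, seg2)[0, 1])
    return np.asarray(rvals)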
def shorttermcorr_2D(
data1,
data2,
sampletime,
windowtime,
samplestep=1,
laglimits=None,
weighting="None",
zeropadding=0,
windowfunc="None",
detrendorder=0,
display=False,
):
"""Calculate short-term sliding-window correlation between two 2D arrays.
Parameters
----------
data1
data2
sampletime
windowtime
samplestep
laglimits
weighting
zeropadding
windowfunc
detrendorder
display
Returns
-------
times
xcorrpertime
Rvals
delayvals
valid
"""
windowsize = int(windowtime // sampletime)
halfwindow = int((windowsize + 1) // 2)
if laglimits is not None:
lagmin = laglimits[0]
lagmax = laglimits[1]
else:
lagmin = -windowtime / 2.0
lagmax = windowtime / 2.0
LGR.debug(f"lag limits: {lagmin} {lagmax}")
"""dt = np.diff(time)[0] # In days...
fs = 1.0 / dt
nfft = nperseg
noverlap = (nperseg - 1)"""
dataseg1 = tide_math.corrnormalize(
data1[0 : 2 * halfwindow], detrendorder=detrendorder, windowfunc=windowfunc
)
dataseg2 = tide_math.corrnormalize(
data2[0 : 2 * halfwindow], detrendorder=detrendorder, windowfunc=windowfunc
)
thexcorr = fastcorrelate(dataseg1, dataseg2, weighting=weighting, zeropadding=zeropadding)
xcorrlen = np.shape(thexcorr)[0]
xcorr_x = (
np.arange(0.0, xcorrlen) * sampletime - (xcorrlen * sampletime) / 2.0 + sampletime / 2.0
)
xcorrpertime = []
times = []
Rvals = []
delayvals = []
valid = []
for i in range(halfwindow, np.shape(data1)[0] - halfwindow, samplestep):
dataseg1 = tide_math.corrnormalize(
data1[i - halfwindow : i + halfwindow],
detrendorder=detrendorder,
windowfunc=windowfunc,
)
dataseg2 = tide_math.corrnormalize(
data2[i - halfwindow : i + halfwindow],
detrendorder=detrendorder,
windowfunc=windowfunc,
)
times.append(i * sampletime)
xcorrpertime.append(
fastcorrelate(dataseg1, dataseg2, weighting=weighting, zeropadding=zeropadding)
)
(
maxindex,
thedelayval,
theRval,
maxsigma,
maskval,
failreason,
peakstart,
peakend,
) = tide_fit.findmaxlag_gauss(
xcorr_x,
xcorrpertime[-1],
lagmin,
lagmax,
1000.0,
refine=True,
useguess=False,
fastgauss=False,
displayplots=False,
)
delayvals.append(thedelayval)
Rvals.append(theRval)
if failreason == 0:
valid.append(1)
else:
valid.append(0)
if display:
plt.imshow(xcorrpertime)
return (
np.asarray(times, dtype="float64"),
np.asarray(xcorrpertime, dtype="float64"),
np.asarray(Rvals, dtype="float64"),
np.asarray(delayvals, dtype="float64"),
np.asarray(valid, dtype="float64"),
)
def calc_MI(x, y, bins=50):
"""Calculate mutual information between two arrays.
Notes
-----
From https://stackoverflow.com/questions/20491028/
optimal-way-to-compute-pairwise-mutual-information-using-numpy/
20505476#20505476
"""
c_xy = np.histogram2d(x, y, bins)[0]
mi = mutual_info_score(None, None, contingency=c_xy)
return mi
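# Illustrative usage of calc_MI (a sketch with synthetic data): the mutual
# information between two correlated variables should come out clearly larger
# than between two independent ones.
def _calc_mi_demo():
    rng = np.random.default_rng(0)
    x_demo = rng.standard_normal(10000)
    y_demo = x_demo + 0.5 * rng.standard_normal(10000)
    mi_dependent = calc_MI(x_demo, y_demo, bins=50)
    mi_independent = calc_MI(x_demo, rng.standard_normal(10000), bins=50)
    return mi_dependent, mi_independent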
@conditionaljit()
def mutual_info_2d(x, y, sigma=1, bins=(256, 256), fast=False, normalized=True, EPS=1.0e-6):
Compute (normalized) mutual information between two 1D variates from a joint histogram.
Parameters
----------
x : 1D array
first variable
y : 1D array
second variable
sigma : float, optional
Sigma for Gaussian smoothing of the joint histogram.
Default = 1.
bins : tuple, optional
fast : bool, optional
normalized : bool
If True, this will calculate the normalized mutual information from [1]_.
Default = True.
EPS : float, optional
Default = 1.0e-6.
Returns
-------
nmi: float
the computed similarity measure
Notes
-----
From <NAME>
References
----------
.. [1] Studholme, jhill & jhawkes (1998).
"A normalized entropy measure of 3-D medical image alignment".
in Proc. Medical Imaging 1998, vol. 3338, San Diego, CA, pp. 132-143.
"""
if fast:
xstart = bins[0][0]
xend = bins[0][-1]
ystart = bins[1][0]
yend = bins[1][-1]
numxbins = len(bins[0]) - 1
numybins = len(bins[1]) - 1
cuts = (x >= xstart) & (x < xend) & (y >= ystart) & (y < yend)
c = ((x[cuts] - xstart) / (xend - xstart) * numxbins).astype(np.int_)
c += ((y[cuts] - ystart) / (yend - ystart) * numybins).astype(np.int_) * numxbins
jh = np.bincount(c, minlength=numxbins * numybins).reshape(numxbins, numybins)
else:
jh, xbins, ybins =
|
np.histogram2d(x, y, bins=bins)
|
numpy.histogram2d
|
import os, sys
import argparse
import time
import random
import cv2
import numpy as np
import keras
from keras.utils import np_utils
#from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import Conv2D, MaxPooling2D, Flatten
from keras.layers import BatchNormalization, ReLU
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from keras.utils.training_utils import multi_gpu_model
import keras.backend.tensorflow_backend as K
import nsml
from nsml.constants import DATASET_PATH, GPU_NUM
IMSIZE = 120, 60
VAL_RATIO = 0.1
RANDOM_SEED = 1234
def bind_model(model):
def save(dir_name):
os.makedirs(dir_name, exist_ok=True)
model.save_weights(os.path.join(dir_name, 'model'))
print('model saved!')
def load(dir_name):
model.load_weights(os.path.join(dir_name, 'model'))
print('model loaded!')
def infer(data): # test mode
##### DO NOT CHANGE ORDER OF TEST DATA #####
X = ImagePreprocessing(data)
X = np.array(X)
X = np.expand_dims(X, axis=-1)
pred = model.predict_classes(X)  # model prediction results: 0-3
print('Prediction done!\n Saving the result...')
return pred
nsml.bind(save=save, load=load, infer=infer)
def Class2Label(cls):
lb = [0] * 4
lb[int(cls)] = 1
return lb
def DataLoad(imdir):
impath = [os.path.join(dirpath, f) for dirpath, dirnames, files in os.walk(imdir) for f in files if all(s in f for s in ['.jpg'])]
img = []
lb = []
print('Loading', len(impath), 'images ...')
for i, p in enumerate(impath):
img_whole = cv2.imread(p, 0)
h, w = img_whole.shape
h_, w_ = h, w//2
l_img = img_whole[:, w_:2*w_]
r_img = img_whole[:, :w_]
_, l_cls, r_cls = os.path.basename(p).split('.')[0].split('_')
if l_cls=='0' or l_cls=='1' or l_cls=='2' or l_cls=='3':
img.append(l_img); lb.append(Class2Label(l_cls))
if r_cls=='0' or r_cls=='1' or r_cls=='2' or r_cls=='3':
img.append(r_img); lb.append(Class2Label(r_cls))
print(len(img), 'data with label 0-3 loaded!')
return img, lb
def ImagePreprocessing(img):
# implement freely
h, w = IMSIZE
print('Preprocessing ...')
for i, im, in enumerate(img):
tmp = cv2.resize(im, dsize=(w, h), interpolation=cv2.INTER_AREA)
tmp = tmp / 255.
img[i] = tmp
print(len(img), 'images processed!')
return img
def SampleModelKeras(in_shape, num_classes):
model = Sequential()
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='same', input_shape=in_shape))
model.add(BatchNormalization(axis=-1))
model.add(ReLU())
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='same'))
model.add(BatchNormalization(axis=-1))
model.add(ReLU())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(filters=128, kernel_size=(3, 3), padding='same'))
model.add(BatchNormalization(axis=-1))
model.add(ReLU())
model.add(Conv2D(filters=128, kernel_size=(3, 3), padding='same'))
model.add(BatchNormalization(axis=-1))
model.add(ReLU())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(filters=256, kernel_size=(3, 3), padding='same'))
model.add(BatchNormalization(axis=-1))
model.add(ReLU())
model.add(Conv2D(filters=256, kernel_size=(3, 3), padding='same'))
model.add(BatchNormalization(axis=-1))
model.add(ReLU())
model.add(Flatten())
model.add(Dense(256*4*4, activation='relu'))
model.add(Dense(512, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
return model
def ParserArguments(args):
# Setting Hyperparameters
args.add_argument('--epoch', type=int, default=10)          # set the number of epochs
args.add_argument('--batch_size', type=int, default=8)      # set the batch size
args.add_argument('--learning_rate', type=float, default=1e-4)  # set the learning rate
args.add_argument('--num_classes', type=int, default=4)     # the number of classes to predict is 4
# DO NOT CHANGE (for nsml)
args.add_argument('--mode', type=str, default='train', help='Set to "test" when submitting.')
args.add_argument('--iteration', type=str, default='0',
                  help='Set to the checkpoint passed to the fork command. If no checkpoint option is given, the model from the last wall time is used.')
args.add_argument('--pause', type=int, default=0, help='Set to 1 when the model is loaded.')
config = args.parse_args()
return config.epoch, config.batch_size, config.num_classes, config.learning_rate, config.pause, config.mode
if __name__ == '__main__':
args = argparse.ArgumentParser()
nb_epoch, batch_size, num_classes, learning_rate, ifpause, ifmode = ParserArguments(args)
seed = 1234
np.random.seed(seed)
""" Model """
h, w = IMSIZE
model = SampleModelKeras(in_shape=(h, w, 1), num_classes=num_classes)
sgd = optimizers.SGD(lr=learning_rate, momentum=0.9, nesterov=True)
#adam = optimizers.Adam(lr=learning_rate, decay=1e-5)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['categorical_accuracy'])
bind_model(model)
if ifpause:  ## when in test mode
print('Inferring Start...')
nsml.paused(scope=locals())
if ifmode == 'train':  ### when in training mode
print('Training Start...')
images, labels = DataLoad(os.path.join(DATASET_PATH, 'train'))
images = ImagePreprocessing(images)
## shuffle the data
images = np.array(images)
images =
|
np.expand_dims(images, axis=-1)
|
numpy.expand_dims
|
def diffArea(nest, outlier = 0, data = 0, kinds = 'all', axis = 'probability', ROI = 20 , mu = 0, sigma = 1, weight = False, interpolator = 'linear', distribuition = 'normal',seed = None, plot = True):
"""
Return the error area between an analytic function and an estimated discretization of a distribution.
Parameters
----------
nest: int
The number of estimation points.
outlier: int, optional
Position of an outlier event, e.g. outlier = 50 will put an event at -50 and +50 if mu = 0.
Default is 0.
data: int, optional
If data > 0, random data will be used instead of analytic data.
Default is 0.
kinds: str or array, optional
Specifies which discretization scheme(s) to analyze
('Linspace', 'CDFm', 'PDFm', 'iPDF1', 'iPDF2', 'all').
Default is 'all'.
axis: str, optional
Specifies the x axis to analyze
('probability', 'derivative', '2nd_derivative', 'X').
Default is 'probability'.
ROI: int, optional
Specifies the number of regions of interest.
Default is 20.
mu: int, optional
Specifies the mean of the distribution.
Default is 0.
sigma: int, optional
Specifies the standard deviation of the distribution.
Default is 1.
weight: bool, optional
If True, each ROI will have a different weight in the analysis.
Default is False.
interpolator: str, optional
Specifies the kind of interpolation as a string
('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'
where 'zero', 'slinear', 'quadratic' and 'cubic' refer to a spline
interpolation of zeroth, first, second or third order) or as an
integer specifying the order of the spline interpolator to use.
Default is 'linear'.
distribuition: str, optional
Selects the distribution to analyze
('normal', 'lognormal').
Default is 'normal'.
seed: tuple, optional
Numpy random state (as returned by np.random.get_state()) used when data > 0.
Default is None.
plot: bool, optional
If True, a plot of the analysis is shown.
Default is True.
Returns
-------
a, [b, c]: float and list of ndarrays, i.e. area, [probROIord, areaROIord]
Returns the sum of the total error area and the corresponding 'x' and 'y' values.
"""
import numpy as np
from scipy.stats import norm, lognorm
from scipy.interpolate import interp1d
from numpy import exp
import matplotlib.pyplot as plt
from statsmodels.distributions import ECDF
from distAnalyze import pdf, dpdf, ddpdf, PDF, dPDF, ddPDF
area = []
n = []
data = int(data)
if distribuition == 'normal':
outlier_inf = outlier_sup = outlier
elif distribuition == 'lognormal':
outlier_inf = 0
outlier_sup = outlier
ngrid = int(1e6)
truth = pdf
if axis == 'probability':
truth1 = pdf
elif axis == 'derivative':
truth1 = dpdf
elif axis == '2nd_derivative':
truth1 = ddpdf
elif axis == 'X':
truth1 = lambda x,mu,sigma,distribuition: x
#else: return 'No valid axis'
probROIord = {}
areaROIord = {}
div = {}
if seed is not None:
np.random.set_state(seed)
if data:
if distribuition == 'normal':
d = np.random.normal(mu,sigma,data)
elif distribuition == 'lognormal':
d = np.random.lognormal(mu, sigma, data)
if kinds == 'all':
kinds = ['Linspace', 'CDFm', 'PDFm', 'iPDF1', 'iPDF2']
elif type(kinds) == str:
kinds = [kinds]
for kind in kinds:
if distribuition == 'normal':
inf, sup = norm.interval(0.9999, loc = mu, scale = sigma)
elif distribuition == 'lognormal':
inf, sup = lognorm.interval(0.9999, sigma, loc = 0, scale = exp(mu))
inf = lognorm.pdf(sup, sigma, loc = 0, scale = np.exp(mu))
inf = lognorm.ppf(inf, sigma, loc = 0, scale = np.exp(mu))
xgrid = np.linspace(inf,sup,ngrid)
xgridROI = xgrid.reshape([ROI,ngrid//ROI])
dx = np.diff(xgrid)[0]
if kind == 'Linspace':
if not data:
xest = np.linspace(inf-outlier_inf,sup+outlier_sup,nest)
else:
if distribuition == 'normal':
#d = np.random.normal(loc = mu, scale = sigma, size = data)
inf,sup = min(d),max(d)
xest = np.linspace(inf-outlier_inf,sup+outlier_sup,nest)
elif distribuition == 'lognormal':
#d = np.random.lognormal(mean = mu, sigma = sigma, size = data)
inf,sup = min(d),max(d)
xest = np.linspace(inf-outlier_inf,sup+outlier_sup,nest)
yest = pdf(xest,mu,sigma,distribuition)
elif kind == 'CDFm':
eps = 5e-5
yest = np.linspace(0+eps,1-eps,nest)
if distribuition == 'normal':
if not data:
xest = norm.ppf(yest, loc = mu, scale = sigma)
yest = pdf(xest,mu,sigma,distribuition)
else:
#d = np.random.normal(loc = mu, scale = sigma, size = data)
ecdf = ECDF(d)
inf,sup = min(d),max(d)
xest = np.linspace(inf,sup,data)
yest = ecdf(xest)
interp = interp1d(yest,xest,fill_value = 'extrapolate', kind = 'nearest')
yest = np.linspace(eps,1-eps,nest)
xest = interp(yest)
elif distribuition == 'lognormal':
if not data:
xest = lognorm.ppf(yest, sigma, loc = 0, scale = exp(mu))
yest = pdf(xest,mu,sigma,distribuition)
else:
#d = np.random.lognormal(mean = mu, sigma = sigma, size = data)
ecdf = ECDF(d)
inf,sup = min(d),max(d)
xest = np.linspace(inf,sup,nest)
yest = ecdf(xest)
interp = interp1d(yest,xest,fill_value = 'extrapolate', kind = 'nearest')
yest = np.linspace(eps,1-eps,nest)
xest = interp(yest)
elif kind == 'PDFm':
xest, yest = PDF(nest,mu,sigma, distribuition, outlier, data, seed)
elif kind == 'iPDF1':
xest, yest = dPDF(nest,mu,sigma, distribuition, outlier, data, 10, seed)
elif kind == 'iPDF2':
xest, yest = ddPDF(nest,mu,sigma, distribuition, outlier, data, 10, seed)
YY = pdf(xest,mu, sigma,distribuition)
fest = interp1d(xest,YY,kind = interpolator, bounds_error = False, fill_value = (YY[0],YY[-1]))
#fest = lambda x: np.concatenate([fest1(x)[fest1(x) != -1],np.ones(len(fest1(x)[fest1(x) == -1]))*fest1(x)[fest1(x) != -1][-1]])
yestGrid = []
ytruthGrid = []
ytruthGrid2 = []
divi = []
for i in range(ROI):
yestGrid.append([fest(xgridROI[i])])
ytruthGrid.append([truth(xgridROI[i],mu,sigma,distribuition)])
ytruthGrid2.append([truth1(xgridROI[i],mu,sigma,distribuition)])
divi.append(len(np.intersect1d(np.where(xest >= min(xgridROI[i]))[0], np.where(xest < max(xgridROI[i]))[0])))
diff2 = np.concatenate(abs((np.array(yestGrid) - np.array(ytruthGrid))*dx))
#diff2[np.isnan(diff2)] = 0
areaROI = np.sum(diff2,1)
divi = np.array(divi)
divi[divi == 0] = 1
try:
probROI = np.mean(np.sum(ytruthGrid2,1),1)
except:
probROI = np.mean(ytruthGrid2,1)
probROIord[kind] = np.sort(probROI)
index = np.argsort(probROI)
areaROIord[kind] = areaROI[index]
#deletes = ~np.isnan(areaROIord[kind])
#areaROIord[kind] = areaROIord[kind][deletes]
#probROIord[kind] = probROIord[kind][deletes]
area = np.append(area,np.sum(areaROIord[kind]))
n = np.append(n,len(probROIord[kind]))
div[kind] = divi[index]
if plot:
if weight:
plt.semilogy(probROIord[kind],areaROIord[kind]*div[kind],'-o',label = kind, ms = 3)
else: plt.plot(probROIord[kind],areaROIord[kind],'-o',label = kind, ms = 3)
plt.yscale('log')
plt.xlabel(axis)
plt.ylabel('Error')
plt.legend()
#plt.title('%s - Pontos = %d, div = %s - %s' %(j,nest, divs,interpolator))
return area,[probROIord,areaROIord]
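# Illustrative call (a sketch): error area of a 100-point 'Linspace'
# discretisation of a standard normal, without plotting. Variable names are
# made up for the example.
def _diffarea_demo():
    area_demo, (prob_demo, err_demo) = diffArea(
        100, kinds='Linspace', distribuition='normal', plot=False)
    return area_demo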
def diffArea3(nest = None, outlier = 0, data = 0, kinds = 'all', axis = 'probability', ROI = 20 , mu = 0, sigma = 1, weight = False, interpolator = 'linear', distribuition = 'normal', plot3d = False, seed=None, hold = False):
"""
Return the error area between an analytic function and an estimated discretization of a distribution.
Parameters
----------
nest: ndarray, int, optional
The array of estimation points (e.g. nest = [100,200,300,400,500]).
if nest = None:
nest = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140,
150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250, 300,
350, 400, 450, 500, 600, 700, 800, 900, 1000, 1500, 2000,
2500, 3000, 3500, 4000, 4500, 5000]
Default is None.
data: int, optional
If data > 0, random data will be used instead of analytic data.
Default is 0.
outlier: int, optional
Position of an outlier event, e.g. outlier = 50 will put an event at -50 and +50 if mu = 0.
Default is 0.
kinds: str or array, optional
Specifies which discretization scheme(s) to analyze
('Linspace', 'CDFm', 'PDFm', 'iPDF1', 'iPDF2', 'all').
Default is 'all'.
axis: str, optional
Specifies the x axis to analyze
('probability', 'derivative', '2nd_derivative', 'X').
Default is 'probability'.
ROI: int, optional
Specifies the number of regions of interest.
Default is 20.
mu: int, optional
Specifies the mean of the distribution.
Default is 0.
sigma: int, optional
Specifies the standard deviation of the distribution.
Default is 1.
weight: bool, optional
If True, each ROI will have a different weight in the analysis.
Default is False.
interpolator: str, optional
Specifies the kind of interpolation as a string
('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'
where 'zero', 'slinear', 'quadratic' and 'cubic' refer to a spline
interpolation of zeroth, first, second or third order) or as an
integer specifying the order of the spline interpolator to use.
Default is 'linear'.
distribuition: str, optional
Selects the distribution to analyze
('normal', 'lognormal').
Default is 'normal'.
plot3d: bool, optional
If True, a 3D plot of Nest x error x axis is shown.
If False, a 2D plot of Nest x Area is shown.
Default is False.
seed: tuple, optional
Numpy random state (as returned by np.random.get_state()) used when data > 0.
Default is None.
hold: bool, optional
If False, a new plot is drawn in a new figure; otherwise, the plot is
drawn in the current figure.
Default is False.
Returns
-------
a, b, c
If plot3d is True, returns the meshgrid x, y and log10 of the error surface;
otherwise returns the number of estimation points and the error area.
"""
#nest1 = np.concatenate([list(range(10,250,10)),list(range(250,550,50)),list(range(600,1100,100)),list(range(1500,5500,500))])
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from distAnalyze import diffArea
if nest is None:
nest = np.concatenate([list(range(10,250,10)),list(range(250,550,50)),list(range(600,1100,100)),list(range(1500,5500,500))])
if seed is not None:
np.random.set_state(seed)
else:
seed = np.random.get_state()
if kinds == 'all':
kinds = ['Linspace', 'CDFm', 'PDFm', 'iPDF1', 'iPDF2']
elif type(kinds) == str:
kinds = [kinds]
probROIord = {}
areaROIord = {}
area = {}
for n in nest:
area[n],[probROIord[n],areaROIord[n]] = diffArea(n, outlier, data, kinds, axis, ROI, mu, sigma, weight, interpolator, distribuition, seed, plot = False)
#x = np.sort(nest*ROI) #Nest
#y = np.array(list(probROIord[nest[0]][list(probROIord[nest[0]].keys())[0]])*len(nest)) #Prob
area2 = {kinds[0]:[]}
for k in range(len(kinds)):
area2[kinds[k]] = []
for n in nest:
area2[kinds[k]].append(area[n][k])
x,y = np.meshgrid(nest,list(probROIord[nest[0]][list(probROIord[nest[0]].keys())[0]]))
area = area2
# =============================================================================
# z = {} #error
#
# for k in kinds:
# z[k] = []
# for i in nest:
# z[k].append(areaROIord[i][k])
# z[k] = np.reshape(np.concatenate(z[k]),x.shape,'F')
# =============================================================================
if plot3d:
fig = plt.figure()
ax = fig.gca(projection='3d')
z = {} #error
for k in kinds:
z[k] = []
for i in nest:
z[k].append(areaROIord[i][k])
z[k] = np.reshape(np.concatenate(z[k]),x.shape,'F')
ax.plot_surface(x,y,np.log10(z[k]),alpha = 0.4, label = k, antialiased=True)
ax.set_xlabel('Nº of estimation points', fontsize = 20)
ax.set_xticks(nest)
ax.set_ylabel(axis, fontsize = 20)
ax.zaxis.set_rotate_label(False)
ax.set_zlabel('Sum of errors', fontsize = 20, rotation = 90)
ax.view_init(20, 225)
plt.draw()
#ax.yaxis.set_scale('log')
plt.legend(prop = {'size':25}, loc = (0.6,0.5))
plt.show()
return x,y,np.log10(z[k])
else:
if not hold:
plt.figure(figsize = (12,8),dpi = 100)
for k in kinds:
plt.plot(nest,area[k], 'o-', label = k)
plt.xlabel('Nº of estimation points', fontsize = 30)
plt.ylabel('Error', fontsize = 30)
plt.legend(prop = {'size':18})
plt.yscale('log')
plt.tick_params(labelsize = 18)
plt.tight_layout()
#plt.savefig("/media/rafael/DiscoCompartilhado/Faculdade/Bolsa - Atlas/KernelDensityEstimation-Python/Kernel-Discretization-Processes/Figures_log/error_sigma_%.2f_interpolator_%s.png"%(sigma,interpolator))
return nest, area
# =============================================================================
# x, y = np.meshgrid(nest,sigma)
#
# z = {}
# kinds = ['Linspace', 'CDFm', 'PDFm', 'iPDF1', 'iPDF2']
# for k in kinds:
# z[k] = []
# for i in range(len(sigma)):
# z[k].append(area2[i][k])
# z[k] = np.reshape(np.concatenate(z[k]),x.shape)
#
# fig = plt.figure()
# ax = fig.gca(projection='3d')
#
# for k in kinds:
# ax.plot_surface(x,y,np.log10(z[k]),alpha = 0.4, label = k, antialiased=True)
#
# =============================================================================
def PDF(pts,mu,sigma, distribuition, outlier = 0, data = 0, seed = None):
from scipy.stats import norm, lognorm
import numpy as np
from scipy.interpolate import interp1d
from someFunctions import ash
eps = 5e-5
if distribuition == 'normal':
outlier_inf = outlier_sup = outlier
if not data:
inf, sup = norm.interval(0.9999, loc = mu, scale = sigma)
X1 = np.linspace(inf-outlier,mu,int(1e6))
Y1 = norm.pdf(X1, loc = mu, scale = sigma)
interp = interp1d(Y1,X1)
y1 = np.linspace(Y1[0],Y1[-1],pts//2+1)
x1 = interp(y1)
X2 = np.linspace(mu,sup+outlier,int(1e6))
Y2 = norm.pdf(X2, loc = mu, scale = sigma)
interp = interp1d(Y2,X2)
y2 = np.flip(y1,0)
x2 = interp(y2)
else:
np.random.set_state(seed)
d = np.random.normal(mu,sigma,data)
inf,sup = min(d)-outlier_inf,max(d)+outlier_sup
#yest,xest = np.histogram(d,bins = 'fd',normed = True)
xest,yest = ash(d)
xest = np.mean(np.array([xest[:-1],xest[1:]]),0)
M = np.where(yest == max(yest))[0][0]
m = np.where(yest == min(yest))[0][0]
interpL = interp1d(yest[:M+1],xest[:M+1], assume_sorted = False, fill_value= 'extrapolate')
interpH = interp1d(yest[M:],xest[M:], assume_sorted= False, fill_value='extrapolate')
y1 = np.linspace(yest[m]+eps,yest[M],pts//2+1)
x1 = interpL(y1)
y2 = np.flip(y1,0)
x2 = interpH(y2)
elif distribuition == 'lognormal':
outlier_inf = 0
outlier_sup = outlier
inf, sup = lognorm.interval(0.9999, sigma, loc = 0, scale = np.exp(mu))
inf = lognorm.pdf(sup, sigma, loc = 0, scale = np.exp(mu))
inf = lognorm.ppf(inf, sigma, loc = 0, scale = np.exp(mu))
if not data:
mode = np.exp(mu - sigma**2)
X1 = np.linspace(inf-outlier_inf,mode,int(1e6))
Y1 = lognorm.pdf(X1, sigma, loc = 0, scale = np.exp(mu))
interp = interp1d(Y1,X1)
y1 = np.linspace(Y1[0],Y1[-1],pts//2+1)
x1 = interp(y1)
X2 = np.linspace(mode,sup+outlier_sup,int(1e6))
Y2 = lognorm.pdf(X2, sigma, loc = 0, scale = np.exp(mu))
interp = interp1d(Y2,X2)
y2 = np.flip(y1,0)
x2 = interp(y2)
else:
np.random.set_state(seed)
d = np.random.lognormal(mu,sigma,data)
#inf,sup = min(d)-outlier_inf,max(d)+outlier_sup
#yest,xest = np.histogram(d,bins = 'fd',normed = True)
#xest = np.mean(np.array([xest[:-1],xest[1:]]),0)
xest,yest = ash(d)
yest = yest[xest<sup]
xest = xest[xest<sup]
M = np.where(yest == max(yest))[0][0]
m = np.where(yest == min(yest))[0][0]
interpL = interp1d(yest[:M+1],xest[:M+1], fill_value = 'extrapolate')
interpH = interp1d(yest[M:],xest[M:])
y1 = np.linspace(yest[m]+eps,yest[M],pts//2+1)
x1 = interpL(y1)
y2 = np.flip(y1,0)
x2 = interpH(y2)
X = np.concatenate([x1[:-1],x2])
Y =
|
np.concatenate([y1[:-1],y2])
|
numpy.concatenate
|
import numpy as np
import math
import pickle
import matplotlib.pyplot as plt
# matplotlib.use('Agg') # Added for plotting
plt.style.use('seaborn-paper') # Added for plotting
def compute_rbf_encoding(input, rbf_center, rbf_sig=0.15):
"""
Compute the rbf encoding from trained rbf center to new input
m := n_frames * n_test_seq
n := n_neurons * n_train_seq
:param input: (n_feature, m)
:param rbf_center:(n_feature, n)
:param rbf_sig:
:return:
"""
# f_it = np.zeros((input.shape[1], rbf_center.shape[1]))
# for m in range(input.shape[1]):
# for n in range(rbf_center.shape[1]):
# f_it_tmp = input[:, m] - rbf_center[:, n]
# f_it_tmp = np.exp(-np.linalg.norm(f_it_tmp, ord=2, axis=2) ** 2 / 2 / rbf_sig ** 2)
# f_it[m, n] = f_it_tmp
# compute difference between rbf center and input for each frame/neurons
f_it = [[input[:, m] - rbf_center[:, n] for m in range(input.shape[1])] for n in range(rbf_center.shape[1])]
# apply gaussian activation to each
f_it = np.exp(-np.linalg.norm(f_it, ord=2, axis=2)**2 / 2 / rbf_sig**2)
return f_it
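# Illustrative usage (a sketch with random data): encode 6 input frames
# against 8 trained RBF centres in a 10-dimensional feature space. With the
# list comprehension above, the result is indexed as (n_centres, n_inputs).
def _rbf_encoding_demo():
    rng = np.random.default_rng(0)
    inputs = rng.random((10, 6))   # (n_feature, m)
    centres = rng.random((10, 8))  # (n_feature, n)
    return compute_rbf_encoding(inputs, centres, rbf_sig=0.15)  # shape (8, 6)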
def reshape_rbf(rbf, m_frame, m_test, n_neuron, n_train):
"""
this function reshape the matrix from (n, m) dimension to (m_frame, m_test, n_neuron, n_train)
with
m := m_frame * m_test_seq
n := n_neuron * n_train_seq
Note: usually n_frame = n_neuron
:param rbf:
:return:
"""
# declare new array
F_IT_rbf = np.zeros((m_frame, m_test, n_neuron, n_train))
# reshape array
for n in range(n_train):
for m in range(m_test):
n_start = n * n_neuron
m_start = m * m_frame
F_IT_rbf[:, m, :, n] = rbf[n_start:n_start+n_neuron, m_start:m_start+m_frame]
return F_IT_rbf
def normalize_fft_kernel(s, A, alpha, beta, epsilon=.1):
"""
Normalize the Fourier kernel as
norm := (1 - beta)/alpha + beta / (epsilon + sum(s))
s_norm := A * s * norm
The goal is to flatten the integral over the field, since the integration may add to the amplitude due to outliers
away from the peak.
This is mainly visible when a snapshot neuron fires for multiple frames, since those frames appear several times in the
sequence (think of neutral frames).
beta blends between a pure 1/alpha normalization and a pure 1/sum(s) normalization.
epsilon avoids a division by zero but could also be fine-tuned.
The amplitude A allows boosting the kernel after normalization.
:return: the normalized kernel s_norm
"""
# OLD -> made no mathematical sense to use the squared root
# norm_Sf_tmp = np.squeeze(Samp * Sff_tmp / (2 + (np.sum(Sff_tmp)) ** 0.5))
norm = ((1 - beta) / alpha) + (beta / (epsilon + np.sum(s)))
s_norm = np.squeeze(A * s * norm)
return s_norm
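# Tiny numeric illustration of the normalization above (made-up values):
# with A=25, alpha=40, beta=0.4, epsilon=0.1 and sum(s)=10, the factor is
# norm = (1 - 0.4)/40 + 0.4/(0.1 + 10) ~= 0.0546, so each sample of s is
# scaled by roughly A * norm ~= 1.37.
def _normalize_fft_kernel_demo():
    s = np.ones(10)  # made-up kernel with sum(s) = 10
    return normalize_fft_kernel(s, A=25, alpha=40, beta=0.4)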
def remove_neutral_frames(F_IT, config):
neutral_index = config['neutral_frames_idx']
for i in neutral_index:
F_IT[i[0]:i[1]] = 0
F_IT[:, i[0]:i[1]] = 0
return F_IT
def reverse_sequence(seq, n_seq, seq_length):
rev_seq = np.zeros(seq.shape)
for s in range(n_seq):
start = s * seq_length
chopped = seq[:, start:start+seq_length]
print("shape chopped", np.shape(chopped))
flip = np.flip(seq[:, start:start+seq_length], axis=1)
rev_seq[:, start:start+seq_length] = flip
return rev_seq
def plot_integral(integral, save_name):
max_integral = np.amax(integral)
plt.figure()
plt.plot(integral)
plt.plot([80, 80], [0, max_integral])
plt.plot([160, 160], [0, max_integral])
plt.savefig(save_name)
def compute_expression_neurons(F_IT, config, do_plot=1):
print("[dface] Input shape F_IT", np.shape(F_IT))
# remove neutral frames so the neural field is acting only on the expression sequence
if config.get('remove_neutral_frames') is not None:
F_IT = remove_neutral_frames(F_IT, config)
seq_length = config['batch_size']
n_test_seq = F_IT.shape[1] // seq_length
n_train_seq = F_IT.shape[0] // seq_length
print("n_train_seq", n_train_seq)
print("n_test_seq", n_test_seq)
# reshape R_IT_rbfc from (n, m) to (m_frame, m_test, n_neuron, n_train)
F_IT = reshape_rbf(F_IT, seq_length, n_test_seq, seq_length, n_train_seq)
print("[dface] re-shape F_IT", np.shape(F_IT))
# Initialize Neural Field
"""
Parameters to modify to fine-tune the neural field, in order of importance:
Aker: amplitude of the kernel; try to get the field activated on the diagonal. The more you add,
the more the diagonal will start to lurch.
Bker: offset of the kernel amplitude; it allows making the kernel inhibitory outside its peak.
dker: tunes the speed of the travelling pulse and corrects the lurching. To test sequence selectivity,
set the parameter to a negative value and check that the field is indeed sequence selective.
winh: how strongly each pattern inhibits the others.
Samp/s_alpha/s_beta: amplitude and normalization of the Fourier kernel; in our case a kernel amplitude
around 75 was good. The goal is to flatten the kernel while keeping its normalization amplitude roughly
similar across the fields.
"""
Aker = 1.15 # 1.1 kernel amplitude
Bker = -0.5 # kernel offset
dker = 3.1 # asymmetric shift
winh = 0.5 # cross-pattern inhibition
Samp = 25 # amplitude factor of normalization
s_alpha = 40 # minimum normalization factor
s_beta = 0.4 # blending parameters
"""
Following parameters were not touched to tune the neural field
"""
sigker = 2.5 # interaction kernel
h = 1 # resting level
tau = 5 # time constant
sigs = 1.8 # gaussian lurring of input distribution
# ----------------------------------------------------------
# ---------------------- AMARY FIELD -----------------------
# ----------------------------------------------------------
# create the interaction kernel
xs = np.arange(0, seq_length)
xsft = math.floor(seq_length / 2)
hsft = 0 * xs
hsft[xsft] = 1
hsft = hsft.reshape(1, hsft.shape[0])
# build interaction kernel
wx = Aker * np.exp(-(xs - xsft - dker) ** 2 / 2 / sigker ** 2) + Bker
wx = wx.reshape(1, wx.shape[0])
wx = np.round(wx, 4)
wx = np.fft.ifft(np.multiply(np.fft.fft(wx), np.conj(np.fft.fft(hsft)))) # trick to center the kernel
fftwx = np.fft.fft(np.transpose(wx), axis=0)
# create input smoothing kernel
sx = np.exp(-(xs - xsft) ** 2 / 2 / sigs ** 2)
sx = np.fft.ifft(np.multiply(np.fft.fft(sx), np.conj(np.fft.fft(hsft)))) # trick to center the kernel
# fourrier transform of the input kernel
fftsx = np.fft.fft(np.transpose(sx), axis=0)
# initialize neural field and define UF, UFA, Sf_tmp
Uf0 = - np.ones((seq_length, n_train_seq)) * h
UF = np.zeros((seq_length, n_train_seq, seq_length))
UFA = np.zeros((seq_length, n_train_seq, seq_length, n_test_seq))
Sf_tmp = np.zeros((seq_length, n_train_seq, seq_length))
# iterate neural field
ODIM = 1 - np.identity(n_train_seq)
sf_integral = []
sf_integral_norm = []
for m in range(n_test_seq): # number of testing condition
UF[:, :, 0] = Uf0 # initialization for time 1
for n in range(seq_length):
# S_tmp = np.squeeze(F_IT[n, m, :, :]) # -> 80x3
S_tmp =
|
np.squeeze(F_IT[:, m, n, :])
|
numpy.squeeze
|
#!/usr/bin/env python
# coding: utf-8
# ## Questionário 61 (Q61)
#
#
# Guidelines:
#
# - Record your answers in the questionnaire of the same name on SIGAA.
# - The time to record your answers in the questionnaire will be 10 minutes. Therefore, solve the questions first and record them afterwards.
# - There will be only 1 (one) answer attempt.
# - Submit your source file (the one used to solve the questions) in _.ipynb_ format via SIGAA, attaching it to the Task named "Envio de arquivo" for this questionnaire.
#
# *Note:* the source file will be used only as proof that the task was carried out. No assessment will be made of programming style.
#
# <hr>
# In[2]:
import sympy as sym
from sympy import Symbol, pprint
import numpy as np
import matplotlib.pyplot as plt
# **Question 1.** Look at the figure below and judge the following statements.
#
# ```{figure} ../figs/q/q61.png
# ---
# width: 300px
# name: convex
# ---
# ```
#
# i) there is one convex function among the four plotted.
#
# ii) one of the plotted functions is partially convex.
#
# iii) two of the plotted functions are not convex.
#
# Mark the correct alternative.
#
# A. Only i) and ii) are correct.
#
# B. Only i) is correct.
#
# C. Only i) and iii) are correct.
#
# D. None of the above.
# In[7]:
plt.figure(figsize=(14,4))
plt.subplot(141)
x1 = np.linspace(-10, 10, 100)
plt.plot(np.sin(x1),c='r')
plt.xticks([]); plt.yticks([]);
plt.title('(a)')
plt.subplot(142)
x2 = np.linspace(-2, 2, 100)
plt.plot(x2, np.exp(x2)*10*np.sin(6*x2))
plt.xticks([]); plt.yticks([]);
plt.title('(b)')
plt.subplot(143)
x3 = np.arange(-100, 100, 1)
plt.plot(x3, x3**2, c='orange')
plt.xticks([]); plt.yticks([]);
plt.title('(c)')
plt.subplot(144)
x4 = np.arange(-100, 0, 1)
plt.plot(x4, x4**3,c='m')
plt.xticks([]); plt.yticks([]);
plt.title('(d)')
plt.show()
# <hr>
#
# ## Answer key
# Alternative **A**
# <hr>
#
# **Question 2.** The following function simulates the _action potential_ curve of a membrane:
#
# $$P(x) = \dfrac{1.0}{(x - 0.5)^2 + 0.01} - \dfrac{1.0}{(x - 0.8)^2 + 0.04} - 70.$$
#
# Use symbolic computation to calculate an approximation of $P'(x=0)$ and mark the correct alternative.
#
# A. -67.62
#
#
# B. 0.25
#
#
# C. 11.33
#
#
# D. 0.00
#
# Note: Use `sympy.subs(x,x0)`.
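# A possible solution sketch for Question 2 (not an official answer key):
# differentiate P symbolically and evaluate the derivative at x = 0 via subs.
x = sym.Symbol('x')
P = 1.0/((x - 0.5)**2 + 0.01) - 1.0/((x - 0.8)**2 + 0.04) - 70
dP = sym.diff(P, x)
print(dP.subs(x, 0))  # approximately 11.33, i.e. alternative C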
# In[3]:
x1 =
|
np.random.normal(0,1,10)
|
numpy.random.normal
|
#!/usr/bin/env python
import numpy as np
import sklearn.metrics
import sklearn.metrics.pairwise
from scipy.stats.stats import pearsonr
from sklearn.metrics.cluster import normalized_mutual_info_score
import matplotlib.pyplot as plt; plt.rcdefaults()
import matplotlib.pyplot as plt
from sklearn.preprocessing import KBinsDiscretizer
from opt_gaussian import *
import pandas as pd
import numpy as np
import ppscore as pps
# compute normalized HSIC between X,Y
# if sigma_type = mpd, it uses median of pairwise distance
# if sigma_type = opt, it uses optimal
def ℍ(X,Y, X_kernel='Gaussian', Y_kernel='Gaussian', sigma_type='opt'):
def get_γ(X,Y, sigma_type):
if sigma_type == 'mpd':
σ = np.median(sklearn.metrics.pairwise_distances(X))	# σ from the median of pairwise distances
else:
optimizer = opt_gaussian(X,Y, Y_kernel=Y_kernel)
optimizer.minimize_H()
σ = optimizer.result.x[0]
if σ < 0.01: σ = 0.05 # ensure that σ is not too low
γ = 1.0/(2*σ*σ)
return γ
if len(X.shape) == 1: X = np.reshape(X, (X.size, 1))
if len(Y.shape) == 1: Y = np.reshape(Y, (Y.size, 1))
n = X.shape[0]
if X_kernel == 'linear': Kᵪ = X.dot(X.T)
if Y_kernel == 'linear': Kᵧ = Y.dot(Y.T)
if X_kernel == 'Gaussian':
γ = get_γ(X,Y, sigma_type)
Kᵪ = sklearn.metrics.pairwise.rbf_kernel(X, gamma=γ)
if Y_kernel == 'Gaussian':
γ = get_γ(X, Y, sigma_type)
Kᵧ = sklearn.metrics.pairwise.rbf_kernel(Y, gamma=γ)
#np.fill_diagonal(Kᵪ, 0)
#np.fill_diagonal(Kᵧ, 0)
HKᵪ = Kᵪ - np.mean(Kᵪ, axis=0) # equivalent to HKᵪ = H.dot(Kᵪ)
HKᵧ = Kᵧ - np.mean(Kᵧ, axis=0) # equivalent to HKᵧ = H.dot(Kᵧ)
Hᵪᵧ= np.sum(HKᵪ*HKᵧ)
Hᵪ = np.linalg.norm(HKᵪ) # equivalent to np.sqrt(np.sum(KᵪH*KᵪH))
Hᵧ = np.linalg.norm(HKᵧ) # equivalent to np.sqrt(np.sum(KᵧH*KᵧH))
H = Hᵪᵧ/( Hᵪ * Hᵧ )
return H
def double_center(Ψ):
HΨ = Ψ - np.mean(Ψ, axis=0) # equivalent to Γ = Ⲏ.dot(Kᵧ).dot(Ⲏ)
HΨH = (HΨ.T - np.mean(HΨ.T, axis=0)).T
return HΨH
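# Quick sketch of the double-centering property: after double_center, every
# row mean and every column mean of the kernel matrix is (numerically) zero.
def _double_center_demo():
    K = np.random.rand(5, 5)
    G = double_center(K)
    return np.allclose(G.mean(axis=0), 0) and np.allclose(G.mean(axis=1), 0)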
if __name__ == '__main__':
n = 300
# Perfect Linear Data
dat = np.random.rand(n,1)
plinear_data = np.hstack((dat,dat)) + 1
df = pd.DataFrame(data=plinear_data, columns=["x", "y"])
enc = KBinsDiscretizer(n_bins=10, encode='ordinal')
XP_data_nmi = np.squeeze(enc.fit_transform(np.atleast_2d(plinear_data[:,0]).T))
enc = KBinsDiscretizer(n_bins=10, encode='ordinal')
YP_data_nmi = np.squeeze(enc.fit_transform(np.atleast_2d(plinear_data[:,1]).T))
plinear_pc = np.round(pearsonr(plinear_data[:,0], plinear_data[:,1])[0], 2)
plinear_nmi = np.round(normalized_mutual_info_score(XP_data_nmi, YP_data_nmi),2)
plinear_hsic = np.round(ℍ(plinear_data[:,0], plinear_data[:,1]),2)
plinear_pps = np.round(pps.score(df, "x", "y")['ppscore'],2)
print('Linear Relationship:')
print('\tCorrelation : ', plinear_pc)
print('\tNMI : ', plinear_nmi)
print('\tpps : ', plinear_pps)
print('\tHSIC : ', plinear_hsic)
# Linear Data
dat = np.random.rand(n,1)
linear_data = np.hstack((dat,dat)) + 0.04*
|
np.random.randn(n,2)
|
numpy.random.randn
|
__author__ = 'Killua'
import numpy as np
class OLS:
""" Ordinary Least Square
"""
# variable
beta = None # regression coefficient
y = None # regression output
data = None # regression input
intercept = False # whether to add intercept term
# methods
def __init__(self, y=None, data=None, intercept=False):
""" Construction method for OLS class
:param y: regression output
:param data: regression input in shape of (n_samples, n_features)
"""
y = np.matrix(y)
data = np.matrix(data)
self.y = y
self.data = data
self.intercept = intercept
if y is not None and data is not None:
self.fit(y, data)
def fit(self, y, data):
""" fitting least square coefficient
:param y: regression output
:param data: regression input data
:return: regression coefficient
"""
# Check input
if y.shape[0] != data.shape[0]:
raise ValueError("Size mismatch between regression input and output")
# Process
y = np.matrix(y)
self.y = y
if self.intercept:
data = np.lib.pad(data, ((0, 0), (1, 0)), "constant", constant_values=1)
data = np.matrix(data)
self.data = data
# Fit
self.beta = np.linalg.inv(np.transpose(data) * data) *
|
np.transpose(data)
|
numpy.transpose
|
#!/usr/bin/python
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2019 Mundi Web Services
# Licensed under the 3-Clause BSD License; you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# https://opensource.org/licenses/BSD-3-Clause
#
# Author : Dr. <NAME>
#
# Contact email: <EMAIL>
# =============================================================================
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import folium
from folium import plugins
import datetime
import xarray
import branca
import geojsoncontour
from mpl_toolkits.basemap import Basemap
import ipywidgets as widgets
import ftputil
import pandas as pd
import os
from ipywidgets import interact, interactive, fixed, interact_manual
import math
from pathlib import Path
from collections import Iterable
from PIL import Image
### Cmems Functions ##############################################################
class Cmems:
################################# MODEL AND SATELLITE PRODUCTS #################################################
################################################################################################################
############################################################################################
#------------------------------------------------------------------------------------------
# DOWNLOAD THE FILE (For MODEL AND SATELLITE PRODUCTS)
#------------------------------------------------------------------------------------------
###########################################################################################
@staticmethod
def download_Product(user,password,Product):
########## CASE 1 (Product=='Model') : Get the list of all model products offered by the cmems catalog
if Product=='Model':
Model_products=[]
# connect to CMEMS FTP
with ftputil.FTPHost('nrt.cmems-du.eu', user, password) as ftp_host:
ftp_host.chdir('Core')
product_list=[]
product_list = ftp_host.listdir(ftp_host.curdir)
for product in product_list:
items = product.split('_')
# conditions to select only model products
if 'OBSERVATIONS' not in items and 'MULTIOBS' not in items and 'INSITU' not in items:
Model_products.append(product)
data = {'MODEL PRODUCTS': []}
#-----------------------------------------------------------------------------------------------------
########## CASE 2 (Product=='Satellite') : Get the list of all satellite products offered by the cmems catalog
elif Product=='Satellite':
Model_products=[]
# connect to CMEMS FTP
with ftputil.FTPHost('nrt.cmems-du.eu', user, password) as ftp_host:
ftp_host.chdir('Core')
product_list=[]
product_list = ftp_host.listdir(ftp_host.curdir)
for product in product_list:
items = product.split('_')
# conditions to select only satellite products
if ('MULTIOBS' in items or 'OBSERVATIONS' in items) and 'INSITU' not in items:
Model_products.append(product)
data = {'SATELLITE OBSERVATION PRODUCTS': []}
#-----------------------------------------------------------------------------------------------------
########## Initialize the widgets ------------------------------------------------------------------
style = {'description_width': 'initial'}
if Product=='Model':
x_widget = widgets.Dropdown(layout={'width': 'initial'},
options=Model_products,
value=Model_products[4],
description='Product:',
disabled=False)
elif Product=='Satellite':
x_widget = widgets.Dropdown(layout={'width': 'initial'},
options=Model_products,
value=Model_products[52],
description='Product:',
disabled=False)
product_name=x_widget.value
with ftputil.FTPHost('nrt.cmems-du.eu', user, password) as ftp_host:
ftp_host.chdir("Core"+'/'+product_name)
product_list2 = ftp_host.listdir(ftp_host.curdir)
y_widget = widgets.RadioButtons(layout={'width': 'initial'},options=product_list2,value=product_list2[0],description='Available data type :',style=style)
product_name2=y_widget.value
with ftputil.FTPHost('nrt.cmems-du.eu', user, password) as ftp_host:
ftp_host.chdir("Core"+'/'+product_name +'/'+product_name2)
product_list3 = ftp_host.listdir(ftp_host.curdir)
z_widget = widgets.Dropdown(layout={'width': 'initial'},options=product_list3,value=product_list3[3],description='Year:')
product_name3=z_widget.value
with ftputil.FTPHost('nrt.cmems-du.eu', user, password) as ftp_host:
ftp_host.chdir("Core"+'/'+product_name +'/'+product_name2+'/'+product_name3)
product_list4 = ftp_host.listdir(ftp_host.curdir)
w_widget = widgets.Dropdown(layout={'width': 'initial'},options=product_list4,value=product_list4[5],description='Month:')
product_name4=w_widget.value
with ftputil.FTPHost('nrt.cmems-du.eu', user, password) as ftp_host:
ftp_host.chdir("Core"+'/'+product_name +'/'+product_name2+'/'+product_name3+'/'+product_name4)
product_list5 = ftp_host.listdir(ftp_host.curdir)
i_widget = widgets.Dropdown(layout={'width': 'initial'},options=product_list5,value=product_list5[3],description='File:')
#-----------------------------------------------------------------------------------------------------
############# Define a function that updates the content of (y_widget,z_widget,w_widget,i_widget) based on what we select for x_widget
def update(*args):
product_name=x_widget.value
# Get the list of the available data offered by the selected product
with ftputil.FTPHost('nrt.cmems-du.eu', user, password) as ftp_host:
ftp_host.chdir("Core"+'/'+product_name)
product_list2 = ftp_host.listdir(ftp_host.curdir)
# Get the content of y_widget based on x_widget.value
y_widget.options=product_list2
product_name2=y_widget.value
with ftputil.FTPHost('nrt.cmems-du.eu', user, password) as ftp_host:
ftp_host.chdir("Core"+'/'+product_name +'/'+product_name2)
product_list3 = ftp_host.listdir(ftp_host.curdir)
# Get the content of the widgets based on our selection for different cases:
# case 1 : Get the content of the widgets based on the value of y_widget
if 'nc' in product_list3[1]:
z_widget.options=product_list3
z_widget.description='File'
w_widget.options=['']
i_widget.options=['']
w_widget.description='option'
i_widget.description='option'
netcdf_files=[]
netcdf_files=product_list3
else:
z_widget.options=product_list3
z_widget.description='Year'
product_name3=z_widget.value
with ftputil.FTPHost('nrt.cmems-du.eu', user, password) as ftp_host:
ftp_host.chdir("Core"+'/'+product_name +'/'+product_name2+'/'+product_name3)
product_list4 = ftp_host.listdir(ftp_host.curdir)
# case 2 : Get the content of the widgets based on the value of z_widget
if 'nc' in product_list4[1]:
w_widget.options=product_list4
w_widget.description='File'
i_widget.options=['']
netcdf_files=[]
netcdf_files=product_list4
else:
w_widget.options=product_list4
w_widget.description='Month'
product_name4=w_widget.value
with ftputil.FTPHost('nrt.cmems-du.eu', user, password) as ftp_host:
ftp_host.chdir("Core"+'/'+product_name +'/'+product_name2+'/'+product_name3+'/'+product_name4)
product_list5 = ftp_host.listdir(ftp_host.curdir)
# case 3 : Get the content of the widgets based on the value of w_widget
if 'nc' in product_list5[1]:
i_widget.options=product_list5#['List of netCdf Files']
i_widget.description='File'
netcdf_files=[]
netcdf_files=product_list5
else:
i_widget.options=product_list5
i_widget.description='day'
return (z_widget.value,w_widget.value,i_widget.value)
# update the content of the widgets according to our selection
x_widget.observe(update,'value')
y_widget.observe(update,'value')
z_widget.observe(update,'value')
w_widget.observe(update,'value')
####################-------------------------------------------------------------------------------------------------
######## Define the download procedure using the ftp protocol
def random_function(x, y, z, w, i):
###### get the downloading path
path=[x,y,z,w,i]
path_new=[]
file=[]
for i in path:
if i != 'List of netCdf Files' and i != '':
path_new.append(i)
file=path_new[-1]
path2 = "Core"
for i in range(len(path_new)):
path2 = path2+'/'+str(path_new[i])
filepath2= path2
ncdf_file_name2=file
#-----------------------------------------
# define the downloading button
button = widgets.Button(description='''Download The File''')
out = widgets.Output()
def on_button_clicked(_):
# "linking function with output"
with out:
#try:
output_directory=[]
# set the output_directory of the file
if Product=='Model':
if os.getcwd() == '/home/jovyan/public':
output_directory='/home/jovyan/work'+'/cmems_data/01_Model_product'
else:
output_directory=os.getcwd()+'/cmems_data/01_Model_product'
elif Product=='Satellite':
if os.getcwd() == '/home/jovyan/public':
output_directory='/home/jovyan/work'+'/cmems_data/02_Satellite_product'
else:
output_directory=os.getcwd()+'/cmems_data/02_Satellite_product'
#--------------------------------------------------------------------
# creating a folder using the output_directory
p = Path(output_directory)
p.mkdir(parents=True, exist_ok=True)
# downloading the file using the ftp protocol
host = 'nrt.cmems-du.eu'
print(f"Downloading The File '{ncdf_file_name2}' in {output_directory}")
with ftputil.FTPHost(host, user, password) as ftp_host:
cwd = os.getcwd()
os.chdir(output_directory)
try:
ftp_host.download(filepath2, ncdf_file_name2) # remote, local
print("Done")
except:
print("Downloading can't be done for this file, please run the function again and choose a netCDF file")
os.chdir(cwd)
#except:
#print("Downloading can't be done, please run the function again and choose a netCDF file")
return(ncdf_file_name2)
# linking button and function together using a button's method
button.on_click(on_button_clicked)
# displaying button and its output together
aa=widgets.VBox([button,out])
return(aa)
#----------------------------------------------------------------------------------------
# display the interaction between the widgets
display(pd.DataFrame(data=data))
interact(random_function,
x = x_widget,
y = y_widget,
z = z_widget,
w = w_widget,
i = i_widget);
return(update)
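# The FTP step at the core of download_Product can be reproduced in isolation,
# assuming valid CMEMS credentials (sketch only, mirroring the calls used above):
#   with ftputil.FTPHost('nrt.cmems-du.eu', user, password) as ftp_host:
#       ftp_host.chdir('Core')
#       products = ftp_host.listdir(ftp_host.curdir)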
###############################################################################################################
#--------------------------------------------------------------------------------------------
# READ THE DOWNLOADED FILE (For MODEL AND SATELLITE PRODUCTS)
#--------------------------------------------------------------------------------------------
###############################################################################################################
@staticmethod
def read_File(update,Product):
# get the name of the selected file
for i in update():
if 'nc' in i:
file=i
# get the current directory of the file
if Product=='Model':
if os.getcwd() == '/home/jovyan/public':
output_directory='/home/jovyan/work'+'/cmems_data/01_Model_product'
else:
output_directory=os.getcwd()+'/cmems_data/01_Model_product'
elif Product=='Satellite':
if os.getcwd() == '/home/jovyan/public':
output_directory='/home/jovyan/work'+'/cmems_data/02_Satellite_product'
else:
output_directory=os.getcwd()+'/cmems_data/02_Satellite_product'
# reading the netcdf file
dataset = output_directory+f'/{file}'
ds = xarray.open_dataset(dataset)
# get the list of the parameters of the netcdf file
list_parm_deleted=['time','lat','lon','depth','grid_mapping','x','y','longitude','latitude','LONGITUDE','LATITUDE','time_bnds']
full_list=list(ds.variables)
selected_list=[]
selected_list_name=[]
for i in full_list:
if not i in list_parm_deleted:
selected_list.append(i)
try:
selected_list_name.append(ds[i].attrs['standard_name'])
except:
try:
selected_list_name.append(ds[i].attrs['long_name'])
except:
selected_list_name.append(i)
return(ds,selected_list,selected_list_name,dataset)
################################################################################################################
#--------------------------------------------------------------------------------------------
# DISPLAY THE PARAMETERS OF THE FILE (For MODEL PRODUCTS)
#--------------------------------------------------------------------------------------------
###############################################################################################################
@staticmethod
def display_param_model(ds,selected_list,selected_list_name):
########## Initialize the widgets ------------------------------------------------------------------
dictionary = dict(zip(selected_list_name, selected_list))
if len(selected_list_name) < 4:
x_widget = widgets.Dropdown(layout={'width': 'initial'},
options=selected_list_name,
value=selected_list_name[0],
description='Parameters:',
disabled=False)
else:
x_widget = widgets.Dropdown(layout={'width': 'initial'},
options=selected_list_name, #selected_list,
value=selected_list_name[4], #selected_list[0],
description='Parameters:',
disabled=False)
style = {'description_width': 'initial'}
varb=dictionary[x_widget.value]
if len(ds[dictionary[x_widget.value]].shape) < 4:
n_widget = widgets.Label(value="This parameter does not allow a depth analysis")
y_widget = widgets.Dropdown(layout={'width': 'initial'},description='Longitude:')
z_widget = widgets.Dropdown(layout={'width': 'initial'},description='Latitude:')
else:
n_widget = widgets.Label(value="Please select a specific (Longitude,Latitude) to build also a depth analysis figure")
y_widget = widgets.Dropdown(layout={'width': 'initial'},description='Longitude:')
z_widget = widgets.Dropdown(layout={'width': 'initial'},description='Latitude:')
if 'lon' in list(ds.variables):
if 'x' in list(ds.variables):
y_widget.options=sorted(list(set(np.asarray(ds[varb]['x'][:]))))
z_widget.options=sorted(list(set(np.asarray(ds[varb]['y'][:]))))
try:
y_widget.value=sorted(list(set(np.asarray(ds[varb]['x'][:]))))[400]
z_widget.value=sorted(list(set(np.asarray(ds[varb]['y'][:]))))[550]
except:
y_widget.value=sorted(list(set(np.asarray(ds[varb]['x'][:]))))[0]
z_widget.value=sorted(list(set(np.asarray(ds[varb]['y'][:]))))[0]
else:
y_widget.options=sorted(list(set(np.asarray(ds[varb]['lon'][:]))))
z_widget.options=sorted(list(set(np.asarray(ds[varb]['lat'][:]))))
try:
y_widget.value=sorted(list(set(np.asarray(ds[varb]['lon'][:]))))[400]
z_widget.value=sorted(list(set(np.asarray(ds[varb]['lat'][:]))))[550]
except:
y_widget.value=sorted(list(set(np.asarray(ds[varb]['lon'][:]))))[0]
z_widget.value=sorted(list(set(np.asarray(ds[varb]['lat'][:]))))[0]
elif 'longitude' in list(ds.variables):
if 'x' in list(ds.variables):
y_widget.options=sorted(list(set(np.asarray(ds[varb]['x'][:]))))
z_widget.options=sorted(list(set(np.asarray(ds[varb]['y'][:]))))
try:
y_widget.value=sorted(list(set(np.asarray(ds[varb]['x'][:]))))[400]
z_widget.value=sorted(list(set(np.asarray(ds[varb]['y'][:]))))[550]
except:
y_widget.value=sorted(list(set(np.asarray(ds[varb]['x'][:]))))[0]
z_widget.value=sorted(list(set(np.asarray(ds[varb]['y'][:]))))[0]
else:
y_widget.options=sorted(list(set(np.asarray(ds[varb]['longitude'][:]))))
z_widget.options=sorted(list(set(np.asarray(ds[varb]['latitude'][:]))))
try:
y_widget.value=sorted(list(set(np.asarray(ds[varb]['longitude'][:]))))[400]
z_widget.value=sorted(list(set(np.asarray(ds[varb]['latitude'][:]))))[550]
except:
y_widget.value=sorted(list(set(np.asarray(ds[varb]['longitude'][:]))))[0]
z_widget.value=sorted(list(set(np.asarray(ds[varb]['latitude'][:]))))[0]
elif 'LONGITUDE' in list(ds.variables):
if 'x' in list(ds.variables):
y_widget.options=sorted(list(set(np.asarray(ds[varb]['x'][:]))))
z_widget.options=sorted(list(set(np.asarray(ds[varb]['y'][:]))))
try:
y_widget.value=sorted(list(set(np.asarray(ds[varb]['x'][:]))))[400]
z_widget.value=sorted(list(set(np.asarray(ds[varb]['y'][:]))))[550]
except:
y_widget.value=sorted(list(set(np.asarray(ds[varb]['x'][:]))))[0]
z_widget.value=sorted(list(set(np.asarray(ds[varb]['y'][:]))))[0]
else:
y_widget.options=sorted(list(set(np.asarray(ds[varb]['LONGITUDE'][:]))))
z_widget.options=sorted(list(set(np.asarray(ds[varb]['LATITUDE'][:]))))
try:
y_widget.value=sorted(list(set(np.asarray(ds[varb]['LONGITUDE'][:]))))[400]
z_widget.value=sorted(list(set(np.asarray(ds[varb]['LATITUDE'][:]))))[550]
except:
y_widget.value=sorted(list(set(np.asarray(ds[varb]['LONGITUDE'][:]))))[0]
z_widget.value=sorted(list(set(np.asarray(ds[varb]['LATITUDE'][:]))))[0]
#---------------------------------------------------------------------------------------------------
############# Define a function that updates the content of (y_widget,z_widget,n_widget) based on what we select for x_widget
def update_2(*args):
param_name=dictionary[x_widget.value]
varb=param_name
if len(ds[varb].shape) < 4:
y_widget.options=['']
z_widget.options=['']
n_widget.value="This parameter does not allow a depth analysis"
variable = ds.variables[varb][:]
vmin=variable[0,:,:].min()
vmax=variable[0,:,:].max()
else:
n_widget.value="Please select a specific (Longitude,Latitude) to build also a depth analysis figure"
if 'lon' in list(ds.variables):
if 'x' in list(ds.variables):
y_widget.options=sorted(list(set(np.asarray(ds[varb]['x'][:]))))
z_widget.options=sorted(list(set(np.asarray(ds[varb]['y'][:]))))
else:
y_widget.options=sorted(list(set(np.asarray(ds[varb]['lon'][:]))))
z_widget.options=sorted(list(set(np.asarray(ds[varb]['lat'][:]))))
elif 'longitude' in list(ds.variables):
if 'x' in list(ds.variables):
y_widget.options=sorted(list(set(np.asarray(ds[varb]['x'][:]))))
z_widget.options=sorted(list(set(np.asarray(ds[varb]['y'][:]))))
else:
y_widget.options=sorted(list(set(np.asarray(ds[varb]['longitude'][:]))))
z_widget.options=sorted(list(set(np.asarray(ds[varb]['latitude'][:]))))
elif 'LONGITUDE' in list(ds.variables):
if 'x' in list(ds.variables):
y_widget.options=sorted(list(set(np.asarray(ds[varb]['x'][:]))))
z_widget.options=sorted(list(set(np.asarray(ds[varb]['y'][:]))))
else:
y_widget.options=sorted(list(set(np.asarray(ds[varb]['LONGITUDE'][:]))))
z_widget.options=sorted(list(set(np.asarray(ds[varb]['LATITUDE'][:]))))
variable = ds.variables[varb][:]
vmin=variable[0,0,:,:].min()
vmax=variable[0,0,:,:].max()
return(vmin,vmax)
# update the content of the widgets according to our selection
x_widget.observe(update_2,'value')
n_widget.observe(update_2,'value')
y_widget.observe(update_2,'value')
z_widget.observe(update_2,'value')
#--------------------------------------------------------------------------------------------------------
####### configure the display according to the selected parameter
def random_function(x,n,y,z):
param_name=dictionary[x]
param_lon=y
param_lat=z
style = {'description_width': 'initial'}
button = widgets.Button(description="Display The Parameter",style=style)
out = widgets.Output()
def on_button_clicked(_):
# "linking function with output"
with out:
try:
varb=param_name
var_lon=param_lon
var_lat=param_lat
# define the longitude (max,min) and latitude (max,min) for the displaying
if 'lon' in list(ds.variables):
if 'x' in list(ds.variables):
lon_max=ds.variables['x'][:].max()
lon_min=ds.variables['x'][:].min()
lat_max=ds.variables['y'][:].max()
lat_min=ds.variables['y'][:].min()
lon_max=np.asscalar(np.asarray(lon_max, dtype=np.float))
lon_min=np.asscalar(np.asarray(lon_min, dtype=np.float))
lat_max=np.asscalar(np.asarray(lat_max, dtype=np.float))
lat_min=np.asscalar(np.asarray(lat_min, dtype=np.float))
lons = ds.variables['x'][:]
lats = ds.variables['y'][:]
else:
lon_max=ds.variables['lon'][:].max()
lon_min=ds.variables['lon'][:].min()
lat_max=ds.variables['lat'][:].max()
lat_min=ds.variables['lat'][:].min()
lon_max=np.asscalar(np.asarray(lon_max, dtype=np.float))
lon_min=np.asscalar(np.asarray(lon_min, dtype=np.float))
lat_max=np.asscalar(np.asarray(lat_max, dtype=np.float))
lat_min=np.asscalar(np.asarray(lat_min, dtype=np.float))
lons = ds.variables['lon'][:]
lats = ds.variables['lat'][:]
elif 'longitude' in list(ds.variables):
if 'x' in list(ds.variables):
lon_max=ds.variables['x'][:].max()
lon_min=ds.variables['x'][:].min()
lat_max=ds.variables['y'][:].max()
lat_min=ds.variables['y'][:].min()
lon_max=np.asscalar(np.asarray(lon_max, dtype=np.float))
lon_min=np.asscalar(np.asarray(lon_min, dtype=np.float))
lat_max=np.asscalar(np.asarray(lat_max, dtype=np.float))
lat_min=np.asscalar(np.asarray(lat_min, dtype=np.float))
lons = ds.variables['x'][:]
lats = ds.variables['y'][:]
else:
lon_max=ds.variables['longitude'][:].max()
lon_min=ds.variables['longitude'][:].min()
lat_max=ds.variables['latitude'][:].max()
lat_min=ds.variables['latitude'][:].min()
lon_max=np.asscalar(np.asarray(lon_max, dtype=np.float))
lon_min=np.asscalar(np.asarray(lon_min, dtype=np.float))
lat_max=np.asscalar(np.asarray(lat_max, dtype=np.float))
lat_min=np.asscalar(np.asarray(lat_min, dtype=np.float))
lons = ds.variables['longitude'][:]
lats = ds.variables['latitude'][:]
elif 'LONGITUDE' in list(ds.variables):
if 'x' in list(ds.variables):
lon_max=ds.variables['x'][:].max()
lon_min=ds.variables['x'][:].min()
lat_max=ds.variables['y'][:].max()
lat_min=ds.variables['y'][:].min()
lon_max=np.asscalar(np.asarray(lon_max, dtype=np.float))
lon_min=np.asscalar(np.asarray(lon_min, dtype=np.float))
lat_max=np.asscalar(np.asarray(lat_max, dtype=np.float))
lat_min=np.asscalar(np.asarray(lat_min, dtype=np.float))
lons = ds.variables['x'][:]
lats = ds.variables['y'][:]
else:
lon_max=ds.variables['LONGITUDE'][:].max()
lon_min=ds.variables['LONGITUDE'][:].min()
lat_max=ds.variables['LATITUDE'][:].max()
lat_min=ds.variables['LATITUDE'][:].min()
lon_max=np.asscalar(np.asarray(lon_max, dtype=np.float))
lon_min=np.asscalar(np.asarray(lon_min, dtype=np.float))
lat_max=np.asscalar(np.asarray(lat_max, dtype=np.float))
lat_min=np.asscalar(np.asarray(lat_min, dtype=np.float))
lons = ds.variables['LONGITUDE'][:]
lats = ds.variables['LATITUDE'][:]
if lon_min <-180 or lat_min < -90 or lon_max > 180 or lat_max >90:
lon_min,lat_min,lon_max,lat_max= (-180,-90,180,90)
#---------------------------------------------------------------------
# case 1 : display the selected parameter on a map without a depth analysis
if len(ds[varb].shape) < 3:
variable = ds.variables[varb][:]
try:
variable_title=ds[varb].attrs['standard_name']
except:
try:
variable_title=ds[varb].attrs['long_name']
except:
variable_title=varb
lon, lat = np.meshgrid(lons, lats)
plt.figure(figsize=(20,7))
plt.subplot(121)
if lon_min-2 <-180 or lat_min-3 < -90 or lon_max+2 > 180 or lat_max+4.2 >90:
map = Basemap(llcrnrlon=lon_min,llcrnrlat=lat_min,urcrnrlon=lon_max,urcrnrlat=lat_max, epsg=4326)
else:
map = Basemap(llcrnrlon=lon_min-2,llcrnrlat=lat_min-3,urcrnrlon=lon_max+2,urcrnrlat=lat_max+4.2, epsg=4326)
x, y = map(lon, lat)
cs=map.contourf(x,y,variable[:,:],cmap=plt.cm.jet,vmin=variable[:,:].min(), vmax=variable[:,:].max())
try:
map.arcgisimage(service='ESRI_Imagery_World_2D', xpixels = 1500, verbose= False)
except:
map.bluemarble()
cbar=map.colorbar(cs,location='bottom',pad="5%")
cbar.set_label(f'{variable_title}', fontsize=15)
if varb == 'thetao' or varb == 'bottomT':
plt.title('MODEL-PRODUCT [degrees_C]', fontsize=15)
else:
plt.title('MODEL-PRODUCT', fontsize=15)
plt.show()
# case 2 : display the selected parameter on a map without a depth analysis
elif 2 < len(ds[varb].shape) < 4:
variable = ds.variables[varb][:]
try:
variable_title=ds[varb].attrs['standard_name']
except:
try:
variable_title=ds[varb].attrs['long_name']
except:
variable_title=varb
lon, lat = np.meshgrid(lons, lats)
plt.figure(figsize=(20,7))
plt.subplot(121)
if lon_min-2 <-180 or lat_min-3 < -90 or lon_max+2 > 180 or lat_max+4.2 >90:
map = Basemap(llcrnrlon=lon_min,llcrnrlat=lat_min,urcrnrlon=lon_max,urcrnrlat=lat_max, epsg=4326)
else:
map = Basemap(llcrnrlon=lon_min-2,llcrnrlat=lat_min-3,urcrnrlon=lon_max+2,urcrnrlat=lat_max+4.2, epsg=4326)
x, y = map(lon, lat)
cs=map.contourf(x,y,variable[0,:,:],cmap=plt.cm.jet,vmin=variable[0,:,:].min(), vmax=variable[0,:,:].max())
try:
map.arcgisimage(service='ESRI_Imagery_World_2D', xpixels = 1500, verbose= False)
except:
map.bluemarble()
cbar=map.colorbar(cs,location='bottom',pad="5%")
cbar.set_label(f'{variable_title}', fontsize=15)
if varb == 'thetao' or varb == 'bottomT':
plt.title('MODEL-PRODUCT [degrees_C]', fontsize=15)
else:
plt.title('MODEL-PRODUCT', fontsize=15)
plt.show()
# case 3 : display the selected parameter on a map with a depth analysis
else:
variable = ds.variables[varb][:]
try:
variable_title=ds[varb].attrs['standard_name']
except:
try:
variable_title=ds[varb].attrs['long_name']
except:
variable_title=varb
lon, lat = np.meshgrid(lons, lats)
plt.figure(figsize=(30,7))
plt.subplot(131)
if lon_min-2 <-180 or lat_min-3 < -90 or lon_max+2 > 180 or lat_max+4.2 >90:
map = Basemap(llcrnrlon=lon_min,llcrnrlat=lat_min,urcrnrlon=lon_max,urcrnrlat=lat_max, epsg=4326)
else:
map = Basemap(llcrnrlon=lon_min-2,llcrnrlat=lat_min-3,urcrnrlon=lon_max+2,urcrnrlat=lat_max+4.2, epsg=4326)
x, y = map(lon, lat)
cs=map.contourf(x,y,variable[0,0,:,:],cmap=plt.cm.jet,vmin=variable[0,0,:,:].min(), vmax=variable[0,0,:,:].max())
try:
map.arcgisimage(service='ESRI_Imagery_World_2D', xpixels = 1500, verbose= False)
except:
map.bluemarble()
cbar=map.colorbar(cs,location='bottom',pad="5%")
cbar.set_label(f'{variable_title}', fontsize=15)
if varb == 'thetao' or varb == 'bottomT':
plt.title('MODEL-PRODUCT (For Depth = 0) [degrees_C]', fontsize=15)
else:
plt.title('MODEL-PRODUCT (For Depth = 0)', fontsize=15)
# add the display of the depth analysis
plt.subplot(132)
# Get indexes for a Given Point (latitude = var_lat and longitude = var_lon)
if 'lon' in list(ds.variables):
if 'x' in list(ds.variables):
vd=np.where(np.asarray(ds[varb]['x'])[:] == var_lon)
vd2=np.where(np.asarray(ds[varb]['y'])[:] == var_lat)
lons_test=ds[varb]['x'][np.asscalar(np.asarray(list(vd)))]
lats_test=ds[varb]['y'][np.asscalar(np.asarray(list(vd2)))]
else:
vd=np.where(np.asarray(ds[varb]['lon'])[:] == var_lon)
vd2=np.where(np.asarray(ds[varb]['lat'])[:] == var_lat)
lons_test=ds[varb]['lon'][np.asscalar(np.asarray(list(vd)))]
lats_test=ds[varb]['lat'][np.asscalar(np.asarray(list(vd2)))]
elif 'longitude' in list(ds.variables):
if 'x' in list(ds.variables):
vd=np.where(np.asarray(ds[varb]['x'])[:] == var_lon)
vd2=np.where(np.asarray(ds[varb]['y'])[:] == var_lat)
lons_test=ds[varb]['x'][np.asscalar(np.asarray(list(vd)))]
lats_test=ds[varb]['y'][np.asscalar(np.asarray(list(vd2)))]
else:
vd=np.where(np.asarray(ds[varb]['longitude'])[:] == var_lon)
vd2=np.where(np.asarray(ds[varb]['latitude'])[:] == var_lat)
lons_test=ds[varb]['longitude'][np.asscalar(np.asarray(list(vd)))]
lats_test=ds[varb]['latitude'][np.asscalar(np.asarray(list(vd2)))]
elif 'LONGITUDE' in list(ds.variables):
if 'x' in list(ds.variables):
vd=np.where(np.asarray(ds[varb]['x'])[:] == var_lon)
vd2=np.where(np.asarray(ds[varb]['y'])[:] == var_lat)
lons_test=ds[varb]['x'][np.asscalar(np.asarray(list(vd)))]
lats_test=ds[varb]['y'][np.asscalar(np.asarray(list(vd2)))]
else:
vd=np.where(np.asarray(ds[varb]['LONGITUDE'])[:] == var_lon)
vd2=np.where(np.asarray(ds[varb]['LATITUDE'])[:] == var_lat)
lons_test=ds[varb]['LONGITUDE'][np.asscalar(np.asarray(list(vd)))]
lats_test=ds[varb]['LATITUDE'][np.asscalar(np.asarray(list(vd2)))]
indx_lat=np.asscalar(np.asarray(list(vd2)))
indx_lon=np.asscalar(np.asarray(list(vd)))
if lon_min-2 <-180 or lat_min-3 < -90 or lon_max+2 > 180 or lat_max+4.2 >90:
map = Basemap(llcrnrlon=lon_min,llcrnrlat=lat_min,urcrnrlon=lon_max,urcrnrlat=lat_max, epsg=4326)
else:
map = Basemap(llcrnrlon=lon_min-2,llcrnrlat=lat_min-3,urcrnrlon=lon_max+2,urcrnrlat=lat_max+4.2, epsg=4326)
s= 200*np.ones(1)
if math.isnan(np.array([ds[varb][0,0,indx_lat,indx_lon]])) == True:
cs3=map.scatter(np.array([lons_test]),np.array([lats_test]) , c=np.array([0]),s=s,cmap=plt.cm.gist_gray)
else:
cs3=map.scatter(np.array([lons_test]),np.array([lats_test]) , c=np.array([ds[varb][0,0,indx_lat,indx_lon]]),s=s,cmap=plt.cm.jet,vmin=variable[0,0,:,:].min(), vmax=variable[0,0,:,:].max())
try:
map.arcgisimage(service='ESRI_Imagery_World_2D', xpixels = 1500, verbose= False)
except:
map.bluemarble()
cbar3=map.colorbar(cs3,location='bottom',pad="5%")
cbar3.set_label(f'{variable_title}', fontsize=15)
plt.title(f'Selected Point', fontsize=20)
plt.subplot(133)
ds[varb][0,:,indx_lat,indx_lon].plot.line(y='depth',ylim=(110,0),yincrease=False)
plt.show()
except:
try:
varb=param_name
if len(ds[varb].shape) < 3:
ds[varb].plot()
elif 2 < len(ds[varb].shape) < 4:
ds[varb][0,:,:].plot()
else:
ds[varb][0,0,:,:].plot()
except:
print ("Displaying doesn't work, please choose another parameter or product (example : BALTICSEA_ANALYSIS_FORECAST_PHY_003_006) ")
# linking button and function together using a button's method
button.on_click(on_button_clicked)
# displaying button and its output together
a=widgets.VBox([button,out])
return(a)
#----------------------------------------------------------------------------------------------------------------
# display the interaction between the widgets
interact(random_function,
x = x_widget,
n = n_widget,
y = y_widget,
z = z_widget);
return(update_2)
###############################################################################################################
#--------------------------------------------------------------------------------------------
# DISPLAY THE PARAMETERS OF THE FILE (For SATELLITE PRODUCTS)
#--------------------------------------------------------------------------------------------
###############################################################################################################
@staticmethod
def display_param_satellite(ds2,selected_list2,file_name2,scale_min,scale_max,scale):
########## Initialize the widget ------------------------------------------------------------------
x_widget = widgets.Dropdown(layout={'width': 'initial'},
options=selected_list2,
value=selected_list2[5],
description='Parameters:',
disabled=False)
#-------------------------------------------------------------------------------------------------
## Define a function that updates the value of x_widget
def update_3(*args):
param_name=x_widget.value
x_widget.observe(update_3)
#-------------------------------------------------------------------------------------------------
####### configure the display according to the selected parameter
def random_function(x):
param_name=x
if param_name == 'sea_surface_temperature' or param_name == 'adjusted_sea_surface_temperature':
ds2 = xarray.open_dataset(file_name2)
ds2[param_name][0,:,:]=ds2[param_name][0,:,:]-273.15
else:
ds2 = xarray.open_dataset(file_name2)
ds2[param_name]=ds2[param_name]
button = widgets.Button(description='''Display The Param''')
out = widgets.Output()
# define the displaying button
def on_button_clicked(_):
# "linking function with output"
with out:
try:
varb=param_name
# a condition to see if there is a variable that represents the depth in this parameter
if len(ds2[varb].shape) < 4:
# display the selected parameter on a map
lons2 = ds2.variables['lon'][:]
lats2 = ds2.variables['lat'][:]
variable_ds2 = ds2.variables[varb][:]
variable_name = varb
lon2, lat2 = np.meshgrid(lons2, lats2)
plt.figure(figsize=(30,30))
plt.subplot(121)
map = Basemap(llcrnrlon=-40,llcrnrlat=20,urcrnrlon=60,urcrnrlat=70, epsg=4326)
x2, y2 = map(lon2, lat2)
if scale == 'Same_as_Model_Product':
cs2=map.contourf(x2,y2,variable_ds2[0,:,:],cmap=plt.cm.jet,vmin=scale_min, vmax=scale_max)
else:
cs2=map.contourf(x2,y2,variable_ds2[0,:,:],cmap=plt.cm.jet)
map.arcgisimage(service='ESRI_Imagery_World_2D', xpixels = 800, verbose= False)
cbar2=map.colorbar(cs2,location='bottom',pad="5%")
cbar2.set_label(f'{variable_name}', fontsize=15)
plt.title('SATELLITE OBSERVATION-PRODUCT', fontsize=20)
#--------------------------------------------------------
# display the selected parameter for a zoomed area of the image
plt.subplot(122)
map = Basemap(llcrnrlon=7,llcrnrlat=50,urcrnrlon=32,urcrnrlat=70, epsg=4326)
x2, y2 = map(lon2, lat2)
if scale == 'Same_as_Model_Product':
cs2=map.contourf(x2,y2,variable_ds2[0,:,:],cmap=plt.cm.jet,vmin=scale_min, vmax=scale_max)
else:
cs2=map.contourf(x2,y2,variable_ds2[0,:,:],cmap=plt.cm.jet)
map.arcgisimage(service='ESRI_Imagery_World_2D', xpixels = 1200, verbose= False)
cbar2=map.colorbar(cs2,location='bottom',pad="5%")
cbar2.set_label(f'{variable_name}', fontsize=15)
plt.title('SATELLITE OBSERVATION-PRODUCT', fontsize=20)
plt.show()
#------------------------------------------------------------------
else:
# display the selected parameter on a map
lons2 = ds2.variables['lon'][:]
lats2 = ds2.variables['lat'][:]
variable_ds2 = ds2.variables[varb][:]
variable_name = varb
lon2, lat2 = np.meshgrid(lons2, lats2)
plt.figure(figsize=(30,30))
plt.subplot(121)
map = Basemap(llcrnrlon=-40,llcrnrlat=20,urcrnrlon=60,urcrnrlat=70, epsg=4326)
x2, y2 = map(lon2, lat2)
if scale == 'Same_as_Model_Product':
cs2=map.contourf(x2,y2,variable_ds2[0,0,:,:],cmap=plt.cm.jet,vmin=scale_min, vmax=scale_max)
else:
cs2=map.contourf(x2,y2,variable_ds2[0,0,:,:],cmap=plt.cm.jet)
map.arcgisimage(service='ESRI_Imagery_World_2D', xpixels = 800, verbose= False)
cbar2=map.colorbar(cs2,location='bottom',pad="5%")
cbar2.set_label(f'{variable_name}', fontsize=15)
plt.title('SATELLITE OBSERVATION-PRODUCT', fontsize=20)
#------------------------------------------------------------------
# display the selected parameter for a zoomed area of the image
plt.subplot(122)
map = Basemap(llcrnrlon=7,llcrnrlat=50,urcrnrlon=32,urcrnrlat=70, epsg=4326)
x2, y2 = map(lon2, lat2)
if scale == 'Same_as_Model_Product':
cs2=map.contourf(x2,y2,variable_ds2[0,0,:,:],cmap=plt.cm.jet,vmin=scale_min, vmax=scale_max)
else:
cs2=map.contourf(x2,y2,variable_ds2[0,0,:,:],cmap=plt.cm.jet)
map.arcgisimage(service='ESRI_Imagery_World_2D', xpixels = 1200, verbose= False)
cbar2=map.colorbar(cs2,location='bottom',pad="5%")
cbar2.set_label(f'{variable_name}', fontsize=15)
plt.title('SATELLITE OBSERVATION-PRODUCT', fontsize=20)
plt.show()
#-----------------------------------------------------------------------------------
except:
print ("Displaying doesn't work, please choose another product (example : SST_EUR_SST_L3S_NRT_OBSERVATIONS_010_009_a) ")
# linking button and function together using a button's method
button.on_click(on_button_clicked)
# displaying button and its output together
a=widgets.VBox([button,out])
return(a)
#----------------------------------------------------------------------------------------------------------------
# display the interaction between the widget
interact(random_function,
x = x_widget);
############################# INSITU PRODUCT ###########################################
########################################################################################
############################################################################################
#--------------------------------------------------------------------------------------------
# DOWNLOAD THE FILES
#--------------------------------------------------------------------------------------------
############################################################################################
@staticmethod
def Insitu_Products_download(host,user,password):
# Get the list of all Insitu products offered by the cmems catalog
data = {'In Situ NRT products': []}
NRT_products = []
#connect to CMEMS FTP
with ftputil.FTPHost('nrt.cmems-du.eu', user, password) as ftp_host:
ftp_host.chdir('Core')
product_list = ftp_host.listdir(ftp_host.curdir)
for product in product_list:
items = product.split('_')
if 'INSITU' in items:
NRT_products.append(product)
#------------------------------------------------------------------
########## Initialize the widgets ------------------------------------------------------------------
x_widget = widgets.Dropdown(layout={'width': 'initial'},
options=NRT_products,
value=NRT_products[1],
description='Product:',
disabled=False)
product_name=x_widget.value
index_file = 'index_latest.txt' #type aimed index file (index_latest - index_monthly - index_history )
with ftputil.FTPHost(host, user, password) as ftp_host:
#open the index file to read
with ftp_host.open("Core"+'/'+product_name+'/'+index_file, "r") as indexfile:
raw_index_info = pd.read_csv(indexfile, skiprows=5) #load it as pandas dataframe
def flatten(items):
"""Yield items from any nested iterable"""
for x in items:
if isinstance(x, Iterable) and not isinstance(x, (str, bytes)):
for sub_x in flatten(x):
yield sub_x
else:
yield x
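# e.g. list(flatten([['TEMP', 'PSAL'], 'DOX1'])) -> ['TEMP', 'PSAL', 'DOX1'];
# strings are treated as atoms rather than re-iterated character by character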
items=[]
for i in range(len(raw_index_info.parameters)):
items.append(raw_index_info.parameters[i].split(' '))
items=list(flatten(items))
items = list(set(items))
y_widget = widgets.Dropdown(layout={'width': 'initial'},description='Parameter:')
y_widget.options=items
try:
y_widget.value=items[items.index("TEMP")]
except:
y_widget.value=items[0]
style = {'description_width': 'initial'}
with ftputil.FTPHost(host, user, password) as ftp_host:
#open the index file to read
with ftp_host.open("Core"+'/'+product_name+'/'+index_file, "r") as indexfile:
raw_index_info = pd.read_csv(indexfile, skiprows=5) #load it as pandas dataframe
z_widget = widgets.Text(layout={'width': 'initial'},value='2019-06-03T23:00:00Z',description=f'Enter an initial date between {raw_index_info.time_coverage_start[0]} and {raw_index_info.time_coverage_start[len(raw_index_info.time_coverage_start)-1]} : ',style=style)
w_widget = widgets.Text(layout={'width': 'initial'},value='2019-06-04T22:59:59Z',description=f'Enter an end date between {raw_index_info.time_coverage_end[0]} and {raw_index_info.time_coverage_end[len(raw_index_info.time_coverage_end)-1]} : ',style=style)
display(pd.DataFrame(data=data))
#-----------------------------------------------------------------------------------------------------
####### Define a function that updates the content of (y_widget,w_widget,z_widget) based on what we select for x_widget
def update4(*args):
product_name=x_widget.value
index_file = 'index_latest.txt' #type aimed index file (index_latest - index_monthly - index_history )
with ftputil.FTPHost(host, user, password) as ftp_host:
#open the index file to read
with ftp_host.open("Core"+'/'+product_name+'/'+index_file, "r") as indexfile:
raw_index_info = pd.read_csv(indexfile, skiprows=5) #load it as pandas dataframe
z_widget.description=f'Enter an initial date between {raw_index_info.time_coverage_start[0]} and {raw_index_info.time_coverage_start[len(raw_index_info.time_coverage_start)-1]} : '
w_widget.description=f'Enter an end date between {raw_index_info.time_coverage_end[0]} and {raw_index_info.time_coverage_end[len(raw_index_info.time_coverage_end)-1]} : '
with ftputil.FTPHost(host, user, password) as ftp_host:
#open the index file to read
with ftp_host.open("Core"+'/'+product_name+'/'+index_file, "r") as indexfile:
raw_index_info = pd.read_csv(indexfile, skiprows=5) #load it as pandas dataframe
def flatten(items):
"""Yield items from any nested iterable"""
for x in items:
if isinstance(x, Iterable) and not isinstance(x, (str, bytes)):
for sub_x in flatten(x):
yield sub_x
else:
yield x
items=[]
for i in range(len(raw_index_info.parameters)):
items.append(raw_index_info.parameters[i].split(' '))
items=list(flatten(items))
items = list(set(items))
y_widget.options=items
try:
y_widget.value=items[items.index("TEMP")]
except:
y_widget.value=items[0]
z_widget.value='2019-06-03T23:00:00Z'
w_widget.value='2019-06-04T22:59:59Z'
return(x_widget.value,y_widget.value)
x_widget.observe(update4,'value')
y_widget.observe(update4,'value')
#---------------------------------------------------------------------------------------------------------------
######## Define the download procedure using the ftp protocol
def random_function(x, y, z, w):
product_name=x
aimed_parameter=y
date_format = "%Y-%m-%dT%H:%M:%SZ"
ini = datetime.datetime.strptime(z, date_format)
end = datetime.datetime.strptime(w, date_format)
# define the downloading button
button = widgets.Button(description='''Download The Files''')
out = widgets.Output()
def on_button_clicked(_):
# "linking function with output"
with out:
# set the output_directory of the files
if os.getcwd() == '/home/jovyan/public':
output_directory='/home/jovyan/work'+'/cmems_data/03_Insitu_product'
else:
output_directory=os.getcwd()+'/cmems_data/03_Insitu_product'
#-------------------------------------------------------
# creating a folder using the output_directory
p = Path(output_directory)
p.mkdir(parents=True, exist_ok=True)
####### downloading the files using the ftp protocol
print(f'Downloading The Files in {output_directory}')
# connect to CMEMS FTP
dataset_all=[]
with ftputil.FTPHost(host, user, password) as ftp_host:
# open the index file to read
with ftp_host.open("Core"+'/'+product_name+'/'+index_file, "r") as indexfile:
# read the index file as a comma-separate-value file
index = np.genfromtxt(indexfile, skip_header=6, unpack=False, delimiter=',', dtype=None, names=['catalog_id', 'file_name','geospatial_lat_min', 'geospatial_lat_max', 'geospatial_lon_min','geospatial_lon_max','time_coverage_start', 'time_coverage_end', 'provider', 'date_update', 'data_mode', 'parameters'])
# loop over the lines/netCDFs and download the most sutable ones for you
for netCDF in index:
# getting ftplink, filepath and filename
ftplink = netCDF['file_name'].decode('utf-8')
filepath = '/'.join(ftplink.split('/')[3:len(ftplink.split('/'))])
ncdf_file_name = ftplink[ftplink.rfind('/')+1:]
# download netCDF if meeting selection criteria
parameters = netCDF['parameters'].decode('utf-8')
time_start = datetime.datetime.strptime(netCDF['time_coverage_start'].decode('utf-8'), date_format)
time_end = datetime.datetime.strptime(netCDF['time_coverage_end'].decode('utf-8'), date_format)
if aimed_parameter in parameters and time_start > ini and time_end < end:
if ftp_host.path.isfile(filepath):
cwd = os.getcwd()
os.chdir(output_directory)
ftp_host.download(filepath, ncdf_file_name) # remote, local
dataset_all.append(ncdf_file_name)
os.chdir(cwd)
# create a text file using the output directory containing all the names of downloaded netcdf files
with open(output_directory+'/Datasets_downloaded.txt', 'w') as filehandle:
for listitem in dataset_all:
filehandle.write('%s\n' % listitem)
if dataset_all == []:
print("No files were downloaded, please check that the chosen date is wide enough and that it is between the time range indicated at the top")
else:
print('Done')
return(dataset_all)
#------------------------------------------------------------
# linking button and function together using a button's method
button.on_click(on_button_clicked)
# displaying button and its output together
aa=widgets.VBox([button,out])
display(aa)
# display the interaction between the widgets
interact(random_function,
x = x_widget,
y = y_widget,
z = z_widget,
w = w_widget);
return(update4)
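# Note on the selection rule above: a netCDF entry from the index file is kept
# only when the aimed parameter appears in its 'parameters' column and both of
# its time_coverage bounds fall strictly inside the requested [ini, end] window.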
#############################################################################################
#--------------------------------------------------------------------------------------------
# READ THE DOWNLOADED FILES
#--------------------------------------------------------------------------------------------
#############################################################################################
@staticmethod
def Insitu_read_files(dataset_all):
# get the current directory of the files
if os.getcwd() == '/home/jovyan/public':
output_directory='/home/jovyan/work'+'/cmems_data/03_Insitu_product'
else:
output_directory=os.getcwd()+'/cmems_data/03_Insitu_product'
# reading the netcdf files
All_ds=[]
for i in range(len(dataset_all)):
vars()[f'ds_{i+1}'] = xarray.open_dataset(output_directory+f'/{dataset_all[i]}')
All_ds.append(vars()[f'ds_{i+1}'])
return(All_ds)
#############################################################################################
#--------------------------------------------------------------------------------------------
# DISPLAY THE PARAMETERS OF THE DOWNLOADED FILES
#--------------------------------------------------------------------------------------------
#############################################################################################
@staticmethod
def display_Insitu_product(All_ds,selected_product,param,scale_min,scale_max,scale):
try:
if selected_product == 'INSITU_BAL_NRT_OBSERVATIONS_013_032':
# get the values of the selected parameter at the surface (depth=0) for all the netcdfs files
var_temp_test2=[]
for i in range(len(All_ds)):
if All_ds[i][param][0].size > 1:
var_temp_test=All_ds[i][param][0,0]
var_temp_test2.append(var_temp_test)
else:
var_temp_test=All_ds[i][param][0]
var_temp_test2.append(var_temp_test)
var_temp_test2=np.asarray(var_temp_test2, dtype=np.float)[:]
#--------------------------------------------------------------------------------------------
# get the values of the latitude of the selected parameter on the surface (depth = 0) for all netcdf files
lats_test2=[]
for i in range(len(All_ds)):
lats_test=All_ds[i]['LATITUDE'][0]
lats_test2.append(lats_test)
lats_test2=np.asarray(lats_test2, dtype=np.float)[:]
#--------------------------------------------------------------------------------------------------------
# get the values of the longitude of the selected parameter on the surface (depth = 0) for all netcdf files
lons_test2=[]
for i in range(len(All_ds)):
lons_test=All_ds[i]['LONGITUDE'][0]
lons_test2.append(lons_test)
lons_test2=np.asarray(lons_test2, dtype=np.float)[:]
#----------------------------------------------------------------------------------------------------------
# display the selected points (of all netcdf files at depth=0) on a map
plt.figure(figsize=(20,7))
map = Basemap(llcrnrlon=7,llcrnrlat=50,urcrnrlon=32,urcrnrlat=70, epsg=4326)
s= 200*np.ones(len(All_ds))
if scale == 'Same_as_Model_Product':
cs3=map.scatter(lons_test2,lats_test2 , c=var_temp_test2,s=s,cmap=plt.cm.jet,vmin=scale_min, vmax=scale_max)
else:
cs3=map.scatter(lons_test2,lats_test2 , c=var_temp_test2,s=s,cmap=plt.cm.jet)
map.arcgisimage(service='ESRI_Imagery_World_2D', xpixels = 1500, verbose= False)
cbar3=map.colorbar(cs3,location='bottom',pad="5%")
cbar3.set_label('Temperature', fontsize=15)
plt.title('IN-SITU-PRODUCT', fontsize=20)
plt.show()
# get the list of indexes of the files that permit a depth analysis
ii=[]
for i in range(len(All_ds)):
if All_ds[i]['TEMP'][0].size > 1:
ii.append(i)
remove_index=[]
for j in ii:
if All_ds[j]['TEMP'][0].size > 1 and np.isnan(All_ds[j]['TEMP'][0,:]).any() == True:
remove_index.append(ii.index(j))
ii=list(np.delete(ii,remove_index))
#-------------------------------------------------------------------
# get the values of the selected parameter at the surface (depth=0) for the files that permit a depth analysis
var_temp_test2=[]
for i in ii:
var_temp_test=All_ds[i][param][0,0]
var_temp_test2.append(var_temp_test)
var_temp_test2=np.asarray(var_temp_test2, dtype=np.float)[:]
#------------------------------------------------------------------------------------------------------------
# get the values of the latitude of the selected parameter on the surface (depth = 0) for the files that permit a depth analysis
lats_test2=[]
for i in ii:
lats_test=All_ds[i]['LATITUDE'][0]
lats_test2.append(lats_test)
lats_test2=np.asarray(lats_test2, dtype=np.float)[:]
#------------------------------------------------------------------------------------------------------------
# get the values of the longitude of the selected parameter on the surface (depth = 0) for the files that permit a depth analysis
lons_test2=[]
for i in ii:
lons_test=All_ds[i]['LONGITUDE'][0]
lons_test2.append(lons_test)
lons_test2=np.asarray(lons_test2, dtype=np.float)[:]
#------------------------------------------------------------------------------------------------------------
# display the selected points (for the files that permit a depth analysis at depth=0) on a map
fig = plt.figure(figsize=(22,30))
for k in range(len(ii)):
ax = fig.add_subplot(5,4,k+1)
map = Basemap(llcrnrlon=7,llcrnrlat=50,urcrnrlon=32,urcrnrlat=70, epsg=4326)
s= 200*np.ones(1)
if scale == 'Same_as_Model_Product':
cs3=map.scatter(np.array([lons_test2[k]]),
np.array([lats_test2[k]])
numpy.array
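# A minimal sketch of the xarray step at the core of Cmems.read_File above:
# open a downloaded NetCDF file and keep only the non-coordinate data
# variables (the file path below is hypothetical):
import xarray

def list_data_variables(path="cmems_data/01_Model_product/example.nc"):
    ds = xarray.open_dataset(path)                   # lazily open the dataset
    coords = {"time", "time_bnds", "depth", "x", "y", "grid_mapping",
              "lat", "lon", "latitude", "longitude", "LATITUDE", "LONGITUDE"}
    return [name for name in ds.variables if name not in coords]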
import os
import os.path as osp
import cv2
import numpy as np
import vispy
from transforms3d.axangles import axangle2mat
from transforms3d.quaternions import quat2mat
from vispy import app, gloo
# import torch
# import copy
import OpenGL.GL as gl
from lib.render_vispy.frustum import Camera3D
from lib.render_vispy.model3d import Model3D, load_models # noqa
os.environ["PYOPENGL_PLATFORM"] = "egl"
cur_dir = osp.dirname(osp.abspath(__file__))
# app backends: glfw, pyglet, egl
# gl backends: gl2, pyopengl2, gl+
app_backend = "egl"
gl_backend = "gl2" # "pyopengl2" # speed: 'gl+' < 'gl2' < 'pyopengl2'
vispy.use(app=app_backend, gl=gl_backend)
print("vispy uses app: {}, gl: {}".format(app_backend, gl_backend))
def shader_from_path(shader_filename):
shader_path = osp.join(cur_dir, "./shader", shader_filename)
assert osp.exists(shader_path)
with open(shader_path, "r") as f:
return f.read()
def singleton(cls):
instances = {}
def get_instance(size, cam, model_paths=None, scale_to_meter=1.0, gpu_id=None):
if cls not in instances:
instances[cls] = cls(size, cam, model_paths, scale_to_meter, gpu_id)
return instances[cls]
return get_instance
@singleton # Don't throw GL context into trash when having more than one Renderer instance
class Renderer(app.Canvas):
"""
NOTE: internally convert RGB to BGR
"""
def __init__(self, size, cam, model_paths=None, scale_to_meter=1.0, gpu_id=None):
"""
size: (width, height)
"""
app.Canvas.__init__(self, show=False, size=size)
width, height = size
self.height = height
self.width = width
self.shape = (height, width) # height, width
# OpenGL is right-hand with (x+ right, y+ up and z- is forward)
# OpenCV is right-hand with (x+ right, y- up and z+ is forward)
# We define everything in OUR left-hand global system (x+ is right, y+ is up, z+ is forward)
# We therefore must flip Y for OpenCV and Z for OpenGL for every operation
self.opengl_zdir_neg = np.eye(4, dtype=np.float32)
# self.opengl_zdir_neg[2, 2] = -1
self.opengl_zdir_neg[1, 1], self.opengl_zdir_neg[2, 2] = -1, -1
self.set_cam(cam)
self.setup_views()
# Set up shader programs
# fmt: off
_vertex_code_pointcloud = shader_from_path("point_cloud.vs")
_fragment_code_pointcloud = shader_from_path("point_cloud.frag") # varying
_vertex_code_colored = shader_from_path("colored.vs")
_fragment_code_colored = shader_from_path("colored.frag")
# use colored vertex shader
_fragment_code_bbox = shader_from_path("bbox.frag")
_vertex_code_textured = shader_from_path("textured.vs")
_fragment_code_textured = shader_from_path("textured.frag")
_vertex_code_background = shader_from_path("background.vs")
_fragment_code_background = shader_from_path("background.frag")
self.program_pcl = gloo.Program(_vertex_code_pointcloud, _fragment_code_pointcloud)
self.program_col = gloo.Program(_vertex_code_colored, _fragment_code_colored)
self.program_bbox = gloo.Program(_vertex_code_colored, _fragment_code_bbox)
self.program_tex = gloo.Program(_vertex_code_textured, _fragment_code_textured)
self.program_bg = gloo.Program(_vertex_code_background, _fragment_code_background)
# fmt: on
# Texture where we render the color/depth and its FBO
self.col_tex = gloo.Texture2D(shape=self.shape + (3,))
self.fbo = gloo.FrameBuffer(self.col_tex, gloo.RenderBuffer(self.shape))
self.fbo.activate()
# gloo.set_state(depth_test=True, blend=False, cull_face=True)
gloo.set_state(depth_test=True, blend=False, cull_face=False)
gl.glEnable(gl.GL_LINE_SMOOTH)
# gl.glDisable(gl.GL_LINE_SMOOTH)
gloo.set_clear_color((0.0, 0.0, 0.0))
gloo.set_viewport(0, 0, *self.size)
# Set up background render quad in NDC
quad = [[-1, -1], [1, -1], [1, 1], [-1, 1]]
tex = [[0, 1], [1, 1], [1, 0], [0, 0]]
vertices_type = [
("a_position", np.float32, 2),
("a_texcoord", np.float32, 2),
]
collated = np.asarray(list(zip(quad, tex)), vertices_type)
self.bg_vbuffer = gloo.VertexBuffer(collated)
self.bg_ibuffer = gloo.IndexBuffer([0, 1, 2, 0, 2, 3])
self.models = None
if model_paths is not None:
self._load_models(model_paths, scale_to_meter=scale_to_meter)
def _load_models(self, model_paths, scale_to_meter=1.0):
self.models = load_models(model_paths, scale_to_meter=scale_to_meter)
def set_cam(self, cam, clip_near=0.1, clip_far=100.0):
self.cam = cam
self.clip_near = clip_near
self.clip_far = clip_far
self.mat_proj = self.projective_matrix(cam, 0, 0, self.shape[1], self.shape[0], clip_near, clip_far)
def clear(self, color=True, depth=True):
gloo.clear(color=color, depth=depth)
def setup_views(self):
self.view = dict()
self.view["back"] = np.eye(4)
self.view["back"][:3, :3] = axangle2mat(axis=[1, 0, 0], angle=15 * np.pi / 180)
self.view["back"][:3, 3] = [0, -2.0, -3.25]
self.view["center"] = np.eye(4)
self.view["front"] = np.eye(4)
self.view["front"][:3, :3] = axangle2mat(axis=[1, 0, 0], angle=9 * np.pi / 180)
self.view["front"][:3, 3] = [0, 0, 3.25]
self.view["show"] = np.eye(4)
self.view["show"][:3, :3] = axangle2mat(axis=[1, 0, 0], angle=5 * np.pi / 180) @ axangle2mat(
axis=[0, 1, 0], angle=-15 * np.pi / 180
)
self.view["show"][:3, 3] = [-3.5, -1, -5]
self.used_view = "center"
def finish(self, only_color=False, to_255=False):
# NOTE: the colors in Model3D were converted into BGR, so the rgb loaded here is BGR
im = gl.glReadPixels(0, 0, self.size[0], self.size[1], gl.GL_RGB, gl.GL_FLOAT)
# Read buffer and flip X
rgb = np.copy(np.frombuffer(im, np.float32)).reshape(self.shape + (3,))[::-1, :]
if to_255:
rgb = (rgb * 255 + 0.5).astype(np.uint8)
if only_color:
return rgb
im = gl.glReadPixels(
0,
0,
self.size[0],
self.size[1],
gl.GL_DEPTH_COMPONENT,
gl.GL_FLOAT,
)
# Read buffer and flip X
dep = np.copy(np.frombuffer(im, np.float32)).reshape(self.shape + (1,))[::-1, :]
# Convert z-buffer to depth map
mult = (self.clip_near * self.clip_far) / (self.clip_near - self.clip_far)
addi = self.clip_far / (self.clip_near - self.clip_far)
bg = dep == 1
dep = mult / (dep + addi)
dep[bg] = 0
return rgb, np.squeeze(dep)
def compute_rotation(self, eye_point, look_point):
up = [0, 1, 0]
if eye_point[0] == 0 and eye_point[1] != 0 and eye_point[2] == 0:
up = [0, 0, -1]
rot = np.zeros((3, 3))
rot[2] = look_point - eye_point
rot[2] /= np.linalg.norm(rot[2])
rot[0] = np.cross(rot[2], up)
rot[0] /= np.linalg.norm(rot[0])
rot[1] = np.cross(rot[0], -rot[2])
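# rows 0..2 are mutually orthogonal unit vectors (right, recomputed up,
# forward), so the transposed result below is a proper rotation (det = +1)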
return rot.T
def _validate_pose(self, pose, rot_type="mat"):
if rot_type == "mat":
res = pose
if pose.shape[0] == 3:
res = np.concatenate(
(
pose,
np.array([0, 0, 0, 1], dtype=np.float32).reshape(1, 4),
),
axis=0,
)
elif rot_type == "quat":
res = np.eye(4)
res[:3, :3] = quat2mat(pose[:4])
res[:3, 3] = pose[4:7]
else:
raise ValueError(f"wrong rot_type: {rot_type}")
return res # 4x4
def draw_detection_boundingbox(
self,
pose,
extents,
view="center",
is_gt=False,
thickness=1.5,
centroid=0,
rot_type="mat",
):
"""
centroid: [0,0,0]
"""
assert view in ["front", "top", "back", "show", "center"]
pose = self._validate_pose(pose, rot_type=rot_type)
xsize, ysize, zsize = extents
# fmt: off
bb = np.asarray([[-xsize / 2, ysize / 2, zsize / 2], [xsize / 2, ysize / 2, zsize / 2],
[-xsize / 2, -ysize / 2, zsize / 2], [xsize / 2, -ysize / 2, zsize / 2],
[-xsize / 2, ysize / 2, -zsize / 2], [xsize / 2, ysize / 2, -zsize / 2],
[-xsize / 2, -ysize / 2, -zsize / 2], [xsize / 2, -ysize / 2, -zsize / 2]])
# Set up rendering data
bb += centroid
if is_gt:
colors = [[0, 0, 1], [0, 0, 1], [0, 0, 1], [0, 0, 1], [0, 1, 1], [0, 1, 1], [0, 1, 1], [0, 1, 1]]
else:
colors = [[0, 1, 0], [0, 1, 0], [0, 1, 0], [0, 1, 0], [1, 1, 0], [1, 1, 0], [1, 1, 0], [1, 1, 0]]
# fmt: on
indices = [
    0, 1, 0, 2, 3, 1, 3, 2,  # edges of the +z face of the box
    4, 5, 4, 6, 7, 5, 7, 6,  # edges of the -z face
    0, 4, 1, 5, 2, 6, 3, 7,  # edges connecting the two faces
]
vertices_type = [
("a_position", np.float32, 3),
("a_color", np.float32, 3),
]
collated = np.asarray(list(zip(bb, colors)), vertices_type)
self.program_bbox.bind(gloo.VertexBuffer(collated))
# Flip from our system and .T since OpenGL is column-major
self.program_bbox["u_model"] = (self.opengl_zdir_neg.dot(pose)).T
self.program_bbox["u_view"] = self.view[view].T
self.program_bbox["u_projection"] = self.mat_proj
gloo.set_line_width(width=thickness)
self.program_bbox.draw("lines", gloo.IndexBuffer(indices))
gloo.set_line_width(width=1.0)
def draw_camera(
self,
pose=None,
color=[0, 1, 0],
scaler=1.0,
view="center",
rot_type="mat",
):
if pose is None:
pose = np.eye(4)
else:
pose = self._validate_pose(pose)
assert view in ["front", "top", "back", "show"]
cam = Camera3D(color=color, scaler=scaler)
# View matrix (transforming the coordinate system from OpenCV to OpenGL camera space)
# Flip from our system and .T since OpenGL is column-major
mv = (self.opengl_zdir_neg.dot(pose)).T
self.program_bbox.bind(cam.vertex_buffer)
self.program_bbox["u_model"] = mv
self.program_bbox["u_view"] = self.view[view].T
self.program_bbox["u_projection"] = self.mat_proj
gloo.set_line_width(width=2.5)
self.program_bbox.draw("lines", cam.index_buffer)
gloo.set_line_width(width=1.0)
def draw_pointcloud(self, points, colors=None, s_color=None, radius=1.0, view="center"):
assert view in ["center", "front", "top", "back", "show"]
points =
np.copy(points)
numpy.copy
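# A standalone check of the z-buffer -> depth conversion used in
# Renderer.finish() above: mult / (z + addi) is algebraically equal to
# near*far / (far - z*(far - near)), the inverse of the OpenGL depth mapping.
# The clip-plane values below are illustrative only.
import numpy as np

near, far = 0.1, 100.0
z_buf = np.array([0.0, 0.5, 1.0])                  # sample z-buffer values in [0, 1]
mult = (near * far) / (near - far)
addi = far / (near - far)
depth = mult / (z_buf + addi)                      # same formula as finish()
assert np.allclose(depth, (near * far) / (far - z_buf * (far - near)))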
from tengine import tg
import numpy as np
import cv2
import argparse
import os
import time
DEFAULT_MAX_BOX_COUNT = 100
DEFAULT_REPEAT_COUNT = 1
DEFAULT_THREAD_COUNT = 1
DEFAULT_IMG_H = 300
DEFAULT_IMG_W = 300
DEFAULT_SCALE = 0.008
DEFAULT_MEAN1 = 127.5
DEFAULT_MEAN2 = 127.5
DEFAULT_MEAN3 = 127.5
SHOW_THRESHOLD = 0.5
parser = argparse.ArgumentParser(description='mobilenet_ssd')
parser.add_argument('-m', '--model', default='./models/mobilenet_ssd.tmfile', type=str)
parser.add_argument('-i', '--image', default='./images/dog.jpg', type=str)
parser.add_argument('-r', '--repeat_count', default=f'{DEFAULT_REPEAT_COUNT}', type=str)
parser.add_argument('-t', '--thread_count', default=f'{DEFAULT_THREAD_COUNT}', type=str)
def get_current_time():
return time.time() * 1000
def draw_box(img, x1, y1, x2, y2, w=2, r=125, g=0, b=125):
im_h,im_w,im_c = img.shape
#print("draw_box", im_h, im_w, x1, x2, y1, y2)
x1 = np.clip(x1, 0, im_w)
x2 =
np.clip(x2, 0, im_w)
numpy.clip
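# A hedged sketch of the preprocessing that the constants above suggest
# (resize to 300x300, subtract per-channel means of 127.5, multiply by the
# 0.008 scale); the original script's exact pipeline may differ:
import cv2
import numpy as np

def preprocess(image_path, w=300, h=300, mean=(127.5, 127.5, 127.5), scale=0.008):
    img = cv2.imread(image_path)                    # BGR, uint8
    img = cv2.resize(img, (w, h)).astype(np.float32)
    img -= np.array(mean, dtype=np.float32)         # centre each channel
    img *= scale                                    # rescale to roughly [-1, 1]
    return img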
# plotProjectedEllipse.py
# Written By: <NAME>
# Written on June 12, 2020
import numpy as np
import matplotlib.pyplot as plt
from exodetbox.projectedEllipse import xyz_3Dellipse
from exodetbox.projectedEllipse import timeFromTrueAnomaly
import astropy.units as u
##########################################################################################################
def plotProjectedEllipse(ind, sma, e, W, w, inc, Phi, dmajorp, dminorp, Op, num):
plt.close(num)
fig = plt.figure(num=num)
plt.rc('axes',linewidth=2)
plt.rc('lines',linewidth=2)
plt.rcParams['axes.linewidth']=2
plt.rc('font',weight='bold')
ca = plt.gca()
ca.axis('equal')
## Central Sun
plt.scatter([0],[0],color='orange')
## 3D Ellipse
vs = np.linspace(start=0,stop=2*np.pi,num=300)
r = xyz_3Dellipse(sma[ind],e[ind],W[ind],w[ind],inc[ind],vs)
x_3Dellipse = r[0,0,:]
y_3Dellipse = r[1,0,:]
plt.plot(x_3Dellipse,y_3Dellipse,color='black')
#plot 3D Ellipse Center
plt.scatter(Op[0][ind],Op[1][ind],color='black')
#ang2 = (theta_OpQ_X[ind]+theta_OpQp_X[ind])/2
ang2 = Phi[ind]
dmajorpx1 = Op[0][ind] + dmajorp[ind]*np.cos(ang2)
dmajorpy1 = Op[1][ind] + dmajorp[ind]*np.sin(ang2)
dmajorpx2 = Op[0][ind] + dmajorp[ind]*np.cos(ang2+np.pi)
dmajorpy2 = Op[1][ind] + dmajorp[ind]*np.sin(ang2+np.pi)
plt.plot([Op[0][ind],dmajorpx1],[Op[1][ind],dmajorpy1],color='purple',linestyle='-')
plt.plot([Op[0][ind],dmajorpx2],[Op[1][ind],dmajorpy2],color='purple',linestyle='-')
dminorpx1 = Op[0][ind] + dminorp[ind]*np.cos(ang2+np.pi/2)
dminorpy1 = Op[1][ind] + dminorp[ind]*np.sin(ang2+np.pi/2)
dminorpx2 = Op[0][ind] + dminorp[ind]*np.cos(ang2-np.pi/2)
dminorpy2 = Op[1][ind] + dminorp[ind]*np.sin(ang2-np.pi/2)
plt.plot([Op[0][ind],dminorpx1],[Op[1][ind],dminorpy1],color='purple',linestyle='-')
plt.plot([Op[0][ind],dminorpx2],[Op[1][ind],dminorpy2],color='purple',linestyle='-')
plt.title('sma: ' + str(np.round(sma[ind],4)) + ' e: ' + str(np.round(e[ind],4)) + ' W: ' + str(np.round(W[ind],4)) + '\nw: ' + str(np.round(w[ind],4)) + ' inc: ' + str(np.round(inc[ind],4)))
plt.show(block=False)
####
def plot3DEllipseto2DEllipseProjectionDiagram(ind, sma, e, W, w, inc, Op, Phi,\
dmajorp, dminorp, num):
"""
"""
plt.close(num)
fig = plt.figure(num)
plt.rc('axes',linewidth=2)
plt.rc('lines',linewidth=2)
plt.rcParams['axes.linewidth']=2
plt.rc('font',weight='bold')
ax = fig.add_subplot(111, projection='3d')
## 3D Ellipse
vs = np.linspace(start=0,stop=2*np.pi,num=300)
r = xyz_3Dellipse(sma[ind],e[ind],W[ind],w[ind],inc[ind],vs)
x_3Dellipse = r[0,0,:]
y_3Dellipse = r[1,0,:]
z_3Dellipse = r[2,0,:]
ax.plot(x_3Dellipse,y_3Dellipse,z_3Dellipse,color='black',label='Planet Orbit',linewidth=2)
min_z = np.min(z_3Dellipse)
## Central Sun
ax.scatter(0,0,0,color='orange',marker='o',s=25) #of 3D ellipse
ax.text(0,0,0.15*np.abs(min_z), 'F', None)
ax.plot([0,0],[0,0],[0,1.3*min_z],color='orange',linestyle='--',linewidth=2) #connecting line
ax.scatter(0,0,1.3*min_z,color='orange',marker='x',s=25) #of 2D ellipse
ax.text(0,0,1.5*min_z, 'F\'', None)
## Plot 3D Ellipse semi-major/minor axis
rper = xyz_3Dellipse(sma[ind],e[ind],W[ind],w[ind],inc[ind],0.) #planet position perigee
rapo = xyz_3Dellipse(sma[ind],e[ind],W[ind],w[ind],inc[ind],np.pi) #planet position apogee
ax.plot([rper[0][0],rapo[0][0]],[rper[1][0],rapo[1][0]],[rper[2][0],rapo[2][0]],color='purple', linestyle='-',linewidth=2) #3D Ellipse Semi-major axis
ax.scatter(rper[0][0],rper[1][0],rper[2][0],color='grey',marker='D',s=25) #3D Ellipse Perigee Diamond
ax.text(1.2*rper[0][0],1.2*rper[1][0],rper[2][0], 'A', None)
ax.scatter(rper[0][0],rper[1][0],1.3*min_z,color='blue',marker='D',s=25) #2D Ellipse Perigee Diamond
ax.text(1.1*rper[0][0],1.1*rper[1][0],1.3*min_z, 'A\'', None)
ax.plot([rper[0][0],rper[0][0]],[rper[1][0],rper[1][0]],[rper[2][0],1.3*min_z],color='grey',linestyle='--',linewidth=2) #3D to 2D Ellipse Perigee Diamond
ax.scatter(rapo[0][0],rapo[1][0],rapo[2][0],color='grey', marker='D',s=25) #3D Ellipse Apogee Diamond
ax.text(1.1*rapo[0][0],1.1*rapo[1][0],1.2*rapo[2][0], 'B', None)
ax.scatter(rapo[0][0],rapo[1][0],1.3*min_z,color='blue',marker='D',s=25) #2D Ellipse Perigee Diamond
ax.text(1.1*rapo[0][0],1.1*rapo[1][0],1.3*min_z, 'B\'', None)
ax.plot([rapo[0][0],rapo[0][0]],[rapo[1][0],rapo[1][0]],[rapo[2][0],1.3*min_z],color='grey',linestyle='--',linewidth=2) #3D to 2D Ellipse Apogee Diamond
rbp = xyz_3Dellipse(sma[ind],e[ind],W[ind],w[ind],inc[ind],np.arccos((np.cos(np.pi/2)-e[ind])/(1-e[ind]*np.cos(np.pi/2)))) #3D Ellipse E=90
rbm = xyz_3Dellipse(sma[ind],e[ind],W[ind],w[ind],inc[ind],-np.arccos((np.cos(-np.pi/2)-e[ind])/(1-e[ind]*np.cos(-np.pi/2)))) #3D Ellipse E=-90
ax.plot([rbp[0][0],rbm[0][0]],[rbp[1][0],rbm[1][0]],[rbp[2][0],rbm[2][0]],color='purple', linestyle='-',linewidth=2) #
ax.scatter(rbp[0][0],rbp[1][0],rbp[2][0],color='grey',marker='D',s=25) #3D ellipse minor +
ax.text(1.1*rbp[0][0],1.1*rbp[1][0],1.2*rbp[2][0], 'C', None)
ax.scatter(rbp[0][0],rbp[1][0],1.3*min_z,color='blue',marker='D',s=25) #2D ellipse minor+ projection
ax.text(1.1*rbp[0][0],1.1*rbp[1][0],1.3*min_z, 'C\'', None)
ax.plot([rbp[0][0],rbp[0][0]],[rbp[1][0],rbp[1][0]],[rbp[2][0],1.3*min_z],color='grey',linestyle='--',linewidth=2) #3D to 2D Ellipse minor + Diamond
ax.scatter(rbm[0][0],rbm[1][0],rbm[2][0],color='grey', marker='D',s=25) #3D ellipse minor -
ax.text(1.1*rbm[0][0],0.5*(rbm[1][0]-Op[1][ind]),rbm[2][0]+0.05, 'D', None)
ax.scatter(rbm[0][0],rbm[1][0],1.3*min_z,color='blue', marker='D',s=25) #2D ellipse minor- projection
ax.text(1.1*rbm[0][0],0.5*(rbm[1][0]-Op[1][ind]),1.3*min_z, 'D\'', None)
ax.plot([rbm[0][0],rbm[0][0]],[rbm[1][0],rbm[1][0]],[rbm[2][0],1.3*min_z],color='grey',linestyle='--',linewidth=2) #3D to 2D Ellipse minor - Diamond
## Plot K, H, P
ax.scatter(0.6*(rapo[0][0] - (rper[0][0] + rapo[0][0])/2) + (rper[0][0] + rapo[0][0])/2,\
0.6*(rapo[1][0] - (rper[1][0] + rapo[1][0])/2) + (rper[1][0] + rapo[1][0])/2,\
0.6*(rapo[2][0] - (rper[2][0] + rapo[2][0])/2) + (rper[2][0] + rapo[2][0])/2,color='green', marker='x',s=36) #Point along OB, point H
ax.text(0.6*(rapo[0][0] - (rper[0][0] + rapo[0][0])/2) + (rper[0][0] + rapo[0][0])/2,\
0.6*(rapo[1][0] - (rper[1][0] + rapo[1][0])/2) + (rper[1][0] + rapo[1][0])/2,\
0.6*(rapo[2][0] - (rper[2][0] + rapo[2][0])/2) + (rper[2][0] + rapo[2][0])/2+0.1,'H', None) #Point along OB, point H
xscaletmp = np.sqrt(1-.6**2)
ax.scatter(xscaletmp*(rbp[0][0] - (rper[0][0] + rapo[0][0])/2) + (rper[0][0] + rapo[0][0])/2,\
xscaletmp*(rbp[1][0] - (rper[1][0] + rapo[1][0])/2) + (rper[1][0] + rapo[1][0])/2,\
xscaletmp*(rbp[2][0] - (rper[2][0] + rapo[2][0])/2) + (rper[2][0] + rapo[2][0])/2,color='green',marker='x',s=36) #point along OC, point K
ax.text(xscaletmp*(rbp[0][0] - (rper[0][0] + rapo[0][0])/2) + (rper[0][0] + rapo[0][0])/2,\
xscaletmp*(rbp[1][0] - (rper[1][0] + rapo[1][0])/2) + (rper[1][0] + rapo[1][0])/2,\
xscaletmp*(rbp[2][0] - (rper[2][0] + rapo[2][0])/2) + (rper[2][0] + rapo[2][0])/2+0.1,'K',None) #point along OC, point K
angtmp = np.arctan2(0.6,xscaletmp)
ax.scatter(np.cos(angtmp)*(rbp[0][0] - (rper[0][0] + rapo[0][0])/2) + (rapo[0][0] - (rper[0][0] + rapo[0][0])/2)*np.sin(angtmp) + (rper[0][0] + rapo[0][0])/2,\
np.cos(angtmp)*(rbp[1][0] - (rper[1][0] + rapo[1][0])/2) + (rapo[1][0] - (rper[1][0] + rapo[1][0])/2)*np.sin(angtmp) + (rper[1][0] + rapo[1][0])/2,\
np.cos(angtmp)*(rbp[2][0] - (rper[2][0] + rapo[2][0])/2) + (rapo[2][0] - (rper[2][0] + rapo[2][0])/2)*np.sin(angtmp) + (rper[2][0] + rapo[2][0])/2,color='green',marker='o',s=25) #Point P on 3D Ellipse
ax.text(np.cos(angtmp)*(rbp[0][0] - (rper[0][0] + rapo[0][0])/2) + (rapo[0][0] - (rper[0][0] + rapo[0][0])/2)*np.sin(angtmp) + (rper[0][0] + rapo[0][0])/2,\
np.cos(angtmp)*(rbp[1][0] - (rper[1][0] + rapo[1][0])/2) + (rapo[1][0] - (rper[1][0] + rapo[1][0])/2)*np.sin(angtmp) + (rper[1][0] + rapo[1][0])/2,\
np.cos(angtmp)*(rbp[2][0] - (rper[2][0] + rapo[2][0])/2) + (rapo[2][0] - (rper[2][0] + rapo[2][0])/2)*np.sin(angtmp) + (rper[2][0] + rapo[2][0])/2+0.1,'P',None) #Point P on 3D Ellipse
## Plot KP, HP
ax.plot([0.6*(rapo[0][0] - (rper[0][0] + rapo[0][0])/2) + (rper[0][0] + rapo[0][0])/2,np.cos(angtmp)*(rbp[0][0] - (rper[0][0] + rapo[0][0])/2) + (rapo[0][0] - (rper[0][0] + rapo[0][0])/2)*np.sin(angtmp) + (rper[0][0] + rapo[0][0])/2],\
[0.6*(rapo[1][0] - (rper[1][0] + rapo[1][0])/2) + (rper[1][0] + rapo[1][0])/2,np.cos(angtmp)*(rbp[1][0] - (rper[1][0] + rapo[1][0])/2) + (rapo[1][0] - (rper[1][0] + rapo[1][0])/2)*np.sin(angtmp) + (rper[1][0] + rapo[1][0])/2],\
[0.6*(rapo[2][0] - (rper[2][0] + rapo[2][0])/2) + (rper[2][0] + rapo[2][0])/2,np.cos(angtmp)*(rbp[2][0] - (rper[2][0] + rapo[2][0])/2) + (rapo[2][0] - (rper[2][0] + rapo[2][0])/2)*np.sin(angtmp) + (rper[2][0] + rapo[2][0])/2],linestyle=':',color='black') #H to P line
ax.plot([xscaletmp*(rbp[0][0] - (rper[0][0] + rapo[0][0])/2) + (rper[0][0] + rapo[0][0])/2,np.cos(angtmp)*(rbp[0][0] - (rper[0][0] + rapo[0][0])/2) + (rapo[0][0] - (rper[0][0] + rapo[0][0])/2)*np.sin(angtmp) + (rper[0][0] + rapo[0][0])/2],\
[xscaletmp*(rbp[1][0] - (rper[1][0] + rapo[1][0])/2) + (rper[1][0] + rapo[1][0])/2,np.cos(angtmp)*(rbp[1][0] - (rper[1][0] + rapo[1][0])/2) + (rapo[1][0] - (rper[1][0] + rapo[1][0])/2)*np.sin(angtmp) + (rper[1][0] + rapo[1][0])/2],\
[xscaletmp*(rbp[2][0] - (rper[2][0] + rapo[2][0])/2) + (rper[2][0] + rapo[2][0])/2,np.cos(angtmp)*(rbp[2][0] - (rper[2][0] + rapo[2][0])/2) + (rapo[2][0] - (rper[2][0] + rapo[2][0])/2)*np.sin(angtmp) + (rper[2][0] + rapo[2][0])/2],linestyle=':',color='black') #K to P line
## Plot K', H', P'
ax.scatter(0.6*(rapo[0][0] - (rper[0][0] + rapo[0][0])/2) + (rper[0][0] + rapo[0][0])/2,\
0.6*(rapo[1][0] - (rper[1][0] + rapo[1][0])/2) + (rper[1][0] + rapo[1][0])/2,\
1.3*min_z,color='magenta', marker='x',s=36) #Point along O'B', point H'
ax.text(0.6*(rapo[0][0] - (rper[0][0] + rapo[0][0])/2) + (rper[0][0] + rapo[0][0])/2,\
0.6*(rapo[1][0] - (rper[1][0] + rapo[1][0])/2) + (rper[1][0] + rapo[1][0])/2,\
1.3*min_z-0.1,'H\'',None) #Point along O'B', point H'
xscaletmp = np.sqrt(1-.6**2)
ax.scatter(xscaletmp*(rbp[0][0] - (rper[0][0] + rapo[0][0])/2) + (rper[0][0] + rapo[0][0])/2,\
xscaletmp*(rbp[1][0] - (rper[1][0] + rapo[1][0])/2) + (rper[1][0] + rapo[1][0])/2,\
1.3*min_z,color='magenta',marker='x',s=36) #point along O'C', point K'
ax.text(xscaletmp*(rbp[0][0] - (rper[0][0] + rapo[0][0])/2) + (rper[0][0] + rapo[0][0])/2,\
xscaletmp*(rbp[1][0] - (rper[1][0] + rapo[1][0])/2) + (rper[1][0] + rapo[1][0])/2,\
1.3*min_z-0.1,'K\'',None) #point along O'C', point K'
angtmp = np.arctan2(0.6,xscaletmp)
ax.scatter(np.cos(angtmp)*(rbp[0][0] - (rper[0][0] + rapo[0][0])/2) + (rapo[0][0] - (rper[0][0] + rapo[0][0])/2)*np.sin(angtmp) + (rper[0][0] + rapo[0][0])/2,\
np.cos(angtmp)*(rbp[1][0] - (rper[1][0] + rapo[1][0])/2) + (rapo[1][0] - (rper[1][0] + rapo[1][0])/2)*np.sin(angtmp) + (rper[1][0] + rapo[1][0])/2,\
1.3*min_z,color='magenta',marker='o',s=25) #Point P' on 2D Ellipse
ax.text(np.cos(angtmp)*(rbp[0][0] - (rper[0][0] + rapo[0][0])/2) + (rapo[0][0] - (rper[0][0] + rapo[0][0])/2)*np.sin(angtmp) + (rper[0][0] + rapo[0][0])/2,\
np.cos(angtmp)*(rbp[1][0] - (rper[1][0] + rapo[1][0])/2) + (rapo[1][0] - (rper[1][0] + rapo[1][0])/2)*np.sin(angtmp) + (rper[1][0] + rapo[1][0])/2,\
1.3*min_z-0.1,'P\'',None) #Point P' on 2D Ellipse
## Plot K'P', H'P'
ax.plot([0.6*(rapo[0][0] - (rper[0][0] + rapo[0][0])/2) + (rper[0][0] + rapo[0][0])/2,np.cos(angtmp)*(rbp[0][0] - (rper[0][0] + rapo[0][0])/2) + (rapo[0][0] - (rper[0][0] + rapo[0][0])/2)*np.sin(angtmp) + (rper[0][0] + rapo[0][0])/2],\
[0.6*(rapo[1][0] - (rper[1][0] + rapo[1][0])/2) + (rper[1][0] + rapo[1][0])/2,np.cos(angtmp)*(rbp[1][0] - (rper[1][0] + rapo[1][0])/2) + (rapo[1][0] - (rper[1][0] + rapo[1][0])/2)*np.sin(angtmp) + (rper[1][0] + rapo[1][0])/2],\
[1.3*min_z,1.3*min_z],linestyle=':',color='black') #H to P line
ax.plot([xscaletmp*(rbp[0][0] - (rper[0][0] + rapo[0][0])/2) + (rper[0][0] + rapo[0][0])/2,np.cos(angtmp)*(rbp[0][0] - (rper[0][0] + rapo[0][0])/2) + (rapo[0][0] - (rper[0][0] + rapo[0][0])/2)*np.sin(angtmp) + (rper[0][0] + rapo[0][0])/2],\
[xscaletmp*(rbp[1][0] - (rper[1][0] + rapo[1][0])/2) + (rper[1][0] + rapo[1][0])/2,np.cos(angtmp)*(rbp[1][0] - (rper[1][0] + rapo[1][0])/2) + (rapo[1][0] - (rper[1][0] + rapo[1][0])/2)*np.sin(angtmp) + (rper[1][0] + rapo[1][0])/2],\
[1.3*min_z,1.3*min_z],linestyle=':',color='black') #K to P line
## Plot PP', KK', HH'
ax.plot([np.cos(angtmp)*(rbp[0][0] - (rper[0][0] + rapo[0][0])/2) + (rapo[0][0] - (rper[0][0] + rapo[0][0])/2)*np.sin(angtmp) + (rper[0][0] + rapo[0][0])/2,np.cos(angtmp)*(rbp[0][0] - (rper[0][0] + rapo[0][0])/2) + (rapo[0][0] - (rper[0][0] + rapo[0][0])/2)*np.sin(angtmp) + (rper[0][0] + rapo[0][0])/2],\
[np.cos(angtmp)*(rbp[1][0] - (rper[1][0] + rapo[1][0])/2) + (rapo[1][0] - (rper[1][0] + rapo[1][0])/2)*np.sin(angtmp) + (rper[1][0] + rapo[1][0])/2,np.cos(angtmp)*(rbp[1][0] - (rper[1][0] + rapo[1][0])/2) + (rapo[1][0] - (rper[1][0] + rapo[1][0])/2)*np.sin(angtmp) + (rper[1][0] + rapo[1][0])/2],\
[np.cos(angtmp)*(rbp[2][0] - (rper[2][0] + rapo[2][0])/2) + (rapo[2][0] - (rper[2][0] + rapo[2][0])/2)*np.sin(angtmp) + (rper[2][0] + rapo[2][0])/2,1.3*min_z],color='black',linestyle=':') #PP'
ax.plot([0.6*(rapo[0][0] - (rper[0][0] + rapo[0][0])/2) + (rper[0][0] + rapo[0][0])/2,0.6*(rapo[0][0] - (rper[0][0] + rapo[0][0])/2) + (rper[0][0] + rapo[0][0])/2],\
[0.6*(rapo[1][0] - (rper[1][0] + rapo[1][0])/2) + (rper[1][0] + rapo[1][0])/2,0.6*(rapo[1][0] - (rper[1][0] + rapo[1][0])/2) + (rper[1][0] + rapo[1][0])/2],\
[0.6*(rapo[2][0] - (rper[2][0] + rapo[2][0])/2) + (rper[2][0] + rapo[2][0])/2,1.3*min_z],color='black',linestyle=':') #HH'
ax.plot([xscaletmp*(rbp[0][0] - (rper[0][0] + rapo[0][0])/2) + (rper[0][0] + rapo[0][0])/2,xscaletmp*(rbp[0][0] - (rper[0][0] + rapo[0][0])/2) + (rper[0][0] + rapo[0][0])/2],\
[xscaletmp*(rbp[1][0] - (rper[1][0] + rapo[1][0])/2) + (rper[1][0] + rapo[1][0])/2,xscaletmp*(rbp[1][0] - (rper[1][0] + rapo[1][0])/2) + (rper[1][0] + rapo[1][0])/2],\
[xscaletmp*(rbp[2][0] - (rper[2][0] + rapo[2][0])/2) + (rper[2][0] + rapo[2][0])/2,1.3*min_z],color='black',linestyle=':') #KK'
## Plot Conjugate Diameters
ax.plot([rbp[0][0],rbm[0][0]],[rbp[1][0],rbm[1][0]],[1.3*min_z,1.3*min_z],color='blue',linestyle='-',linewidth=2) #2D ellipse minor+ projection
ax.plot([rper[0][0],rapo[0][0]],[rper[1][0],rapo[1][0]],[1.3*min_z,1.3*min_z],color='blue',linestyle='-',linewidth=2) #2D Ellipse Perigee Diamond
## Plot Ellipse Center
ax.scatter((rper[0][0] + rapo[0][0])/2,(rper[1][0] + rapo[1][0])/2,(rper[2][0] + rapo[2][0])/2,color='grey',marker='o',s=36) #3D Ellipse
ax.text(1.2*(rper[0][0] + rapo[0][0])/2,1.2*(rper[1][0] + rapo[1][0])/2,1.31*(rper[2][0] + rapo[2][0])/2, 'O', None)
ax.scatter(Op[0][ind],Op[1][ind], 1.3*min_z, color='grey', marker='o',s=25) #2D Ellipse Center
ax.text(1.2*(rper[0][0] + rapo[0][0])/2,1.2*(rper[1][0] + rapo[1][0])/2,1.4*min_z, 'O\'', None)
    ax.plot([(rper[0][0] + rapo[0][0])/2,Op[0][ind]],[(rper[1][0] + rapo[1][0])/2,Op[1][ind]],[(rper[2][0] + rapo[2][0])/2,1.3*min_z],color='grey',linestyle='--',linewidth=2) #Plot O to O'
#ang2 = (theta_OpQ_X[ind]+theta_OpQp_X[ind])/2
ang2 = Phi[ind]
dmajorpx1 = Op[0][ind] + dmajorp[ind]*np.cos(ang2)
dmajorpy1 = Op[1][ind] + dmajorp[ind]*np.sin(ang2)
dmajorpx2 = Op[0][ind] + dmajorp[ind]*np.cos(ang2+np.pi)
dmajorpy2 = Op[1][ind] + dmajorp[ind]*np.sin(ang2+np.pi)
ax.plot([Op[0][ind],dmajorpx1],[Op[1][ind],dmajorpy1],[1.3*min_z,1.3*min_z],color='purple',linestyle='-',linewidth=2)
ax.plot([Op[0][ind],dmajorpx2],[Op[1][ind],dmajorpy2],[1.3*min_z,1.3*min_z],color='purple',linestyle='-',linewidth=2)
dminorpx1 = Op[0][ind] + dminorp[ind]*np.cos(ang2+np.pi/2)
dminorpy1 = Op[1][ind] + dminorp[ind]*np.sin(ang2+np.pi/2)
dminorpx2 = Op[0][ind] + dminorp[ind]*np.cos(ang2-np.pi/2)
dminorpy2 = Op[1][ind] + dminorp[ind]*np.sin(ang2-np.pi/2)
ax.plot([Op[0][ind],dminorpx1],[Op[1][ind],dminorpy1],[1.3*min_z,1.3*min_z],color='purple',linestyle='-',linewidth=2)
ax.plot([Op[0][ind],dminorpx2],[Op[1][ind],dminorpy2],[1.3*min_z,1.3*min_z],color='purple',linestyle='-',linewidth=2)
dmajorpx1 = Op[0][ind] + dmajorp[ind]*np.cos(ang2)
dmajorpy1 = Op[1][ind] + dmajorp[ind]*np.sin(ang2)
dmajorpx2 = Op[0][ind] + dmajorp[ind]*np.cos(ang2+np.pi)
dmajorpy2 = Op[1][ind] + dmajorp[ind]*np.sin(ang2+np.pi)
dminorpx1 = Op[0][ind] + dminorp[ind]*np.cos(ang2+np.pi/2)
dminorpy1 = Op[1][ind] + dminorp[ind]*np.sin(ang2+np.pi/2)
dminorpx2 = Op[0][ind] + dminorp[ind]*np.cos(ang2-np.pi/2)
dminorpy2 = Op[1][ind] + dminorp[ind]*np.sin(ang2-np.pi/2)
ax.plot([Op[0][ind],dmajorpx1],[Op[1][ind],dmajorpy1],[1.3*min_z,1.3*min_z],color='purple',linestyle='-',linewidth=2)
ax.plot([Op[0][ind],dmajorpx2],[Op[1][ind],dmajorpy2],[1.3*min_z,1.3*min_z],color='purple',linestyle='-',linewidth=2)
ax.plot([Op[0][ind],dminorpx1],[Op[1][ind],dminorpy1],[1.3*min_z,1.3*min_z],color='purple',linestyle='-',linewidth=2)
ax.plot([Op[0][ind],dminorpx2],[Op[1][ind],dminorpy2],[1.3*min_z,1.3*min_z],color='purple',linestyle='-',linewidth=2)
ax.scatter([dmajorpx1,dmajorpx2,dminorpx1,dminorpx2],[dmajorpy1,dmajorpy2,dminorpy1,dminorpy2],[1.3*min_z,1.3*min_z,1.3*min_z,1.3*min_z],color='black',marker='o',s=25,zorder=6)
ax.text(1.05*dmajorpx1,1.05*dmajorpy1,1.3*min_z, 'I', None)#(dmajorpx1,dmajorpy1,0))
ax.text(1.1*dmajorpx2,1.1*dmajorpy2,1.3*min_z, 'R', None)#(dmajorpx2,dmajorpy2,0))
ax.text(1.05*dminorpx1,0.1*(dminorpy1-Op[1][ind]),1.3*min_z, 'S', None)#(dminorpx1,dminorpy1,0))
ax.text(1.05*dminorpx2,1.05*dminorpy2,1.3*min_z, 'T', None)#(dminorpx2,dminorpy2,0))
#ax.text(x,y,z, label, zdir)
x_projEllipse = Op[0][ind] + dmajorp[ind]*np.cos(vs)*np.cos(ang2) - dminorp[ind]*np.sin(vs)*np.sin(ang2)
y_projEllipse = Op[1][ind] + dmajorp[ind]*np.cos(vs)*np.sin(ang2) + dminorp[ind]*np.sin(vs)*np.cos(ang2)
ax.plot(x_projEllipse,y_projEllipse,1.3*min_z*np.ones(len(vs)), color='red', linestyle='-',zorder=5,linewidth=2)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.grid(False)
#artificial box
xmax = np.max([np.abs(rper[0][0]),np.abs(rapo[0][0]),np.abs(1.3*min_z)])
ax.scatter([-xmax,xmax],[-xmax,xmax],[-0.2-np.abs(1.3*min_z),0.2+1.3*min_z],color=None,alpha=0)
ax.set_xlim3d(-0.99*xmax+Op[0][ind],0.99*xmax+Op[0][ind])
ax.set_ylim3d(-0.99*xmax+Op[1][ind],0.99*xmax+Op[1][ind])
ax.set_zlim3d(-0.99*xmax,0.99*xmax)
ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) #remove background color
ax.set_axis_off() #removes axes
plt.title('sma: ' + str(np.round(sma[ind],4)) + ' e: ' + str(np.round(e[ind],4)) + ' W: ' + str(np.round(W[ind],4)) + '\nw: ' + str(np.round(w[ind],4)) + ' inc: ' + str(np.round(inc[ind],4)))
plt.show(block=False)
####
def plotEllipseMajorAxisFromConjugate(ind, sma, e, W, w, inc, Op, Phi,\
dmajorp, dminorp, num):
""" Plots the Q and Q' points as well as teh line
"""
plt.close(num)
fig = plt.figure(num)
plt.rc('axes',linewidth=2)
plt.rc('lines',linewidth=2)
plt.rcParams['axes.linewidth']=2
plt.rc('font',weight='bold')
ax = plt.gca()
## 3D Ellipse
vs = np.linspace(start=0,stop=2*np.pi,num=300)
r = xyz_3Dellipse(sma[ind],e[ind],W[ind],w[ind],inc[ind],vs)
x_3Dellipse = r[0,0,:]
y_3Dellipse = r[1,0,:]
z_3Dellipse = r[2,0,:]
ax.plot(x_3Dellipse,y_3Dellipse,color='black',label='Planet Orbit',linewidth=2)
min_z = np.min(z_3Dellipse)
## Central Sun
ax.scatter(0,0,color='orange',marker='x',s=25,zorder=20) #of 2D ellipse
ax.text(0-.1,0-.1, 'F\'', None)
## Plot 3D Ellipse semi-major/minor axis
rper = xyz_3Dellipse(sma[ind],e[ind],W[ind],w[ind],inc[ind],0.) #planet position perigee
rapo = xyz_3Dellipse(sma[ind],e[ind],W[ind],w[ind],inc[ind],np.pi) #planet position apogee
ax.scatter(rper[0][0],rper[1][0],color='blue',marker='D',s=25,zorder=25) #2D Ellipse Perigee Diamond
ax.text(1.1*rper[0][0],1.1*rper[1][0], 'A\'', None)
ax.scatter(rapo[0][0],rapo[1][0],color='blue',marker='D',s=25,zorder=25) #2D Ellipse Perigee Diamond
ax.text(1.1*rapo[0][0]-0.1,1.1*rapo[1][0], 'B\'', None)
rbp = xyz_3Dellipse(sma[ind],e[ind],W[ind],w[ind],inc[ind],np.arccos((np.cos(np.pi/2)-e[ind])/(1-e[ind]*np.cos(np.pi/2)))) #3D Ellipse E=90
rbm = xyz_3Dellipse(sma[ind],e[ind],W[ind],w[ind],inc[ind],-np.arccos((np.cos(-np.pi/2)-e[ind])/(1-e[ind]*np.cos(-np.pi/2)))) #3D Ellipse E=-90
ax.plot([rbp[0][0],rbm[0][0]],[rbp[1][0],rbm[1][0]],color='purple', linestyle='-',linewidth=2) #
ax.scatter(rbp[0][0],rbp[1][0],color='blue',marker='D',s=25,zorder=20) #2D ellipse minor+ projection
ax.text(1.1*rbp[0][0]-.01,1.1*rbp[1][0]-.05, 'C\'', None)
ax.scatter(rbm[0][0],rbm[1][0],color='blue', marker='D',s=25,zorder=20) #2D ellipse minor- projection
ax.text(1.1*rbm[0][0],0.5*(rbm[1][0]-Op[1][ind])-.05, 'D\'', None)
## Plot QQ' Line
#rapo[0][0],rapo[1][0] #B'
#rbp[0][0],rbp[1][0] #C'
#Op[0][ind],Op[1][ind] #O'
tmp = np.asarray([-(rbp[1][0]-Op[1][ind]),(rbp[0][0]-Op[0][ind])])
QQp_hat = tmp/np.linalg.norm(tmp)
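    # QQp_hat is the unit vector perpendicular to O'C' (the O'->C' direction rotated by 90 deg);
    # Q and Q' below are built by stepping +/- |O'C'| from B' along this direction.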
dOpCp = np.sqrt((rbp[0][0]-Op[0][ind])**2 + (rbp[1][0]-Op[1][ind])**2)
#Q = Bp - dOpCp*QQp_hat
Qx = rapo[0][0] - dOpCp*QQp_hat[0]
Qy = rapo[1][0] - dOpCp*QQp_hat[1]
#Qp = Bp + DOpCp*QQp_hat
Qpx = rapo[0][0] + dOpCp*QQp_hat[0]
Qpy = rapo[1][0] + dOpCp*QQp_hat[1]
ax.plot([Op[0][ind],Qx],[Op[1][ind],Qy],color='black',linestyle='-',linewidth=2,zorder=29) #OpQ
ax.plot([Op[0][ind],Qpx],[Op[1][ind],Qpy],color='black',linestyle='-',linewidth=2,zorder=29) #OpQp
ax.plot([Qx,Qpx],[Qy,Qpy],color='grey',linestyle='-',linewidth=2,zorder=29)
ax.scatter([Qx,Qpx],[Qy,Qpy],color='grey',marker='s',s=36,zorder=30)
ax.text(Qx,Qy-0.1,'Q', None)
ax.text(Qpx,Qpy+0.05,'Q\'', None)
## Plot Conjugate Diameters
ax.plot([rbp[0][0],rbm[0][0]],[rbp[1][0],rbm[1][0]],color='blue',linestyle='-',linewidth=2) #2D ellipse minor+ projection
ax.plot([rper[0][0],rapo[0][0]],[rper[1][0],rapo[1][0]],color='blue',linestyle='-',linewidth=2) #2D Ellipse Perigee Diamond
## Plot Ellipse Center
ax.scatter(Op[0][ind],Op[1][ind], color='grey', marker='o',s=25,zorder=30) #2D Ellipse Center
ax.text(1.2*(rper[0][0] + rapo[0][0])/2,1.2*(rper[1][0] + rapo[1][0])/2+0.05, 'O\'', None)
#ang2 = (theta_OpQ_X[ind]+theta_OpQp_X[ind])/2
ang2 = Phi[ind]
dmajorpx1 = Op[0][ind] + dmajorp[ind]*np.cos(ang2)
dmajorpy1 = Op[1][ind] + dmajorp[ind]*np.sin(ang2)
dmajorpx2 = Op[0][ind] + dmajorp[ind]*np.cos(ang2+np.pi)
dmajorpy2 = Op[1][ind] + dmajorp[ind]*np.sin(ang2+np.pi)
ax.plot([Op[0][ind],dmajorpx1],[Op[1][ind],dmajorpy1],color='purple',linestyle='-',linewidth=2)
ax.plot([Op[0][ind],dmajorpx2],[Op[1][ind],dmajorpy2],color='purple',linestyle='-',linewidth=2)
dminorpx1 = Op[0][ind] + dminorp[ind]*np.cos(ang2+np.pi/2)
dminorpy1 = Op[1][ind] + dminorp[ind]*np.sin(ang2+np.pi/2)
dminorpx2 = Op[0][ind] + dminorp[ind]*np.cos(ang2-np.pi/2)
dminorpy2 = Op[1][ind] + dminorp[ind]*np.sin(ang2-np.pi/2)
ax.plot([Op[0][ind],dminorpx1],[Op[1][ind],dminorpy1],color='purple',linestyle='-',linewidth=2)
ax.plot([Op[0][ind],dminorpx2],[Op[1][ind],dminorpy2],color='purple',linestyle='-',linewidth=2)
dmajorpx1 = Op[0][ind] + dmajorp[ind]*np.cos(ang2)
dmajorpy1 = Op[1][ind] + dmajorp[ind]*np.sin(ang2)
dmajorpx2 = Op[0][ind] + dmajorp[ind]*np.cos(ang2+np.pi)
dmajorpy2 = Op[1][ind] + dmajorp[ind]*np.sin(ang2+np.pi)
dminorpx1 = Op[0][ind] + dminorp[ind]*np.cos(ang2+np.pi/2)
dminorpy1 = Op[1][ind] + dminorp[ind]*np.sin(ang2+np.pi/2)
dminorpx2 = Op[0][ind] + dminorp[ind]*np.cos(ang2-np.pi/2)
dminorpy2 = Op[1][ind] + dminorp[ind]*np.sin(ang2-np.pi/2)
ax.plot([Op[0][ind],dmajorpx1],[Op[1][ind],dmajorpy1],color='purple',linestyle='-',linewidth=2)
ax.plot([Op[0][ind],dmajorpx2],[Op[1][ind],dmajorpy2],color='purple',linestyle='-',linewidth=2)
ax.plot([Op[0][ind],dminorpx1],[Op[1][ind],dminorpy1],color='purple',linestyle='-',linewidth=2)
ax.plot([Op[0][ind],dminorpx2],[Op[1][ind],dminorpy2],color='purple',linestyle='-',linewidth=2)
ax.scatter([dmajorpx1,dmajorpx2,dminorpx1,dminorpx2],[dmajorpy1,dmajorpy2,dminorpy1,dminorpy2],color='black',marker='o',s=25,zorder=6)
ax.text(1.05*dmajorpx1,1.05*dmajorpy1, 'I', None)
ax.text(1.1*dmajorpx2,1.1*dmajorpy2, 'R', None)
ax.text(1.05*dminorpx1,0.1*(dminorpy1-Op[1][ind])-.05, 'S', None)
ax.text(1.05*dminorpx2-0.1,1.05*dminorpy2-.075, 'T', None)
x_projEllipse = Op[0][ind] + dmajorp[ind]*np.cos(vs)*np.cos(ang2) - dminorp[ind]*np.sin(vs)*np.sin(ang2)
y_projEllipse = Op[1][ind] + dmajorp[ind]*np.cos(vs)*np.sin(ang2) + dminorp[ind]*np.sin(vs)*np.cos(ang2)
ax.plot(x_projEllipse,y_projEllipse, color='red', linestyle='-',zorder=5,linewidth=2)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.grid(False)
xmax = np.max([np.abs(rper[0][0]),np.abs(rapo[0][0]),np.abs(1.3*min_z), np.abs(Qpx), np.abs(Qx)])
ax.scatter([-xmax,xmax],[-xmax,xmax],color=None,alpha=0)
ax.set_xlim(-0.99*xmax+Op[0][ind],0.99*xmax+Op[0][ind])
ax.set_ylim(-0.99*xmax+Op[1][ind],0.99*xmax+Op[1][ind])
ax.set_axis_off() #removes axes
ax.axis('equal')
plt.title('sma: ' + str(np.round(sma[ind],4)) + ' e: ' + str(np.round(e[ind],4)) + ' W: ' + str(np.round(W[ind],4)) + '\nw: ' + str(np.round(w[ind],4)) + ' inc: ' + str(np.round(inc[ind],4)))
plt.show(block=False)
def plotDerotatedEllipse(ind, sma, e, W, w, inc, Phi, dmajorp, dminorp, Op, x, y, num=879):
plt.close(num)
fig = plt.figure(num=num)
plt.rc('axes',linewidth=2)
plt.rc('lines',linewidth=2)
plt.rcParams['axes.linewidth']=2
plt.rc('font',weight='bold')
ca = plt.gca()
ca.axis('equal')
plt.scatter([0],[0],color='orange')
## Plot 3D Ellipse
vs = np.linspace(start=0,stop=2*np.pi,num=300)
r = xyz_3Dellipse(sma[ind],e[ind],W[ind],w[ind],inc[ind],vs)
x_3Dellipse = r[0,0,:]
y_3Dellipse = r[1,0,:]
plt.plot(x_3Dellipse,y_3Dellipse,color='black')
## Plot 3D Ellipse Center
plt.scatter(Op[0][ind],Op[1][ind],color='black')
## Plot Rotated Ellipse
#ang2 = (theta_OpQ_X[ind]+theta_OpQp_X[ind])/2
ang2 = Phi[ind]
dmajorpx1 = Op[0][ind] + dmajorp[ind]*np.cos(ang2)
dmajorpy1 = Op[1][ind] + dmajorp[ind]*np.sin(ang2)
dmajorpx2 = Op[0][ind] + dmajorp[ind]*np.cos(ang2+np.pi)
dmajorpy2 = Op[1][ind] + dmajorp[ind]*np.sin(ang2+np.pi)
dminorpx1 = Op[0][ind] + dminorp[ind]*np.cos(ang2+np.pi/2)
dminorpy1 = Op[1][ind] + dminorp[ind]*np.sin(ang2+np.pi/2)
dminorpx2 = Op[0][ind] + dminorp[ind]*np.cos(ang2-np.pi/2)
dminorpy2 = Op[1][ind] + dminorp[ind]*np.sin(ang2-np.pi/2)
plt.plot([Op[0][ind],dmajorpx1],[Op[1][ind],dmajorpy1],color='purple',linestyle='-')
plt.plot([Op[0][ind],dmajorpx2],[Op[1][ind],dmajorpy2],color='purple',linestyle='-')
plt.plot([Op[0][ind],dminorpx1],[Op[1][ind],dminorpy1],color='purple',linestyle='-')
plt.plot([Op[0][ind],dminorpx2],[Op[1][ind],dminorpy2],color='purple',linestyle='-')
#new plot stuff
Erange = np.linspace(start=0.,stop=2*np.pi,num=400)
plt.plot([-dmajorp[ind],dmajorp[ind]],[0,0],color='purple',linestyle='--') #major
plt.plot([0,0],[-dminorp[ind],dminorp[ind]],color='purple',linestyle='--') #minor
xellipsetmp = dmajorp[ind]*np.cos(Erange)
yellipsetmp = dminorp[ind]*np.sin(Erange)
plt.plot(xellipsetmp,yellipsetmp,color='black')
plt.scatter(x[ind],y[ind],color='orange',marker='x')
c_ae = dmajorp[ind]*np.sqrt(1-dminorp[ind]**2/dmajorp[ind]**2)
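    # c_ae = a*sqrt(1 - b^2/a^2) is the center-to-focus distance (linear eccentricity) of the
    # derotated projected ellipse; the two blue points below mark its foci.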
plt.scatter([-c_ae,c_ae],[0,0],color='blue')
plt.title('sma: ' + str(np.round(sma[ind],4)) + ' e: ' + str(np.round(e[ind],4)) + ' W: ' + str(np.round(W[ind],4)) + '\nw: ' + str(np.round(w[ind],4)) + ' inc: ' + str(np.round(inc[ind],4)))
plt.show(block=False)
def plotReorientationMethod(ind, sma, e, W, w, inc, x, y, Phi, Op, dmajorp, dminorp,\
minSepPoints_x, minSepPoints_y, num):
plt.close(num)
fig = plt.figure(num=num)
plt.rc('axes',linewidth=2)
plt.rc('lines',linewidth=2)
plt.rcParams['axes.linewidth']=2
plt.rc('font',weight='bold')
ca = plt.gca()
ca.axis('equal')
plt.scatter([0],[0],color='orange')
## Plot 3D Ellipse
vs = np.linspace(start=0,stop=2*np.pi,num=300)
r = xyz_3Dellipse(sma[ind],e[ind],W[ind],w[ind],inc[ind],vs)
x_3Dellipse = r[0,0,:]
y_3Dellipse = r[1,0,:]
plt.plot(x_3Dellipse,y_3Dellipse,color='black')
## Plot 3D Ellipse Center
plt.scatter(Op[0][ind],Op[1][ind],color='black')
## Plot Rotated Ellipse
#ang2 = (theta_OpQ_X[ind]+theta_OpQp_X[ind])/2
ang2 = Phi[ind]
dmajorpx1 = Op[0][ind] + dmajorp[ind]*np.cos(ang2)
dmajorpy1 = Op[1][ind] + dmajorp[ind]*np.sin(ang2)
dmajorpx2 = Op[0][ind] + dmajorp[ind]*np.cos(ang2+np.pi)
dmajorpy2 = Op[1][ind] + dmajorp[ind]*np.sin(ang2+np.pi)
dminorpx1 = Op[0][ind] + dminorp[ind]*np.cos(ang2+np.pi/2)
dminorpy1 = Op[1][ind] + dminorp[ind]*np.sin(ang2+np.pi/2)
dminorpx2 = Op[0][ind] + dminorp[ind]*np.cos(ang2-np.pi/2)
dminorpy2 = Op[1][ind] + dminorp[ind]*np.sin(ang2-np.pi/2)
plt.plot([Op[0][ind],dmajorpx1],[Op[1][ind],dmajorpy1],color='purple',linestyle='-')
plt.plot([Op[0][ind],dmajorpx2],[Op[1][ind],dmajorpy2],color='purple',linestyle='-')
plt.plot([Op[0][ind],dminorpx1],[Op[1][ind],dminorpy1],color='purple',linestyle='-')
plt.plot([Op[0][ind],dminorpx2],[Op[1][ind],dminorpy2],color='purple',linestyle='-')
#new plot stuff
Erange = np.linspace(start=0.,stop=2*np.pi,num=400)
plt.plot([-dmajorp[ind],dmajorp[ind]],[0,0],color='purple',linestyle='--') #major
plt.plot([0,0],[-dminorp[ind],dminorp[ind]],color='purple',linestyle='--') #minor
xellipsetmp = dmajorp[ind]*np.cos(Erange)
yellipsetmp = dminorp[ind]*np.sin(Erange)
plt.plot(xellipsetmp,yellipsetmp,color='black')
plt.scatter(x[ind],y[ind],color='orange',marker='x')
c_ae = dmajorp[ind]*np.sqrt(1-dminorp[ind]**2/dmajorp[ind]**2)
plt.scatter([-c_ae,c_ae],[0,0],color='blue')
plt.scatter(minSepPoints_x[ind],minSepPoints_y[ind],color='magenta')
ux =
|
np.cos(Phi[ind])
|
numpy.cos
|
# Bit-banged SPI driver for NI DAQ boxes (which also does standard analog data collection)
# The driver uses the two analog outputs as sclk and mosi, since they support synchronization with analog inputs.
# It requires sclk to be looped back into an analog input for synchronization reasons, and also reads miso via another analog input.
# Lastly, it pulses SS high via a digital output on startup to keep the slave synced with the master.
# This driver can currently read analog data at ~20k samples/sec and interact with SPI at ~1100 bytes/sec (although with about 0.25 secs of latency between a write and its response).
import sys
import threading
import time
import numpy as np
import msgpack
import nidaqmx
from nidaqmx import stream_writers
import zmq
SERVER_URL = "tcp://192.168.0.2:5559"
RATE = 20000
READ_BULK = 200
TEST_SPI_BYTES = 40
SCLK = "ao0"
MOSI = "ao1"
SCLK_LOOPBACK = "ai0"
MISO = "ai8"
SS = "pfi4"
SPI_VOLTAGE = 5
system = nidaqmx.system.System.local()
if len(system.devices) == 0:
print("Error: No device detected.")
sys.exit(1)
if len(system.devices) > 1:
print("Error: Multiple devices detected. Please only connect one device.")
sys.exit(1)
print(f"Found device {system.devices[0].product_type}.")
context = zmq.Context()
sender = context.socket(zmq.PUB)
sender.connect(SERVER_URL)
aow_lock = threading.Lock()
# We control the analog outputs by writing to a buffer on the NI box which they then read from.
# However, we use it in a mode where it will not loop back to old data in the buffer if it runs dry, and so if it runs dry it crashes instead.
# This means that we need to keep the buffer saturated with zeroes. However, if we keep the buffer too full,
# there is unreasonable latency between when we "write" data and when it actually gets sent out / we can read in the result.
# This function constantly checks how full the buffer is and tries to keep it about 1/4 of a second full.
# This function runs alone in a thread so it can loop all it likes.
def saturate_zeros(ao, aow):
t = time.time()
while True:
buf = ao.out_stream.curr_write_pos - ao.out_stream.total_samp_per_chan_generated # This is the number of samples currently in the buffer
diff = RATE // 4 - buf # try to keep 0.25s in the buffer
if diff > 0:
with aow_lock:
aow.write_many_sample(np.zeros((2, diff), dtype=np.float64))
t += 0.25 / 2 # check at twice as fast as the amount we want in the buffer
time.sleep(max(t - time.time(), 0))
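# A minimal usage sketch (an assumption, not shown in the visible code): the zero-padding
# loop above is expected to run in its own daemon thread alongside the reader, e.g.
#   threading.Thread(target=saturate_zeros, args=(ao, aow), daemon=True).start()
# where ao/aow are the analog-output task and stream writer created in the main block below.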
# Here we read both the incoming SPI bits and general analog data.
# Note that because of how the synchronization works, our sclk loopback is actually one sample ahead of the corresponding miso value.
def read_spi(ai):
last_miso = [0]
bit = 7
byte = 0
bytes_in = []
analog_avg = None
spi_avg = None
spi_time = time.time()
while True:
analog_time = time.time()
sclk, miso, *data = ai.read(number_of_samples_per_channel=READ_BULK, timeout=0.1)
miso, last_miso = last_miso + miso[:-1], miso[-1:] # account for the sclk loopback / miso offset
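        # e.g. if last_miso == [m0] and this chunk's miso == [m1, m2, m3], then after the line
        # above miso == [m0, m1, m2] and last_miso == [m3]: each sclk sample now lines up with
        # the miso sample it actually clocked (the loopback runs one sample ahead of miso).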
for i in range(len(sclk)):
if sclk[i] > SPI_VOLTAGE / 2: # Clock high, read a bit and add it to our WIP byte
byte |= round(miso[i] / SPI_VOLTAGE) << bit
if bit == 0:
bit = 7
bytes_in.append(byte)
byte = 0
else:
bit -= 1
# Sample analog data 'calibration'.
data = (time.time(), "ANALOG", [[(d - 0.000505) * 6550 for d in data[0]]] + data[1:])
sender.send(msgpack.packb(data))
if len(bytes_in) > TEST_SPI_BYTES: # we've amassed a full SPI response
if spi_avg is None:
spi_avg = [time.time() - spi_time for _ in range(100)]
else:
spi_avg = spi_avg[1:] + [time.time() - spi_time]
spi_data, bytes_in = bytes_in[:TEST_SPI_BYTES], bytes_in[TEST_SPI_BYTES:]
sender.send(msgpack.packb((time.time(), "SPI", spi_data)))
spi_time = time.time()
if analog_avg is None:
analog_avg = [time.time() - analog_time for _ in range(100)]
else:
analog_avg = analog_avg[1:] + [time.time() - analog_time]
print(f"\rAnalog Rate: {READ_BULK/np.mean(analog_avg):.0f} samples/sec SPI Rate: {TEST_SPI_BYTES/np.mean(spi_avg or [1]):.0f} bytes/sec Buffer health: {100 - ai.in_stream.avail_samp_per_chan * 100 / max(ai.in_stream.input_buf_size, 1): >5.1f}% ", end='')
# Encodes some bytes into the SPI pulses needed to write them, adds said pulses to the write queue.
def send(aow, bytes_out):
clkdata = []
mosidata = []
for byte_out in bytes_out:
# Since these are analog channels, a binary 1 is represented by a voltage.
clkdata += [SPI_VOLTAGE*(n % 2) for n in range(16)] + [0]
mosidata += [SPI_VOLTAGE*bool(byte_out & (1 << (n//2))) for n in range(15, -1, -1)] + [0] # keep data valid for a full clock pulse, 2 samples
clkdata += [0]
mosidata += [0]
with aow_lock:
aow.write_many_sample(np.array([clkdata, mosidata], dtype=np.float64))
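# Worked example of the encoding in send() above: for byte_out = 0xA5 = 0b10100101 the bits
# go out MSB first, each held for one full clock pulse (two samples), so before the idle
# zeros that are appended:
#   clkdata  = [0,5, 0,5, 0,5, 0,5, 0,5, 0,5, 0,5, 0,5]   (sclk low/high per bit, at SPI_VOLTAGE)
#   mosidata = [5,5, 0,0, 5,5, 0,0, 0,0, 5,5, 0,0, 5,5]   (bits 1,0,1,0,0,1,0,1 scaled to SPI_VOLTAGE)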
with nidaqmx.Task() as ao, nidaqmx.Task() as ai, nidaqmx.Task() as do:
# Set up our SPI channels
ao.ao_channels.add_ao_voltage_chan(f"Dev1/{SCLK}")
ao.ao_channels.add_ao_voltage_chan(f"Dev1/{MOSI}")
ai.ai_channels.add_ai_voltage_chan(f"Dev1/{SCLK_LOOPBACK}", min_val=-10, max_val=10, terminal_config=nidaqmx.constants.TerminalConfiguration.RSE)
ai.ai_channels.add_ai_voltage_chan(f"Dev1/{MISO}", min_val=-10, max_val=10, terminal_config=nidaqmx.constants.TerminalConfiguration.RSE)
do.do_channels.add_do_chan(f"Dev1/{SS}")
# Set up generic analog input channels
ai.ai_channels.add_ai_voltage_chan("Dev1/ai16", min_val=-0.2, max_val=0.2, terminal_config=nidaqmx.constants.TerminalConfiguration.DIFFERENTIAL)
for c in [1, 2, 3, 4, 5, 6, 7]:
ai.ai_channels.add_ai_voltage_chan(f"Dev1/ai{c}", min_val=-10, max_val=10, terminal_config=nidaqmx.constants.TerminalConfiguration.RSE)
ao.timing.cfg_samp_clk_timing(RATE, sample_mode=nidaqmx.constants.AcquisitionType.CONTINUOUS)
ai.timing.cfg_samp_clk_timing(RATE, source='/Dev1/ao/SampleClock', sample_mode=nidaqmx.constants.AcquisitionType.CONTINUOUS) # synchronize our reading with when we write
ao.out_stream.regen_mode = nidaqmx.constants.RegenerationMode.DONT_ALLOW_REGENERATION # disable repeating old data, instead error if we run out
ao.out_stream.output_buf_size = RATE # one second of buffer. For latency, we want to keep this buffer as empty as possible.
aow = stream_writers.AnalogMultiChannelWriter(ao.out_stream) # Lets us stream data into the buffer and thus out onto the pin.
aow.auto_start = False
aow.write_many_sample(
|
np.zeros((2, RATE // 4), dtype=np.float64)
|
numpy.zeros
|
'''
Referee system integration class
'''
import time
import numpy as np
from serial_package import offical_Judge_Handler, Game_data_define
import queue
from radar_class.config import enemy,BO
###### adapted from the official demo ###########
ind = 0 # sequence number of the id being sent (0-4)
Id_red = 1
Id_blue = 101
buffercnt = 0
buffer = [0]
buffer *= 1000
cmdID = 0
indecode = 0
def ControlLoop_red():
'''
    Cycle through the red vehicle ids
'''
global Id_red
if Id_red == 5:
Id_red = 1
else:
Id_red = Id_red + 1
def ControlLoop_blue():
'''
    Cycle through the blue vehicle ids
'''
global Id_blue
if Id_blue == 105:
Id_blue = 101
else:
Id_blue = Id_blue + 1
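# Frame layout implied by the checks in read() below (only what the code itself verifies):
#   byte 0      : start-of-frame, always 0xa5
#   bytes 0-4   : frame header, validated by a CRC8 check over these 5 bytes
#   bytes 5-6   : cmd_id, little-endian 16 bits ((buffer[5]) | (buffer[6] << 8))
#   remainder   : payload plus a trailing CRC16, checked over the cmd_id-specific frame length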
def read(ser):
global buffercnt
buffercnt = 0
global buffer
global cmdID
global indecode
# TODO:qt thread
while True:
s = ser.read(1)
        s = int.from_bytes(s, 'big')
# doc.write('s: '+str(s)+' ')
if buffercnt > 50:
buffercnt = 0
# print(buffercnt)
buffer[buffercnt] = s
# doc.write('buffercnt: '+str(buffercnt)+' ')
# doc.write('buffer: '+str(buffer[buffercnt])+'\n')
# print(hex(buffer[buffercnt]))
if buffercnt == 0:
if buffer[buffercnt] != 0xa5:
buffercnt = 0
continue
if buffercnt == 5:
if offical_Judge_Handler.myVerify_CRC8_Check_Sum(id(buffer), 5) == 0:
buffercnt = 0
if buffer[buffercnt] == 0xa5:
buffercnt = 1
continue
if buffercnt == 7:
cmdID = (0x0000 | buffer[5]) | (buffer[6] << 8)
# print("cmdID")
# print(cmdID)
if buffercnt == 10 and cmdID == 0x0002:
if offical_Judge_Handler.myVerify_CRC16_Check_Sum(id(buffer), 10):
Referee_Game_Result()
buffercnt = 0
if buffer[buffercnt] == 0xa5:
buffercnt = 1
continue
if buffercnt == 20 and cmdID == 0x0001:
if offical_Judge_Handler.myVerify_CRC16_Check_Sum(id(buffer), 20):
                # game stage information
UART_passer.Referee_Update_GameData()
buffercnt = 0
if buffer[buffercnt] == 0xa5:
buffercnt = 1
continue
if buffercnt == 41 and cmdID == 0x0003:
if offical_Judge_Handler.myVerify_CRC16_Check_Sum(id(buffer), 41):
                # HP of every robot
UART_passer.Referee_Robot_HP()
buffercnt = 0
if buffer[buffercnt] == 0xa5:
buffercnt = 1
continue
if buffercnt == 12 and cmdID == 0x0004:
if offical_Judge_Handler.myVerify_CRC16_Check_Sum(id(buffer), 12):
Referee_dart_status()
buffercnt = 0
if buffer[buffercnt] == 0xa5:
buffercnt = 1
continue
if buffercnt == 13 and cmdID == 0x0101:
if offical_Judge_Handler.myVerify_CRC16_Check_Sum(id(buffer), 13):
Referee_event_data()
buffercnt = 0
if buffer[buffercnt] == 0xa5:
buffercnt = 1
continue
if buffercnt == 13 and cmdID == 0x0102:
if offical_Judge_Handler.myVerify_CRC16_Check_Sum(id(buffer), 13):
Refree_supply_projectile_action()
buffercnt = 0
if buffer[buffercnt] == 0xa5:
buffercnt = 1
continue
if buffercnt == 11 and cmdID == 0x0104:
if offical_Judge_Handler.myVerify_CRC16_Check_Sum(id(buffer), 11):
Refree_Warning()
buffercnt = 0
if buffer[buffercnt] == 0xa5:
buffercnt = 1
continue
if buffercnt == 10 and cmdID == 0x0105:
if offical_Judge_Handler.myVerify_CRC16_Check_Sum(id(buffer), 10):
Refree_dart_remaining_time()
buffercnt = 0
if buffer[buffercnt] == 0xa5:
buffercnt = 1
continue
        if buffercnt == 17 and cmdID == 0x301: # 2-byte data
if offical_Judge_Handler.myVerify_CRC16_Check_Sum(id(buffer), 17):
                # game stage information
UART_passer.Receive_Robot_Data()
buffercnt = 0
if buffer[buffercnt] == 0xa5:
buffercnt = 1
continue
        if buffercnt == 25 and cmdID == 0x202: # not used by the radar station
if offical_Judge_Handler.myVerify_CRC16_Check_Sum(id(buffer), 25):
buffercnt = 0
if buffer[buffercnt] == 0xa5:
buffercnt = 1
continue
        if buffercnt == 25 and cmdID == 0x203: # not used by the radar station
if offical_Judge_Handler.myVerify_CRC16_Check_Sum(id(buffer), 25):
buffercnt = 0
if buffer[buffercnt] == 0xa5:
buffercnt = 1
continue
        if buffercnt == 27 and cmdID == 0x201: # not used by the radar station
if offical_Judge_Handler.myVerify_CRC16_Check_Sum(id(buffer), 27):
buffercnt = 0
if buffer[buffercnt] == 0xa5:
buffercnt = 1
continue
        if buffercnt == 10 and cmdID == 0x204: # not used by the radar station
if offical_Judge_Handler.myVerify_CRC16_Check_Sum(id(buffer), 10):
buffercnt = 0
if buffer[buffercnt] == 0xa5:
buffercnt = 1
continue
        if buffercnt == 10 and cmdID == 0x206: # not used by the radar station
if offical_Judge_Handler.myVerify_CRC16_Check_Sum(id(buffer), 10):
buffercnt = 0
if buffer[buffercnt] == 0xa5:
buffercnt = 1
continue
        if buffercnt == 13 and cmdID == 0x209: # not used by the radar station
if offical_Judge_Handler.myVerify_CRC16_Check_Sum(id(buffer), 13):
buffercnt = 0
if buffer[buffercnt] == 0xa5:
buffercnt = 1
continue
if buffercnt == 16 and cmdID == 0x0301:
if offical_Judge_Handler.myVerify_CRC16_Check_Sum(id(buffer), 16):
# Refree_map_stop()
buffercnt = 0
if buffer[buffercnt] == 0xa5:
buffercnt = 1
continue
if buffercnt == 24 and cmdID == 0x0303:
if offical_Judge_Handler.myVerify_CRC16_Check_Sum(id(buffer), 24):
                # aerial operator communication
Refree_Arial_Message()
buffercnt = 0
if buffer[buffercnt] == 0xa5:
buffercnt = 1
continue
buffercnt += 1
Game_state = Game_data_define.game_state()
Game_result = Game_data_define.game_result()
Game_robot_HP = Game_data_define.game_robot_HP()
Game_dart_status = Game_data_define.dart_status()
Game_event_data = Game_data_define.event_data()
Game_supply_projectile_action = Game_data_define.supply_projectile_action()
Game_refree_warning = Game_data_define.refree_warning()
Game_dart_remaining_time = Game_data_define.dart_remaining_time()
################################################
class UART_passer(object):
'''
convert the Judge System message
    Custom referee-system integration class
'''
# message box controller
_bytes2int = lambda x:(0x0000 | x[0]) | (x[1] << 8)
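    # e.g. _bytes2int([0x34, 0x12]) == 0x1234 (two bytes read as a little-endian 16-bit value)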
    _hp_up = np.array([100,150,200,250,300,350,400,450,500]) # the successive HP upgrade levels
_init_hp = np.ones(10,dtype = int)*500 # initialize all car units' hp as 500
_last_hp = _init_hp.copy()
    _HP = np.ones(16,dtype = int)*500 # to store the received hp message, initialize all as 500
    _max_hp = _init_hp.copy() # store the maximum hp of each car
    _set_max_flag = False # once the game starts, set each car's maximum hp from the first hp message received.
# location controller
_robot_location = np.zeros((5,2),dtype=np.float32) # the enemy car location received from the alarm class
# alarming event queue with priority
_queue = queue.PriorityQueue(-1)
# Game State controller
_BO = 0
_stage = ["NOT START", "PREPARING", "CHECKING", "5S", "PLAYING", "END"]
_Now_Stage = 0
_Game_Start_Flag = False
_Game_End_Flag = False
Remain_time = 0
    # Aerial operator control flags
change_view = False
anti_dart = False
open_base = False
getposition = False
    _HP_thres = 0 # HP threshold for alarms: if the highest-HP vehicle inside the alarm region is below this value, no alarm is sent
    _prevent_time = [2.,2.,2.,2.,2.,2.] # alarm silence time: an alarm is skipped if less than this has elapsed since the last one for that event
    _event_prevent = np.zeros(6) # timestamp record of the last alarm for each event
    loop_send = 0 # number of messages sent within one minimap vehicle-id cycle
@staticmethod
def _judge_max_hp(HP):
'''
        Logic for detecting maximum-HP changes, by 何若坤; only valid under the 2021 season rules
'''
        mask_zero = UART_passer._last_hp > 0 # skip vehicles whose HP is 0
        focus_hp = HP[[0,1,2,3,4,8,9,10,11,12]] # only the HP caps of these slots matter
        # the engineer robot's HP cap never changes, so it is excluded
mask_engineer = np.array([True]*10)
mask_engineer[[1,6]] = False
mask_engineer = np.logical_and(mask_zero,mask_engineer)
        # an HP increase of more than 30 and up to 80 means a level-1 cap increase (50)
mask_level1 = np.logical_and(focus_hp-UART_passer._last_hp>30,focus_hp-UART_passer._last_hp<=80)
        # an HP increase above 80 means a level-2 cap increase (100)
mask_level2 = focus_hp-UART_passer._last_hp > 80
UART_passer._max_hp[np.logical_and(mask_level1,mask_engineer)] += 50
UART_passer._max_hp[
|
np.logical_and(mask_level2,mask_engineer)
|
numpy.logical_and
|
import numpy as np
from skimage import measure
from astropy.io import fits
from astropy.table import Table
import sys, glob, os
import datetime
import re
num_args = len(sys.argv) - 1
if num_args == 1:
start_run, stop_run = [int(sys.argv[1])]*2
run_str = f"### Doing run {start_run}"
elif num_args == 2:
    start_run, stop_run = sorted(int(a) for a in sys.argv[1:3])
run_str = f"### Doing runs {start_run} to {stop_run}"
else:
sys.exit(f"Usage: {sys.argv[0]} <start_run> [<stop_run>]")
cwd = os.getcwd()
input_dir = cwd[:cwd.rindex('/') + 1]
with open('framedist_vals', 'r') as f:
    framedist = np.array(f.readline().split(), int) # these are the random values from the perl version,
    # used to make sure the python port gives the same result as the perl original.
date = datetime.datetime.now().astimezone()
date_string = date.strftime("%a %d %b %Y %I:%M:%S %p %Z").rstrip()
#print("############################################################")
#print(f"### Started {sys.argv[0]} on {date_string}")
#print(run_str)
#//line 99
# define defaults
evtth = .1 # 100 eV for WFI Geant4 simulations
splitth = evtth # same as evtth for WFI Geant4 simulations
npixthresh = 5 # minimum number of pixels in a blob
mipthresh = 15. # minimum ionizing particle threshold in keV
clip_energy = 22. # maximum pixel value reported by WFI, in keV
skip_writes = -1 # writes FITS images for every skip_writes primary;
# set to -1 to turn off writes
evperchan = 1000. # why not? PHA here is really PI
mipthresh_chan = mipthresh * 1000. / evperchan # minimum ionizing particle threshold in PHA units
spec_maxkev = 100.
numchans = int((spec_maxkev*1000.) / evperchan)
gain_intercept = 0. # use this for Geant4 data
gain_slope = 1000. # use this for Geant4 data (pixel PH units are keV)
#gain_intercepts = (0, 0, 0, 0) # in ADU
#gain_slopes = (1000, 1000, 1000, 1000) # in eV/ADU
# rate and frame defaults
proton_flux = 4.1 * 7./5. # protons/s/cm2; 7/5 accounts for alphas, etc.
sphere_radius = 70. # radius of boundary sphere in cm
num_protons_per_run = 1.e6 # number of proton primaries
                           # in a simulation run (from Jonathan)
# his email said 1e7, but looking at the
# input files it's really 1e6!!!
detector_area = 4. * (130.e-4 * 512.)**2 # detector area in cm2,
# assuming 130 um pixels
texp_run = num_protons_per_run/3.14159/proton_flux/(sphere_radius**2)
# total exposure time in sec for this run
texp_frame = .005 # frame exposure time in sec
mu_per_frame = num_protons_per_run * texp_frame / texp_run
# minimum ionizing pa
#print(f"### There are {num_protons_per_run} primaries in this run.")
#print(f"### The run exposure time is {texp_run} sec.")
#print(f"### The frame exposure time is {texp_frame} sec")
#print(f"### for an expected mean of {mu_per_frame} primaries per frame.")
"""
Comments...
"""
epicpn_pattern_table = np.array([
0, 13, 3, 13, 13, 13, 13, 13, 4, 13, 8, 12, 13, 13, 13, 13,
2, 13, 7, 13, 13, 13, 11, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
1, 13, 13, 13, 13, 13, 13, 13, 5, 13, 13, 13, 13, 13, 13, 13,
6, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 9, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
10, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13
])
# hash of codes indexed by particle type indexed
ptypes = {
'proton': 0,
'gamma': 1,
'electron': 2,
'neutron': 3,
'pi+': 4,
'e+': 5,
'pi-': 6,
'nu_mu': 7,
'anti_nu_mu': 8,
'nu_e': 9,
'kaon+': 10,
'mu+': 11,
'deuteron': 12,
'kaon0L': 13,
'lambda': 14,
'kaon-': 15,
'mu-': 16,
'kaon0S': 17,
'alpha': 18,
'anti_proton': 19,
'triton': 20,
'anti_neutron': 21,
'sigma-': 22,
'sigma+': 23,
'He3': 24,
'anti_lambda': 25,
'anti_nu_e': 26,
'anti_sigma-': 27,
'xi0': 28,
'anti_sigma+': 29,
'xi-': 30,
'anti_xi0': 31,
'C12': 32,
'anti_xi-': 33,
'Li6': 34,
'Al27': 35,
'O16': 36,
'Ne19': 37,
'Mg24': 38,
'Li7': 39,
'He6': 40,
'Be8': 41,
'Be10': 42,
'unknown': 99
}
# initialize rng
rng = np.random.RandomState(1234)
# temporary variables
x, y = 0, 0
"""
Comments...
line 246
"""
# 0-511; indexed by detector number 0-3
x_offset = 513
y_offset = 513
imgsize = 1027
actmin = 0
actmax = 1026
xydep_min = -513
xydep_max = 513
def match(regex: str, string: str):
return re.compile(regex).search(string)
def indexND(array, ind, value=None):
if value is None:
return array[ind[1],ind[0]]
array[ind[1],ind[0]] = value
def wfits(data, fitsfile, ow=True, hdr=None):
if isinstance(data, tuple):
t = Table(data[1], names = data[0])
hdu = fits.table_to_hdu(t)
if hdr:
for key, val in hdr.items():
hdu.header[key] = val
else:
hdu = fits.PrimaryHDU()
hdu.data = data
hdu.writeto(fitsfile, overwrite = ow)
def which(condition):
if condition.ndim != 1:
condition = condition.flat
return np.where(condition)[0]
def whichND(condition):
return np.array(np.where(condition)[::-1])
def which_both(condition):
return which(condition), which(condition == False)
#######################################
# Main loop.
# Step through Geant4 output data files.
# For each one, create a frame per primary,
# find blobs and MIPs, and then find events in that frame.
# Change start_run to parallelize things (i.e. do chunks of 10 runs
# in parallel).
# delete variables later
for this_run in range(start_run, stop_run + 1):
# see if there are files for this run
infiles = glob.glob(f"{input_dir}input/{this_run}_detector?")
if len(infiles) != 4:
print (f"### Found something other than 4 datafiles for {this_run}, skipping.")
continue
# initialize event piddles, which will be written out or used later
runid = np.zeros(0, dtype=int)
detectorid = np.zeros(0, dtype=int)
primid = np.zeros(0, dtype=int)
actx = np.zeros(0, dtype=int)
acty = np.zeros(0, dtype=int)
phas = np.zeros((0,25), dtype=float)
pha = np.zeros(0, dtype=float)
ptype = np.zeros(0, dtype=int)
energy = np.zeros(0, dtype = float) # in keV
evttype = np.zeros(0, dtype=int)
blobdist = np.zeros(0, dtype=float)
mipdist = np.zeros(0, dtype=float)
pattern = np.zeros(0, dtype=int)
vfaint = np.zeros(0, dtype=int)
# assign frames and times to each primary
# to start, assume mean of one primary per second
evt_time = np.zeros(0, dtype=float)
evt_frame = np.zeros(0, dtype=int)
pix_time = np.zeros(0, dtype=float)
pix_frame = np.zeros(0, dtype=int)
# initialize structures to hold the secondary particle columns
# piddles for the numeric columns; these are enough for now
run = np.zeros(0, dtype=int) # Geant4 run (* in *_detector[0123])
detector = np.zeros(0, dtype=int) # WFI detector (? in *_detector?)
eid = np.zeros(0, dtype=int) # primary ID
particleid = np.zeros(0, dtype=int) # interacting particle ID
parentid = np.zeros(0, dtype=int) # don't really need probably
# initialize piddles to hold the energy deposition (per pixel) columns
# some of this will be written out the pixel list
xdep = np.zeros(0, dtype=int)
ydep = np.zeros(0, dtype=int)
endep = np.zeros(0, dtype=float)
rundep = np.zeros(0, dtype=int)
detectordep = np.zeros(0, dtype=int)
eiddep = np.zeros(0, dtype=int)
framedep = np.zeros(0, dtype=int)
piddep = np.zeros(0, dtype=int)
ptypedep = np.zeros(0, dtype=int)
cprocdep = np.zeros(0, dtype=int)
blobid = np.zeros(0, dtype=int)
# initialize piddles to hold frame-specific things to go in FITS table
frame_frame = np.zeros(0, dtype=int)
frame_time = np.zeros(0, dtype=float)
frame_runid = np.zeros(0, dtype=int)
frame_npix = np.zeros(0, dtype=int)
frame_npixmip = np.zeros(0, dtype=int)
frame_nevt = np.zeros(0, dtype=int)
frame_nevtgood = np.zeros(0, dtype=int)
frame_nevt27 = np.zeros(0, dtype=int)
frame_nblob = np.zeros(0, dtype=int)
frame_nprim = np.zeros(0, dtype=int)
# initialize piddles to hold blob-specific things to go in FITS table
blob_frame = np.zeros(0, dtype=int)
blob_blobid = np.zeros(0, dtype=int)
blob_cenx = np.zeros(0, dtype=float)
blob_ceny = np.zeros(0, dtype=float)
blob_cenxcl = np.zeros(0, dtype=float)
blob_cenycl = np.zeros(0, dtype=float)
blob_npix = np.zeros(0, dtype=int)
blob_energy = np.zeros(0, dtype=float)
blob_energycl = np.zeros(0, dtype=float)
# initialize things for the running frames which we will
# randomly populate
# frame settings
# we know there are $num_protons_per_run, so generate enough
# random frames to hold them
##framedist = rng.poisson(mu_per_frame, int(2*num_protons_per_run/mu_per_frame))
cumframedist = framedist.cumsum()
# get the total number of frames needed to capture all the primaries;
# will write this to FITS header so we can combine runs
numtotframes = which(cumframedist >= num_protons_per_run)[0] + 1
# this is wrong, because it will remove the last bin which we need
# it's also unnecessary
#cumframedist = cumframedist[cumframedist <= num_protons_per_run]
# running variables
numevents = 0
numtotblobs = 0
# loop through the four quadrant data files for this run
# now combine all four, since single primary can produce signal
# in multiple quadrants
for infile in infiles:
#print(f"### Reading {infile}")
rc = match('[0-9]+_detector([0-9]+)', infile) #extracts the detector name
this_detector = int(rc.group(1))
ptype = {}
cproc = {}
with open(infile, 'r') as IN:
# step through the input file and accumulate primaries
for line in IN: #switched to a for loop because of built-in __iter__ method
                if match('^\s*#', line): #skip comments #added ability to have arbitrary whitespace before '#'
continue
if match('^\s*$', line): #skip blank lines:
continue
if not match(',', line): #could be if ',' not in line
continue
fields = line.rstrip().split(',')
if match('[a-zA-Z]', fields[0]): # if the first column is a string, then this is a particle line
# retain the primary for this interaction
this_eid = int(float(fields[1]))
eid = np.append(eid, this_eid)
# particle type and interaction type are hashes so
# that the pixel-specific read can pick them up
# doesn't matter if the particle ID is re-used from
# primary to primary, since this will reset it
ptype[int(fields[2])] = fields[0]
cproc[int(fields[2])] = fields[4]
else: # if the first column is a number, then this is a pixel hit line
#print(fields)
if float(fields[2]) <= splitth: # skip it if less than split threshold is deposited,
continue #since that is ~ the lower threshold of pixels we'll get
tmp_x, tmp_y = int(fields[0]), int(fields[1])
if tmp_x<xydep_min or tmp_y<xydep_min or tmp_x>xydep_max or tmp_y>xydep_max:
continue # skip it if it's outside the 512x512 region of a quad
xdep = np.append(xdep, tmp_x)
ydep = np.append(ydep, tmp_y)
endep = np.append(endep, float(fields[2]))
rundep = np.append(rundep, this_run)
detectordep = np.append(detectordep, this_detector)
eiddep = np.append(eiddep, this_eid)
framedep = np.append(framedep, 0)
piddep = np.append(piddep, int(fields[3]))
# %ptype is hash of particle type strings indexed by the id
# %ptypes is (constant) hash of my own particle type IDs indexed
# by the string (confused yet?)
ptypedep = np.append(ptypedep, ptypes[ptype.get(int(fields[3]), 'unknown')])
blobid =
|
np.append(blobid, 0)
|
numpy.append
|
from pickle import loads, dumps
import numpy as np
import pandas as pd
from classicML import _cml_precision
from classicML import CLASSICML_LOGGER
from classicML.api.models import BaseModel
from classicML.backend import get_conditional_probability
from classicML.backend import get_dependent_prior_probability
from classicML.backend import get_probability_density
from classicML.backend import type_of_target
from classicML.backend import io
class OneDependentEstimator(BaseModel):
"""独依赖估计器的基类.
Attributes:
attribute_name: list of name, default=None,
属性的名称.
is_trained: bool, default=False,
模型训练后将被标记为True.
is_loaded: bool, default=False,
如果模型加载了权重将被标记为True.
Raises:
NotImplementedError: compile, fit, predict方法需要用户实现.
"""
def __init__(self, attribute_name=None):
"""初始化独依赖估计器.
Arguments:
attribute_name: list of name, default=None,
属性的名称.
"""
super(OneDependentEstimator, self).__init__()
self.attribute_name = attribute_name
self.is_trained = False
self.is_loaded = False
def compile(self, *args, **kwargs):
"""编译独依赖估计器.
"""
raise NotImplementedError
def fit(self, x, y, **kwargs):
"""训练独依赖估计器.
Arguments:
x: numpy.ndarray or pandas.DataFrame, array-like, 特征数据.
y: numpy.ndarray or pandas.DataFrame, array-like, 标签.
"""
raise NotImplementedError
def predict(self, x, **kwargs):
"""使用独依赖估计器进行预测.
Arguments:
x: numpy.ndarray or pandas.DataFrame, array-like, 特征数据.
"""
raise NotImplementedError
def load_weights(self, filepath):
"""加载模型参数.
Arguments:
filepath: str, 权重文件加载的路径.
Raises:
KeyError: 模型权重加载失败.
Notes:
模型将不会加载关于优化器的超参数.
"""
raise NotImplementedError
def save_weights(self, filepath):
"""将模型权重保存为一个HDF5文件.
Arguments:
filepath: str, 权重文件保存的路径.
Raises:
TypeError: 模型权重保存失败.
Notes:
模型将不会保存关于优化器的超参数.
"""
raise NotImplementedError
class SuperParentOneDependentEstimator(OneDependentEstimator):
"""超父独依赖估计器.
Attributes:
attribute_name: list of name, default=None,
属性的名称.
super_parent_name: str, default=None,
超父的名称.
super_parent_index: int, default=None,
超父的索引值.
_list_of_p_c: list,
临时保存中间的概率依赖数据.
smoothing: bool, default=None,
是否使用平滑, 这里的实现是拉普拉斯修正.
"""
def __init__(self, attribute_name=None):
"""初始化超父独依赖估计器.
Arguments:
attribute_name: list of name, default=None,
属性的名称.
"""
super(SuperParentOneDependentEstimator, self).__init__(attribute_name=attribute_name)
self.super_parent_name = None
self.super_parent_index = None
self.smoothing = None
self._list_of_p_c = list()
def compile(self, super_parent_name, smoothing=True):
"""编译超父独依赖估计器.
Arguments:
super_parent_name: str, default=None,
超父的名称.
smoothing: bool, default=True,
是否使用平滑, 这里的实现是拉普拉斯修正.
"""
self.super_parent_name = super_parent_name
self.smoothing = smoothing
def fit(self, x, y, **kwargs):
"""训练超父独依赖估计器.
Arguments:
x: numpy.ndarray or pandas.DataFrame, array-like, 特征数据.
y: numpy.ndarray or pandas.DataFrame, array-like, 标签.
Returns:
SuperParentOneDependentEstimator实例.
"""
if isinstance(x, np.ndarray) and self.attribute_name is None:
            CLASSICML_LOGGER.warn("Attribute names are missing; please use a pandas.DataFrame or check self.attribute_name")
        # attach the attribute names to the feature data.
x = pd.DataFrame(x, columns=self.attribute_name)
x.reset_index(drop=True, inplace=True)
y = pd.Series(y)
y.reset_index(drop=True, inplace=True)
for index, feature_name in enumerate(x.columns):
if self.super_parent_name == feature_name:
self.super_parent_index = index
for category in
|
np.unique(y)
|
numpy.unique
|
# minimal example showing how to use raw (external) CUDA kernels with cupy
#
# Aim: understand how to load and execute a raw kernel based on addition of two arrays
import pyparallelproj as ppp
from pyparallelproj.phantoms import ellipse2d_phantom
import numpy as np
import math
import argparse
#-------------------------------------------------------------
import cupy as cp
# load a kernel defined in a external file
with open('joseph3d_fwd_cuda.cu','r') as f:
proj_kernel = cp.RawKernel(f.read(), 'joseph3d_fwd_cuda_kernel')
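# A self-contained sketch of the same RawKernel pattern with a toy element-wise addition
# kernel (mirrors the cupy documentation example; it is not part of the joseph3d projector):
_add_kernel = cp.RawKernel(r'''
extern "C" __global__
void demo_add(const float* x1, const float* x2, float* y) {
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    y[tid] = x1[tid] + x2[tid];
}
''', 'demo_add')
_demo_x1 = cp.arange(32, dtype=cp.float32)
_demo_x2 = cp.arange(32, dtype=cp.float32)
_demo_y = cp.zeros(32, dtype=cp.float32)
_add_kernel((1,), (32,), (_demo_x1, _demo_x2, _demo_y))  # launch: 1 block of 32 threads
assert cp.allclose(_demo_y, _demo_x1 + _demo_x2)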
#-------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--counts', help = 'counts to simulate', default = 7e7, type = float)
parser.add_argument('--nsubsets', help = 'number of subsets', default = 1, type = int)
parser.add_argument('--n', help = 'number of averages', default = 5, type = int)
parser.add_argument('--chunks', help = 'number of GPU chunks', default = 1, type = int)
parser.add_argument('--tof', help = 'TOF', action = 'store_true')
parser.add_argument('--img_mem_order', help = 'memory layout for image', default = 'C',
choices = ['C','F'])
parser.add_argument('--sino_dim_order', help = 'axis order in sinogram', default = ['0','1','2'],
nargs = '+')
parser.add_argument('--fov', help = 'FOV: wb -> 600mm, brain -> 250mm', default = 'wb', choices = ['wb','brain'])
parser.add_argument('--voxsize', help = '3 voxel sizes (mm)', default = ['2','2','2'], nargs = '+')
parser.add_argument('--threads_per_block', help = 'threads per block', default = 64, type = int)
args = parser.parse_args()
#---------------------------------------------------------------------------------
print(' '.join([x[0] + ':' + str(x[1]) for x in args.__dict__.items()]))
print('')
nsubsets = args.nsubsets
counts = args.counts / nsubsets
n = args.n
threads_per_block = args.threads_per_block
chunks = args.chunks
tof = args.tof
img_mem_order = args.img_mem_order
subset = 0
voxsize = np.array(args.voxsize, dtype = np.float32)
if args.fov == 'wb':
fov_mm = 600
elif args.fov == 'brain':
fov_mm = 250
spatial_dim_order = np.array(args.sino_dim_order, dtype = int)
if tof:
ntofbins = 27
else:
ntofbins = 1
np.random.seed(1)
#---------------------------------------------------------------------------------
# setup a scanner
scanner = ppp.RegularPolygonPETScanner(ncrystals_per_module =
|
np.array([16,9])
|
numpy.array
|
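# Illustrative sketch, independent of the projector code above: the cp.RawKernel
# pattern the snippet describes, compiling a small CUDA source string and
# launching it to add two arrays. The kernel source below is an assumption made
# for this sketch, not the joseph3d projector kernel.
import cupy as cp

_add_src = r'''
extern "C" __global__
void add_kernel(const float* a, const float* b, float* c, unsigned long long n)
{
  unsigned long long i = (unsigned long long)blockDim.x * blockIdx.x + threadIdx.x;
  if (i < n) c[i] = a[i] + b[i];
}
'''
add_kernel = cp.RawKernel(_add_src, 'add_kernel')

n = 1 << 20
a = cp.random.rand(n, dtype=cp.float32)
b = cp.random.rand(n, dtype=cp.float32)
c = cp.zeros(n, dtype=cp.float32)

threads_per_block = 64
blocks = (n + threads_per_block - 1) // threads_per_block
add_kernel((blocks,), (threads_per_block,), (a, b, c, cp.uint64(n)))
assert cp.allclose(c, a + b)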
'''
Created on May 16, 2018
@author: cef
significant scripts for calculating damage within the ABMRI framework
for secondary data loader scripts, see fdmg.datos.py
'''
#===============================================================================
# IMPORT STANDARD MODS -------------------------------------------------------
#===============================================================================
import logging, os, time, re, math, copy, gc, weakref, random, sys
import pandas as pd
import numpy as np
import scipy.integrate
#===============================================================================
# shortcuts
#===============================================================================
from collections import OrderedDict
from hlpr.exceptions import Error
from weakref import WeakValueDictionary as wdict
from weakref import proxy
from model.sofda.hp.basic import OrderedSet
from model.sofda.hp.pd import view
idx = pd.IndexSlice
#===============================================================================
# IMPORT CUSTOM MODS ---------------------------------------------------------
#===============================================================================
#import hp.plot
import model.sofda.hp.basic as hp_basic
import model.sofda.hp.pd as hp_pd
import model.sofda.hp.oop as hp_oop
import model.sofda.hp.sim as hp_sim
import model.sofda.hp.data as hp_data
import model.sofda.hp.dyno as hp_dyno
import model.sofda.hp.sel as hp_sel
import model.sofda.fdmg.datos_fdmg as datos
#import matplotlib.pyplot as plt
#import matplotlib
#import matplotlib.animation as animation #load the animation module (with the new search path)
#===============================================================================
# custom shortcuts
#===============================================================================
from model.sofda.fdmg.house import House
#from model.sofda.fdmg.dfunc import Dfunc
from model.sofda.fdmg.dmgfeat import Dmg_feat
# logger setup -----------------------------------------------------------------------
mod_logger = logging.getLogger(__name__)
mod_logger.debug('initialized')
#===============================================================================
#module level defaults ------------------------------------------------------
#===============================================================================
#datapars_cols = [u'dataname', u'desc', u'datafile_tailpath', u'datatplate_tailpath', u'trim_row'] #headers in the data tab
datafile_types_list = ['.csv', '.xls']
class Fdmg( #flood damage model
hp_sel.Sel_controller, #no init
hp_dyno.Dyno_wrap, #add some empty containers
#hp.plot.Plot_o, #build the label
hp_sim.Sim_model, #Sim_wrap: attach the reset_d. Sim_model: inherit attributes
hp_oop.Trunk_o, #no init
#Parent_cmplx: attach empty kids_sd
#Parent: set some defaults
hp_oop.Child):
"""
#===========================================================================
# INPUTS
#===========================================================================
pars_path ==> pars_file.xls
main external parameter spreadsheet.
See description in file for each column
dataset parameters
tab = 'data'. expected columns: datapars_cols
session parameters
tab = 'gen'. expected rows: sessionpars_rows
"""
#===========================================================================
# program parameters
#===========================================================================
name = 'fdmg'
#list of attribute names to try and inherit from the session
try_inherit_anl = set(['ca_ltail', 'ca_rtail', 'mind', \
'dbg_fld_cnt', 'legacy_binv_f', 'gis_area_max', \
'fprob_mult', 'flood_tbl_nm', 'gpwr_aep', 'dmg_rat_f',\
'joist_space', 'G_anchor_ht', 'bsmt_opn_ht_code','bsmt_egrd_code', \
'damp_func_code', 'cont_val_scale', 'hse_skip_depth', \
'area_egrd00', 'area_egrd01', 'area_egrd02',
'fhr_nm', 'write_fdmg_sum', 'dfeat_xclud_price',
'write_fdmg_sum_fly',
])
fld_aep_spcl = 100 #special flood to try and include in db runs
bsmt_egrd = 'wet' #default value for bsmt_egrd
legacy_binv_f = True #flag to indicate that the binv is in legacy format (use indices rather than column labels)
gis_area_max = 3500
acode_sec_d = dict() #available acodes with dfunc data loaded (to check against binv request) {acode:asector}
'consider allowing the user control of these'
gis_area_min = 5
gis_area_max = 5000
write_fdmg_sum_fly = False
write_dmg_fly_first = True #start off to signify first run
#===========================================================================
# debuggers
#===========================================================================
write_beg_hist = True #whether to write the beg history or not
beg_hist_df = None
#===========================================================================
# user provided values
#===========================================================================
#legacy pars
floor_ht = 0.0
mind = '' #column to match between data sets and name the house objects
#EAD calc
ca_ltail ='flat'
ca_rtail =2 #aep at which zero value is assumed. 'none' uses lowest aep in flood set
#Floodo controllers
gpwr_aep = 100 #default max aep where gridpower_f = TRUE (when the power shuts off)
dbg_fld_cnt = '0' #for slicing the number of floods we want to evaluate
#area exposure
area_egrd00 = None
area_egrd01 = None
area_egrd02 = None
#Dfunc controllers
place_codes = None
dmg_types = None
flood_tbl_nm = None #name of the flood table to use
#timeline deltas
'just keeping this on the fdmg for simplicity... no need for flood level heterogeneity'
wsl_delta = 0.0
fprob_mult = 1.0 #needs to be a float for type matching
dmg_rat_f = False
#Fdmg.House pars
joist_space = 0.3
G_anchor_ht = 0.6
bsmt_egrd_code = 'plpm'
damp_func_code = 'seep'
bsmt_opn_ht_code = '*min(2.0)'
hse_skip_depth = -4 #depth to skip house damage calc
fhr_nm = ''
cont_val_scale = .25
write_fdmg_sum = True
dfeat_xclud_price = 0.0
#===========================================================================
# calculation parameters
#===========================================================================
res_fancy = None
gpwr_f = True #placeholder for __init__ calcs
fld_aep_l = None
dmg_dx_base = None #results frame for writing
plotr_d = None #dictionary of EAD plot workers
dfeats_d = dict() #{tag:dfeats}. see raise_all_dfeats()
fld_pwr_cnt = 0
seq = 0
#damage results/stats
dmgs_df = None
dmgs_df_wtail = None #damage summaries with damages for the tail logic included
ead_tot = 0
dmg_tot = 0
#===========================================================================
# calculation data holders
#===========================================================================
dmg_dx = None #container for full run results
bdry_cnt = 0
bwet_cnt = 0
bdamp_cnt = 0
def __init__(self,*vars, **kwargs):
logger = mod_logger.getChild('Fdmg')
#=======================================================================
# initialize cascade
#=======================================================================
super(Fdmg, self).__init__(*vars, **kwargs) #initialize the base class
#=======================================================================
# object updates
#=======================================================================
self.reset_d.update({'ead_tot':0, 'dmgs_df':None, 'dmg_dx':None,\
'wsl_delta':0}) #update the reset_d attributes
#=======================================================================
# defaults
#=======================================================================
if not self.session._write_data:
self.write_fdmg_sum = False
if not self.dbg_fld_cnt == 'all':
self.dbg_fld_cnt = int(float(self.dbg_fld_cnt))
#=======================================================================
# pre checks
#=======================================================================
if self.db_f:
#model assignment
if not self.model.__repr__() == self.__repr__():
raise IOError
#check we have all the datos we want
dname_exp = np.array(('rfda_curve', 'binv','dfeat_tbl', 'fhr_tbl'))
boolar = np.invert(np.isin(dname_exp, self.session.pars_df_d['datos']))
if np.any(boolar):
"""allowing this?"""
logger.warning('missing %i expected datos: %s'%(boolar.sum(), dname_exp[boolar]))
#=======================================================================
#setup functions
#=======================================================================
#par cleaners/ special loaders
logger.debug("load_hse_geo() \n")
self.load_hse_geo()
logger.info('load and clean dfunc data \n')
self.load_pars_dfunc(self.session.pars_df_d['dfunc']) #load the data functions to damage type table
logger.debug('\n')
self.setup_dmg_dx_cols()
logger.debug('load_submodels() \n')
self.load_submodels()
logger.debug('init_dyno() \n')
self.init_dyno()
#outputting setup
if self.write_fdmg_sum_fly:
self.fly_res_fpath = os.path.join(self.session.outpath, '%s fdmg_res_fly.csv'%self.session.tag)
logger.info('Fdmg model initialized as \'%s\' \n'%(self.name))
return
#===========================================================================
# def xxxcheck_pars(self): #check your data pars
# #pull the datas frame
# df_raw = self.session.pars_df_d['datos']
#
# #=======================================================================
# # check mandatory data objects
# #=======================================================================
# if not 'binv' in df_raw['name'].tolist():
# raise Error('missing \'binv\'!')
#
# #=======================================================================
# # check optional data objects
# #=======================================================================
# fdmg_tab_nl = ['rfda_curve', 'binv','dfeat_tbl', 'fhr_tbl']
# boolidx = df_raw['name'].isin(fdmg_tab_nl)
#
# if not np.all(boolidx):
# raise IOError #passed some unexpected data names
#
# return
#===========================================================================
def load_submodels(self):
logger = self.logger.getChild('load_submodels')
self.state = 'load'
#=======================================================================
# data objects
#=======================================================================
'this is the main loader that builds all the children as specified on the data tab'
logger.info('loading dat objects from \'fdmg\' tab')
logger.debug('\n \n')
#build datos from the data tab
'todo: hard code these class types (rather than reading from the control file)'
self.fdmgo_d = self.raise_children_df(self.session.pars_df_d['datos'], #df to raise on
kid_class = None) #should raise according to df entry
self.session.prof(state='load.fdmg.datos')
'WARNING: fdmgo_d is not set until after ALL the children on this tab are raised'
#attach special children
self.binv = self.fdmgo_d['binv']
"""NO! this wont hold resetting updates
self.binv_df = self.binv.childmeta_df"""
#=======================================================================
# flood tables
#=======================================================================
self.ftblos_d = self.raise_children_df(self.session.pars_df_d['flood_tbls'], #df to raise on
kid_class = datos.Flood_tbl) #should raise according to df entry
#make sure the one we are looking for is in there
if not self.session.flood_tbl_nm in list(self.ftblos_d.keys()):
raise Error('requested flood table name \'%s\' not found in loaded sets'%self.session.flood_tbl_nm)
'initial call which only updates the binv_df'
self.set_area_prot_lvl()
if 'fhr_tbl' in list(self.fdmgo_d.keys()):
self.set_fhr()
#=======================================================================
# dfeats
#======================================================================
if self.session.load_dfeats_first_f & self.session.wdfeats_f:
logger.debug('raise_all_dfeats() \n')
self.dfeats_d = self.fdmgo_d['dfeat_tbl'].raise_all_dfeats()
#=======================================================================
# raise houses
#=======================================================================
#check we have all the acodes
self.check_acodes()
logger.info('raising houses')
logger.debug('\n')
self.binv.raise_houses()
self.session.prof(state='load.fdmg.houses')
'calling this here so all of the other datos are raised'
#self.rfda_curve = self.fdmgo_d['rfda_curve']
"""No! we need to get this in before the binv.reset_d['childmeta_df'] is set
self.set_area_prot_lvl() #apply the area protection from the named flood table"""
logger.info('loading floods')
logger.debug('\n \n')
self.load_floods()
self.session.prof(state='load.fdmg.floods')
logger.debug("finished with %i kids\n"%len(self.kids_d))
return
def setup_dmg_dx_cols(self): #get the columns to use for fdmg results
"""
This is setup to generate a unique set of ordered column names with this logic
take the damage types
add mandatory fields
add user provided fields
"""
logger = self.logger.getChild('setup_dmg_dx_cols')
#=======================================================================
#build the basic list of column headers
#=======================================================================
#damage types at the head
col_os = OrderedSet(self.dmg_types) #put the damage types first
#basic add ons
_ = col_os.update(['total', 'hse_depth', 'wsl', 'bsmt_egrd', 'anchor_el'])
#=======================================================================
# special logic
#=======================================================================
if self.dmg_rat_f:
for dmg_type in self.dmg_types:
_ = col_os.add('%s_rat'%dmg_type)
if not self.wsl_delta==0:
col_os.add('wsl_raw')
"""This doesnt handle runs where we start with a delta of zero and then add some later
for these, you need to expplicitly call 'wsl_raw' in the dmg_xtra_cols_fat"""
#ground water damage
if 'dmg_gw' in self.session.outpars_d['Flood']:
col_os.add('gw_f')
#add the dem if necessary
if 'gw_f' in col_os:
col_os.add('dem_el')
#=======================================================================
# set pars based on user provided
#=======================================================================
#s = self.session.outpars_d[self.__class__.__name__]
#extra columns for damage results frame
if self.db_f or self.session.write_fdmg_fancy:
logger.debug('including extra columns in outputs')
#clean the extra cols
'todo: move this to a helper'
if hasattr(self.session, 'xtra_cols'):
try:
dc_l = eval(self.session.xtra_cols) #convert to a list
except:
logger.error('failed to convert \'xtra_cols\' to a list. check formatting')
raise IOError
else:
dc_l = ['wsl_raw', 'gis_area', 'acode_s', 'B_f_height', 'BS_ints','gw_f']
if not isinstance(dc_l, list): raise IOError
col_os.update(dc_l) #add these
self.dmg_df_cols = col_os
logger.debug('set dmg_df_cols as: %s'%self.dmg_df_cols)
return
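# Illustrative sketch, not part of the Fdmg class: the column assembly above only
# needs "unique but insertion-ordered" names. A plain dict reproduces that
# behaviour on Python 3.7+; the damage-type names below are made up.
def ordered_unique_cols(dmg_types, extra_cols):
    cols = dict.fromkeys(dmg_types)                     # damage types at the head
    cols.update(dict.fromkeys(['total', 'hse_depth', 'wsl', 'bsmt_egrd', 'anchor_el']))
    cols.update(dict.fromkeys(extra_cols))              # user additions, duplicates collapse
    return list(cols)

# ordered_unique_cols(['MS', 'BS'], ['wsl_raw', 'total'])
# -> ['MS', 'BS', 'total', 'hse_depth', 'wsl', 'bsmt_egrd', 'anchor_el', 'wsl_raw']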
def load_pars_dfunc(self,
df_raw=None): #build a df from the dfunc tab
"""
20190512: upgraded to handle nores and mres types
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('load_pars_dfunc')
#list of columns to expect
exp_colns = np.array(['acode','asector','place_code','dmg_code','dfunc_type','anchor_ht_code'])
if df_raw is None:
df_raw = self.session.pars_df_d['dfunc']
logger.debug('from df %s: \n %s'%(str(df_raw.shape), df_raw))
#=======================================================================
# clean
#=======================================================================
df1 = df_raw.dropna(axis='columns', how='all').dropna(axis='index', how='all') #drop rows with all na
df1 = df1.drop(columns=['note', 'rank'], errors='ignore') #drop some columns we dont need
#=======================================================================
# checking
#=======================================================================
#expected columns
boolar = np.invert(np.isin(exp_colns, df1.columns))
if np.any(boolar):
raise Error('missing %i expected columns\n %s'%(boolar.sum(), exp_colns[boolar]))
#rfda garage logic
boolidx = np.logical_and(df1['place_code'] == 'G', df1['dfunc_type'] == 'rfda')
if np.any(boolidx):
raise Error('got dfunc_type = rfda for a garage curve (no such thing)')
#=======================================================================
# calculated columns
#=======================================================================
df2 = df1.copy()
df2['dmg_type'] = df2['place_code'] + df2['dmg_code']
"""as acode whill change, we want to keep the name static
df2['name'] = df2['acode'] + df2['dmg_type']"""
df2['name'] = df2['dmg_type']
#=======================================================================
# data loading
#=======================================================================
if 'tailpath' in df2.columns:
boolidx = ~pd.isnull(df2['tailpath']) #get dfuncs with data requests
self.load_raw_dfunc(df2[boolidx])
df2 = df2.drop(['headpath', 'tailpath'], axis = 1, errors='ignore') #drop these columns
#=======================================================================
# get special lists
#=======================================================================
#find total for exclusion
boolidx = np.invert((df2['place_code']=='total').astype(bool))
"""Im not using the total dfunc any more..."""
if not np.all(boolidx):
raise Error('i think this has been disabled')
self.dmg_types = tuple(df2.loc[boolidx,'dmg_type'].dropna().unique().tolist())
self.dmg_codes = tuple(df2.loc[boolidx, 'dmg_code'].dropna().unique().tolist())
self.place_codes = tuple(df2.loc[boolidx,'place_code'].dropna().unique().tolist())
#=======================================================================
# #handle nulls
#=======================================================================
df3 = df2.copy()
for coln in ['dmg_type', 'name']:
df3.loc[:,coln] = df3[coln].replace(to_replace=np.nan, value='none')
#=======================================================================
# set this
#=======================================================================
self.session.pars_df_d['dfunc'] = df3
logger.debug('dfunc_df with %s'%str(df3.shape))
#=======================================================================
# get slice for houses
#=======================================================================
self.dfunc_mstr_df = df3[boolidx] #get this trim
return
"""
view(df3)
"""
def load_hse_geo(self): #special loader for hse_geo dxcol (from tab hse_geo)
logger = self.logger.getChild('load_hse_geo')
#=======================================================================
# load and clean the pars
#=======================================================================
df_raw = hp_pd.load_xls_df(self.session.parspath,
sheetname = 'hse_geo', header = [0,1], logger = logger)
df = df_raw.dropna(how='all', axis = 'index') #drop any rows with all nulls
self.session.pars_df_d['hse_geo'] = df
#=======================================================================
# build a blank starter for each house to fill
#=======================================================================
omdex = df.columns #get the original mdex
'probably a cleaner way of doing this'
lvl0_values = omdex.get_level_values(0).unique().tolist()
lvl1_values = omdex.get_level_values(1).unique().tolist()
lvl1_values.append('t')
newcols = pd.MultiIndex.from_product([lvl0_values, lvl1_values],
names=['place_code','finish_code'])
"""id prefer to use a shortend type (Float32)
but this makes all the type checking very difficult"""
geo_dxcol = pd.DataFrame(index = df.index, columns = newcols, dtype='Float32') #make the frame
self.geo_dxcol_blank = geo_dxcol
if self.db_f:
if np.any(pd.isnull(df)):
raise Error('got %i nulls in the hse_geo tab'%df.isna().sum().sum())
l = geo_dxcol.index.tolist()
if not l == ['area', 'height', 'per', 'inta']:
raise IOError
return
def load_raw_dfunc(self, meta_df_raw): #load raw data for dfuncs
logger = self.logger.getChild('load_raw_dfunc')
logger.debug('with df \'%s\''%(str(meta_df_raw.shape)))
d = dict() #empty container
meta_df = meta_df_raw.copy()
#=======================================================================
# loop through each row and load the data
#=======================================================================
for indx, row in meta_df.iterrows():
inpath = os.path.join(row['headpath'], row['tailpath'])
df = hp_pd.load_smart_df(inpath,
index_col =None,
logger = logger)
d[row['name']] = df.dropna(how = 'all', axis = 'index') #store this into the dictionary
logger.info('finished loading raw dcurve data on %i dcurves: %s'%(len(d), list(d.keys())))
self.dfunc_raw_d = d
return
def load_floods(self):
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('load_floods')
logger.debug('setting floods df \n')
self.set_floods_df()
df = self.floods_df
logger.debug('raising floods \n')
d = self.raise_children_df(df, #build flood children
kid_class = Flood,
dup_sibs_f= True,
container = OrderedDict) #pass attributes from one to the next
#=======================================================================
# ordered by aep
#=======================================================================
fld_aep_od = OrderedDict()
for childname, childo in d.items():
if hasattr(childo, 'ari'):
fld_aep_od[childo.ari] = childo
else: raise IOError
logger.info('raised and bundled %i floods by aep'%len(fld_aep_od))
self.fld_aep_od = fld_aep_od
return
def set_floods_df(self): #build the flood meta data
logger = self.logger.getChild('set_floods_df')
df_raw = self.session.pars_df_d['floods']
df1 = df_raw.sort_values('ari').reset_index(drop=True)
df1['ari'] = df1['ari'].astype(int)
#=======================================================================
# slice for debug set
#=======================================================================
if self.db_f & (not self.dbg_fld_cnt == 'all'):
"""this would be much better with explicit typesetting"""
#check that we even have enough to do the slicing
if len(df1) < 2:
logger.error('too few floods for debug slicing. pass dbg_fld_cnt == all')
raise IOError
df2 = pd.DataFrame(columns = df1.columns) #make blank starter frame
dbg_fld_cnt = int(float(self.dbg_fld_cnt))
logger.info('db_f=TRUE. selecting %i (of %i) floods'%(dbg_fld_cnt, len(df1)))
#===================================================================
# try to pull out and add the 100yr
#===================================================================
try:
boolidx = df1.loc[:,'ari'] == self.fld_aep_spcl
if not boolidx.sum() == 1:
logger.debug('failed to locate 1 flood')
raise IOError
df2 = df2.append(df1[boolidx]) #add this row to the end
df1 = df1[~boolidx] #slice out this row
dbg_fld_cnt = max(0, dbg_fld_cnt - 1) #reduce the loop count by 1
dbg_fld_cnt = min(dbg_fld_cnt, len(df1)) #double check in case we are given a very short set
logger.debug('added the %s year flood to the list with dbg_fld_cnt %i'%(self.fld_aep_spcl, dbg_fld_cnt))
except:
logger.debug('failed to extract the special %i flood'%self.fld_aep_spcl)
df2 = df1.copy()
#===================================================================
# build list of extreme (low/high) floods
#===================================================================
evn_cnt = 0
odd_cnt = 0
for cnt in range(0, dbg_fld_cnt, 1):
if cnt % 2 == 0: #evens. pull from front
idxr = evn_cnt
evn_cnt += 1
else: #odds. pull from end
idxr = len(df1) - odd_cnt - 1
odd_cnt += 1
logger.debug('pulling flood with indexer %i'%(idxr))
ser = df1.iloc[idxr, :] #make this slice
df2 = df2.append(ser) #append this to the end
#clean up
df = df2.drop_duplicates().sort_values('ari').reset_index(drop=True)
logger.debug('built extremes flood df with %i aeps: %s'%(len(df), df.loc[:,'ari'].values.tolist()))
if not len(df) == int(self.dbg_fld_cnt):
raise IOError
else:
df = df1.copy()
if not len(df) > 0: raise IOError
self.floods_df = df
return
def set_area_prot_lvl(self): #assign the area_prot_lvl to the binv based on your tab
#logger = self.logger.getChild('set_area_prot_lvl')
"""
TODO: Consider moving this onto the binv and making the binv dynamic...
Calls:
handles for flood_tbl_nm
"""
logger = self.logger.getChild('set_area_prot_lvl')
logger.debug('assigning \'area_prot_lvl\' for \'%s\''%self.flood_tbl_nm)
#=======================================================================
# get data
#=======================================================================
ftbl_o = self.ftblos_d[self.flood_tbl_nm] #get the activated flood table object
ftbl_o.apply_on_binv('aprot_df', 'area_prot_lvl')
return True
def set_fhr(self): #assign the fhz bfe and zone from the fhr_tbl data
logger = self.logger.getChild('set_fhr')
logger.debug('assigning for \'fhz\' and \'bfe\'')
#get the data for this fhr set
fhr_tbl_o = self.fdmgo_d['fhr_tbl']
try:
df = fhr_tbl_o.d[self.fhr_nm]
except:
if not self.fhr_nm in list(fhr_tbl_o.d.keys()):
logger.error('could not find selected fhr_nm \'%s\' in the loaded rule sets: \n %s'
%(self.fhr_nm, list(fhr_tbl_o.d.keys())))
raise IOError
#=======================================================================
# loop through each series and apply
#=======================================================================
"""
not the most generic way of handling this...
todo:
add generic method to the binv
can take ser or df
updates the childmeta_df if before init
updates the children if after init
"""
for hse_attn in ['fhz', 'bfe']:
ser = df[hse_attn]
if not self.session.state == 'init':
#=======================================================================
# tell teh binv to update its houses
#=======================================================================
self.binv.set_all_hse_atts(hse_attn, ser = ser)
else:
logger.debug('set column \'%s\' onto the binv_df'%hse_attn)
self.binv.childmeta_df.loc[:,hse_attn] = ser #set this column in teh binvdf
"""I dont like this
fhr_tbl_o.apply_on_binv('fhz_df', 'fhz', coln = self.fhr_nm)
fhr_tbl_o.apply_on_binv('bfe_df', 'bfe', coln = self.fhr_nm)"""
return True
def get_all_aeps_classic(self): #get the list of flood aeps from the classic flood table format
'kept this special syntax reader separate in case we want to change the format of the flood tables'
flood_pars_df = self.session.pars_df_d['floods'] #load the data from the flood table
fld_aep_l = flood_pars_df.loc[:, 'ari'].values #drop the 2 values and convert to a list
return fld_aep_l
def run(self, **kwargs): #placeholder for simulation runs
logger = self.logger.getChild('run')
logger.debug('on run_cnt %i'%self.run_cnt)
self.run_cnt += 1
self.state='run'
#=======================================================================
# prechecks
#=======================================================================
if self.db_f:
if not isinstance(self.outpath, str):
raise IOError
logger.info('\n fdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmg')
logger.info('for run_cnt %i'%self.run_cnt)
self.calc_fld_set(**kwargs)
return
def setup_res_dxcol(self, #setup the results frame
fld_aep_l = None,
#dmg_type_list = 'all',
bid_l = None):
#=======================================================================
# defaults
#=======================================================================
if bid_l == None: bid_l = self.binv.bid_l
if fld_aep_l is None: fld_aep_l = list(self.fld_aep_od.keys()) #just get all the keys from the dictionary
#if dmg_type_list=='all': dmg_type_list = self.dmg_types
#=======================================================================
# setup the dxind for writing
#=======================================================================
lvl0_values = fld_aep_l
lvl1_values = self.dmg_df_cols #include extra reporting columns
#fold these into a mdex (each flood_aep has all dmg_types)
columns = pd.MultiIndex.from_product([lvl0_values, lvl1_values],
names=['flood_aep','hse_atts'])
dmg_dx = pd.DataFrame(index = bid_l, columns = columns).sort_index() #make the frame
self.dmg_dx_base = dmg_dx.copy()
if self.db_f:
logger = self.logger.getChild('setup_res_dxcol')
if self.write_beg_hist:
fld_aep_l.sort()
columns = pd.MultiIndex.from_product([fld_aep_l, ['egrd', 'cond']],
names=['flood_aep','egrd'])
self.beg_hist_df = pd.DataFrame(index=bid_l, columns = columns)
logger.info('recording bsmt_egrd history with %s'%str(self.beg_hist_df.shape))
else:
self.beg_hist_df = None
"""
dmg_dx.columns
"""
return
def calc_fld_set(self, #calc flood damage for the flood set
fld_aep_l = None, #list of flood aeps to calcluate
#dmg_type_list = 'all', #list of damage types to calculate
bid_l = None, #list of building names ot calculate
wsl_delta = None, #delta value to add to all wsl
wtf = None, #optinonal flag to control writing of dmg_dx (otherwise session.write_fdmg_set_dx is used)
**run_fld): #kwargs to send to run_fld
'we could separate the object creation and the damage calculation'
"""
#=======================================================================
# INPUTS
#=======================================================================
fld_aep_l: list of floods to calc
this can be a custom list built by the user
extracted from the flood table (see session.get_ftbl_aeps)
loaded from the legacy rfda pars (session.rfda_pars.fld_aep_l)\
bid_l: list of ids (matching the mind variable set under Fdmg)
#=======================================================================
# OUTPUTS
#=======================================================================
dmg_dx: dxcol of flood damage across all dmg_types and floods
mdex
lvl0: flood aep
lvl1: dmg_type + extra cols
I wanted to have this flexible, so the dfunc could pass up extra headers
couldn't get it to work. instead used a global list and a check
new headers must be added to the global list and Dfunc.
index
bldg_id
#=======================================================================
# TODO:
#=======================================================================
setup to calc across binvs as well
"""
#=======================================================================
# defaults
#=======================================================================
start = time.time()
logger = self.logger.getChild('calc_fld_set')
if wtf is None: wtf = self.session.write_fdmg_set_dx
if wsl_delta is None: wsl_delta= self.wsl_delta
#=======================================================================
# setup and load the results frame
#=======================================================================
#check to see that all of these conditions pass
if not np.all([bid_l is None, fld_aep_l is None]):
logger.debug('non default run. rebuild the dmg_dx_base')
#non default run. rebuild the frame
self.setup_res_dxcol( fld_aep_l = fld_aep_l,
#dmg_type_list = dmg_type_list,
bid_l = bid_l)
elif self.dmg_dx_base is None: #probably the first run
if not self.run_cnt == 1: raise IOError
logger.debug('self.dmg_dx_base is None. rebuilding')
self.setup_res_dxcol(fld_aep_l = fld_aep_l,
#dmg_type_list = dmg_type_list,
bid_l = bid_l) #set it up with the defaults
dmg_dx = self.dmg_dx_base.copy() #just start witha copy of the base
#=======================================================================
# finish defaults
#=======================================================================
'these are all mostly for reporting'
if fld_aep_l is None: fld_aep_l = list(self.fld_aep_od.keys()) #just get all the keys from the dictionary
""" leaving these as empty kwargs and letting floods handle
if bid_l == None: bid_l = binv_dato.bid_l
if dmg_type_list=='all': dmg_type_list = self.dmg_types """
"""
lvl0_values = dmg_dx.columns.get_level_values(0).unique().tolist()
lvl1_values = dmg_dx.columns.get_level_values(1).unique().tolist()"""
logger.info('calc flood damage (%i) floods w/ wsl_delta = %.2f'%(len(fld_aep_l), wsl_delta))
logger.debug('ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff \n')
#=======================================================================
# loop and calc each flood
#=======================================================================
fcnt = 0
first = True
for flood_aep in fld_aep_l: #loop through and build each flood
#self.session.prof(state='%s.fdmg.calc_fld_set.%i'%(self.get_id(), fcnt)) #memory profiling
self.state = flood_aep
'useful for keeping track of what the model is doing'
#get the flood
flood_dato = self.fld_aep_od[flood_aep] #pull this from the dictionary
logger.debug('getting dmg_df for %s'%flood_dato.name)
#===================================================================
# run sequence
#===================================================================
#get damage for these depths
dmg_df = flood_dato.run_fld(**run_fld) #add the damage df to this slice
if dmg_df is None: continue #skip this one
#===================================================================
# wrap up
#===================================================================
dmg_dx[flood_aep] = dmg_df #store into the frame
fcnt += 1
logger.debug('for flood_aep \'%s\' on fcnt %i got dmg_df %s \n'%(flood_aep, fcnt, str(dmg_df.shape)))
#===================================================================
# checking
#===================================================================
if self.db_f:
#check that the floods are increasing
if first:
first = False
last_aep = None
else:
if not flood_aep > last_aep:
raise IOError
last_aep = flood_aep
#=======================================================================
# wrap up
#=======================================================================
self.state = 'na'
if wtf:
filetail = '%s %s %s %s res_fld'%(self.session.tag, self.simu_o.name, self.tstep_o.name, self.name)
filepath = os.path.join(self.outpath, filetail)
hp_pd.write_to_file(filepath, dmg_dx, overwrite=True, index=True) #send for writing
self.dmg_dx = dmg_dx
stop = time.time()
logger.info('in %.4f secs calcd damage on %i of %i floods'%(stop - start, fcnt, len(fld_aep_l)))
return
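# Illustrative sketch, not part of the Fdmg class: the dmg_dx assembled above is a
# column-MultiIndex frame (level 0 = flood aep, level 1 = damage type / extra
# columns; index = building id). Toy sizes and random values only.
import numpy as np
import pandas as pd

bid_l = [101, 102, 103]
cols = pd.MultiIndex.from_product([[10, 100], ['MS', 'BS', 'total']],
                                  names=['flood_aep', 'hse_atts'])
dmg_dx_toy = pd.DataFrame(np.random.rand(3, 6), index=bid_l, columns=cols)

dmg_df_100 = dmg_dx_toy[100]                        # one flood -> a plain dmg_df
totals = dmg_dx_toy.xs('total', axis=1, level=1)    # 'total' across all floods
grand_total = totals.sum().sum()                    # as used later in get_results()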
def get_results(self): #called by Timestep.run_dt()
self.state='wrap'
logger = self.logger.getChild('get_results')
#=======================================================================
# optionals
#=======================================================================
s = self.session.outpars_d[self.__class__.__name__]
if (self.session.write_fdmg_fancy) or (self.session.write_fdmg_sum):
logger.debug("calc_summaries \n")
dmgs_df = self.calc_summaries()
self.dmgs_df = dmgs_df.copy()
else: dmgs_df = None
if ('ead_tot' in s) or ('dmg_df' in s):
logger.debug('\n')
self.calc_annulized(dmgs_df = dmgs_df, plot_f = False)
'this will also run calc_summaries if it has not happened yet'
if 'dmg_tot' in s:
#get a cross section of the 'total' column across all flood_aeps and sum for all entries
self.dmg_tot = self.dmg_dx.xs('total', axis=1, level=1).sum().sum()
if ('bwet_cnt' in s) or ('bdamp_cnt' in s) or ('bdry_cnt' in s):
logger.debug('get_fld_begrd_cnt')
self.get_fld_begrd_cnt()
if 'fld_pwr_cnt' in s:
logger.debug('calc_fld_pwr_cnt \n')
cnt = 0
for aep, obj in self.fld_aep_od.items():
if obj.gpwr_f: cnt +=1
self.fld_pwr_cnt = cnt
self.binv.calc_binv_stats()
if self.session.write_fdmg_fancy:
self.write_res_fancy()
if self.write_fdmg_sum_fly: #write the results after each run
self.write_dmg_fly()
#update the bdmg_dx
if not self.session.bdmg_dx is None:
#add the timestep
bdmg_dx = pd.concat([self.dmg_dx],
keys=[self.tstep_o.name],
names=['tstep'],
axis=1,verify_integrity=True,copy=False)
bdmg_dx.index.name = self.mind
"""trying this as a column so we can append
#add the sim
bdmg_dx = pd.concat([bdmg_dx],
keys=[self.simu_o.name],
names=['simu'],
axis=1,verify_integrity=True,copy=False)"""
#join to the big
if len(self.session.bdmg_dx) == 0:
self.session.bdmg_dx = bdmg_dx.copy()
else:
self.session.bdmg_dx = self.session.bdmg_dx.join(bdmg_dx)
"""
view(self.session.bdmg_dx.join(bdmg_dx))
view(bdmg_dx)
view(self.session.bdmg_dx)
"""
#=======================================================================
# checks
#=======================================================================
if self.db_f:
self.check_dmg_dx()
logger.debug('finished \n')
def calc_summaries(self, #annualize the damages
fsts_l = ['gpwr_f', 'dmg_sw', 'dmg_gw'], #list of additional flood attributes to report in the summary
dmg_dx=None,
plot=False, #flag to execute plot_dmgs() at the end. better to do this explicitly with an outputr
wtf=None):
"""
basically dropping dimensions on the outputs and adding annualized damages
#=======================================================================
# OUTPUTS
#=======================================================================
DROP BINV DIMENSION
dmgs_df: df with
columns: raw damage types, and annualized damage types
index: each flood
entries: total damage for binv
DROP FLOODS DIMENSION
aad_sum_ser
DROP ALL DIMENSIONS
ead_tot
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('calc_summaries')
if dmg_dx is None: dmg_dx = self.dmg_dx.copy()
if plot is None: plot = self.session._write_figs
if wtf is None: wtf = self.write_fdmg_sum
#=======================================================================
# #setup frame
#=======================================================================
#get the columns
dmg_types = list(self.dmg_types) + ['total']
#=======================================================================
# #build the annualized damage type names
#=======================================================================
admg_types = []
for entry in dmg_types: admg_types.append(entry+'_a')
cols = dmg_types + ['prob', 'prob_raw'] + admg_types + fsts_l
dmgs_df = pd.DataFrame(columns = cols)
dmgs_df['ari'] = dmg_dx.columns.get_level_values(0).unique()
dmgs_df = dmgs_df.sort_values('ari').reset_index(drop=True)
#=======================================================================
# loop through and fill out the data
#=======================================================================
for index, row in dmgs_df.iterrows(): #loop through an dfill out
dmg_df = dmg_dx[row['ari']] #get the fdmg for this aep
#sum all the damage types
for dmg_type in dmg_types:
row[dmg_type] = dmg_df[dmg_type].sum() #sum them all up
#calc the probability
row['prob_raw'] = 1/float(row['ari']) #inverse of the return period (ari)
row['prob'] = row['prob_raw'] * self.fprob_mult #apply the multiplier
#calculate the annualized damages
for admg_type in admg_types:
dmg_type = admg_type[:-2] #drop the a
row[admg_type] = row[dmg_type] * row['prob']
#===================================================================
# get stats from the floodo
#===================================================================
floodo = self.fld_aep_od[row['ari']]
for attn in fsts_l:
row[attn] = getattr(floodo, attn)
#===================================================================
# #add this row backinto the frame
#===================================================================
dmgs_df.loc[index,:] = row
#=======================================================================
# get series totals
#=======================================================================
dmgs_df = dmgs_df.sort_values('prob').reset_index(drop=True)
#=======================================================================
# closeout
#=======================================================================
logger.debug('annualized %i damage types for %i floods'%(len(dmg_types), len(dmgs_df)))
if wtf:
filetail = '%s dmg_sumry'%(self.session.state)
filepath = os.path.join(self.outpath, filetail)
hp_pd.write_to_file(filepath, dmgs_df, overwrite=True, index=False) #send for writing
logger.debug('set data with %s and cols: %s'%(str(dmgs_df.shape), dmgs_df.columns.tolist()))
if plot:
self.plot_dmgs(wtf=wtf)
#=======================================================================
# post check
#=======================================================================
if self.db_f:
#check for sort logic
if not dmgs_df.loc[:,'prob'].is_monotonic:
raise IOError
if not dmgs_df['total'].iloc[::-1].is_monotonic: #flip the order
logger.warning('bigger floods are not causing more damage')
'some of the flood tables seem bad...'
#raise IOError
#all probabilities should be larger than zero
if not np.all(dmgs_df.loc[:,'prob'] > 0):
raise IOError
return dmgs_df
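# Illustrative sketch, not part of the Fdmg class: how one summary row above turns
# a return period (ari) into an exceedance probability and an annualized damage.
# All numbers below are made up.
ari = 100.0                       # 1-in-100 year flood
fprob_mult = 1.0                  # probability multiplier, as held on the Fdmg object
total_dmg = 2.5e6                 # damage summed over the building inventory for this flood

prob_raw = 1.0 / ari              # 0.01 per year
prob = prob_raw * fprob_mult
total_dmg_a = total_dmg * prob    # 25,000 $/yr contributed by this flood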
def calc_annulized(self, dmgs_df = None,
ltail = None, rtail = None, plot_f=None,
dx = 0.001): #get the area under the damage curve
"""
#=======================================================================
# INPUTS
#=======================================================================
ltail: left tail treatment code (low prob high damage)
flat: extend the max damage to the zero probability event
'none': don't extend the tail
rtail: right tail treatment (high prob low damage)
'none': don't extend
'2year': extend to zero damage at the 2 year aep
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('calc_annulized')
if ltail is None: ltail = self.ca_ltail
if rtail is None: rtail = self.ca_rtail
'plotter ignores passed kwargs here'
if plot_f is None: plot_f= self.session._write_figs
#=======================================================================
# get data
#=======================================================================
if dmgs_df is None:
dmgs_df = self.calc_summaries()
#df_raw = self.data.loc[:,('total', 'prob', 'ari')].copy().reset_index(drop=True)
'only slicing columns for testing'
df = dmgs_df.copy().reset_index(drop=True)
#=======================================================================
# shortcuts
#=======================================================================
if len(df) <2 :
logger.warning('not enough floods to calculate EAD')
self.ead_tot = 0
self.dmgs_df_wtail = df
return
if df['total'].sum() < 1:
logger.warning('calculated zero damages!')
self.ead_tot = 0
self.dmgs_df_wtail = df
return
logger.debug("with ltail = \'%s\', rtail = \'%s\' and df %s"%(ltail, rtail, str(df.shape)))
#=======================================================================
# left tail treatment
#=======================================================================
if ltail == 'flat':
#zero probability
'assume 1000yr flood is the max damage'
max_dmg = df['total'].max()*1.0001
df.loc[-1, 'prob'] = 0
df.loc[-1, 'ari'] = 999999
df.loc[-1, 'total'] = max_dmg
logger.debug('ltail == flat. duplicated damage %.2f at prob 0'%max_dmg)
elif ltail == 'none':
pass
else: raise IOError
'todo: add option for value multiplier'
#=======================================================================
# right tail
#=======================================================================
if rtail == 'none':
pass
elif hp_basic.isnum(rtail):
rtail_yr = float(rtail)
rtail_p = 1.0 / rtail_yr
max_p = df['prob'].max()
#floor check
if rtail_p < max_p:
logger.error('rtail_p (%.2f) < max_p (%.2f)'%(rtail_p, max_p))
raise IOError
#same
elif rtail_p == max_p:
logger.debug("rtail_p == min(xl. no changes made")
else:
logger.debug("adding zero damage for aep = %.1f"%rtail_yr)
#zero damage
'assume no damage occurs at the passed rtail_yr'
loc = len(df)
df.loc[loc, 'prob'] = rtail_p
df.loc[loc, 'ari'] = 1.0/rtail_p
df.loc[loc, 'total'] = 0
"""
hp_pd.view_web_df(self.data)
"""
else: raise IOError
#=======================================================================
# clean up
#=======================================================================
df = df.sort_index() #resort the index
if self.db_f:
'these should still hold'
if not df.loc[:,'prob'].is_monotonic:
raise IOError
"""see above
if not df['total'].iloc[::-1].is_monotonic:
raise IOError"""
x, y = df['prob'].values.tolist(), df['total'].values.tolist()
#=======================================================================
# find area under curve
#=======================================================================
try:
#ead_tot = scipy.integrate.simps(y, x, dx = dx, even = 'avg')
'this was giving some weird results'
ead_tot = scipy.integrate.trapz(y, x, dx = dx)
except:
raise Error('scipy.integrate.trapz failed')
logger.info('found ead_tot = %.2f $/yr from %i points with tail_codes: \'%s\' and \'%s\''
%(ead_tot, len(y), ltail, rtail))
self.ead_tot = ead_tot
#=======================================================================
# checks
#=======================================================================
if self.db_f:
if pd.isnull(ead_tot):
raise IOError
if not isinstance(ead_tot, float):
raise IOError
if ead_tot <=0:
"""
view(df)
"""
raise Error('got negative damage! %.2f'%ead_tot)
#=======================================================================
# update data with tails
#=======================================================================
self.dmgs_df_wtail = df.sort_index().reset_index(drop=True)
#=======================================================================
# generate plot
#=======================================================================
if plot_f:
self.plot_dmgs(right_nm = None, xaxis = 'prob', logx = False)
return
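# Illustrative sketch, not part of the Fdmg class: once the tails are handled as
# above, the expected annual damage is the area under the damage-vs-probability
# curve, which numpy's trapezoidal rule integrates directly. Toy points only.
import numpy as np

prob = np.array([0.0, 0.01, 0.04, 0.1, 0.5])        # ltail adds the zero-probability point ...
dmg = np.array([3.0e6, 3.0e6, 1.2e6, 4.0e5, 0.0])   # ... rtail adds the zero-damage point
ead_tot_toy = np.trapz(dmg, prob)                   # $/yr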
def get_fld_begrd_cnt(self): #tabulate the bsmt_egrd counts from each flood
logger = self.logger.getChild('get_fld_begrd_cnt')
#=======================================================================
# data setup
#=======================================================================
dmg_dx = self.dmg_dx.copy()
#lvl1_values = dmg_dx.columns.get_level_values(0).unique().tolist()
#get all the basement exposure grade (bsmt_egrd) types
df1 = dmg_dx.loc[:,idx[:, 'bsmt_egrd']] #get a slice by level 2 values
#get occurances by value
d = hp_pd.sum_occurances(df1, logger=logger)
#=======================================================================
# loop and calc
#=======================================================================
logger.debug('looping through %i bsmt_egrds: %s'%(len(d), list(d.keys())))
for bsmt_egrd, cnt in d.items():
attn = 'b'+bsmt_egrd +'_cnt'
logger.debug('for \'%s\' got %i'%(attn, cnt))
setattr(self, attn, cnt)
logger.debug('finished \n')
def check_dmg_dx(self): #check logical consistency of the damage results
logger = self.logger.getChild('check_dmg_dx')
#=======================================================================
# data setup
#=======================================================================
dmg_dx = self.dmg_dx.copy()
mdex = dmg_dx.columns
aep_l = mdex.get_level_values(0).astype(int).unique().values.tolist()
aep_l.sort()
#=======================================================================
# check that each flood increases in damage
#=======================================================================
total = None
aep_last = None
for aep in aep_l:
#get this slice
df = dmg_dx[aep]
if total is None:
boolcol = np.isin(df.columns, ['MS', 'MC', 'BS', 'BC', 'GS']) #identify damage columns
total = df.loc[:,boolcol].sum().sum()
if not aep == min(aep_l):
raise IOError
else:
newtot = df.loc[:,boolcol].sum().sum()
if not newtot >= total:
logger.warning('aep %s tot %.2f < aep %s %.2f'%(aep, newtot, aep_last, total))
#raise IOError
#print 'new tot %.2f > oldtot %.2f'%(newtot, total)
total = newtot
aep_last = aep
return
def check_acodes(self, #check you have curves for all the acodes
ac_sec_d = None, #set of Loaded acodes {acode: asecotr}
ac_req_l = None, #set of requested acodes
dfunc_df = None, #contorl file page for the dfunc parameters
):
#=======================================================================
# defaults
#=======================================================================
log = self.logger.getChild('check_acodes')
if ac_sec_d is None: ac_sec_d = self.acode_sec_d
if ac_req_l is None: ac_req_l = self.binv.acode_l #pull from the binv
if dfunc_df is None: dfunc_df = self.session.pars_df_d['dfunc']
log.debug('checking acodes requested by binv against %i available'%len(ac_sec_d))
"""
for k, v in ac_sec_d.items():
print(k, v)
"""
#=======================================================================
# conversions
#=======================================================================
ava_ar = np.array(list(ac_sec_d.keys())) #convert availables to an array
req_ar = np.array(ac_req_l)
#get the pars set
pars_ar_raw = dfunc_df['acode'].dropna().unique()
pars_ar = pars_ar_raw[pars_ar_raw!='none'] #drop the nones
#=======================================================================
# check we loaded everything we requested in the pars
#=======================================================================
boolar = np.invert(np.isin(pars_ar, ava_ar))
if np.any(boolar):
raise Error('%i acodes requested by the pars were not loaded: \n %s'
%(boolar.sum(), pars_ar[boolar]))
#=======================================================================
# check the binv doesnt have anything we dont have pars for
#=======================================================================
boolar = np.invert(np.isin(req_ar, pars_ar))
if np.any(boolar):
raise Error('%i binv acodes not found on the \'dfunc\' tab: \n %s'
%(boolar.sum(), req_ar[boolar]))
return
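# Illustrative sketch, not part of the Fdmg class: the acode checks above are
# plain set-membership tests done with numpy; whatever is missing is reported.
# The acode strings below are made up.
import numpy as np

requested = np.array(['res1', 'res2', 'com1'])
available = np.array(['res1', 'com1'])

missing = np.invert(np.isin(requested, available))
if np.any(missing):
    print('%i acodes missing: %s' % (missing.sum(), requested[missing]))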
def wrap_up(self):
#=======================================================================
# update asset containers
#=======================================================================
"""
#building inventory
'should be flagged for updating during House.notify()'
if self.binv.upd_kid_f:
self.binv.update()"""
"""dont think we need this here any more.. only on udev.
keeping it just to be save"""
self.last_tstep = copy.copy(self.time)
self.state='close'
def write_res_fancy(self, #for saving results in xls per tab. called as a special outputr
dmg_dx=None,
include_ins = False,
include_raw = False,
include_begh = True):
"""
#=======================================================================
# INPUTS
#=======================================================================
include_ins: whether to add inputs as tabs.
I've left this separate from the 'copy_inputs' flag as it is not a true file copy of the inputs
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('write_res_fancy')
if dmg_dx is None: dmg_dx = self.dmg_dx
if dmg_dx is None:
logger.warning('got no dmg_dx. skipping')
return
#=======================================================================
# setup
#=======================================================================
od = OrderedDict()
#=======================================================================
# add the parameters
#=======================================================================
#get the blank frame
df = pd.DataFrame(columns = ['par','value'] )
df['par'] = list(self.try_inherit_anl)
for indx, row in df.iterrows():
df.iloc[indx, 1] = getattr(self, row['par']) #set this value
od['pars'] = df
#=======================================================================
# try and add damage summary
#=======================================================================
if not self.dmgs_df is None:
od['dmg summary'] = self.dmgs_df
#=======================================================================
# #get the dmg_dx decomposed
#=======================================================================
od.update(hp_pd.dxcol_to_df_set(dmg_dx, logger=self.logger))
#=======================================================================
# #add dmg_dx as a raw tab
#=======================================================================
if include_raw:
od['raw_res'] = dmg_dx
#=======================================================================
# add inputs
#=======================================================================
if include_ins:
for dataname, dato in self.kids_d.items():
if hasattr(dato, 'data') & hp_pd.isdf(dato.data):
od[dataname] = dato.data
#=======================================================================
# add debuggers
#=======================================================================
if include_begh:
if not self.beg_hist_df is None:
od['beg_hist'] = self.beg_hist_df
#=======================================================================
# #write to excel
#=======================================================================
filetail = '%s %s %s %s fancy_res'%(self.session.tag, self.simu_o.name, self.tstep_o.name, self.name)
filepath = os.path.join(self.outpath, filetail)
hp_pd.write_dfset_excel(od, filepath, engine='xlsxwriter', logger=self.logger)
return
def write_dmg_fly(self): #write damage results after each run
logger = self.logger.getChild('write_dmg_fly')
dxcol = self.dmg_dx #results
#=======================================================================
# build the results summary series
#=======================================================================
#get all the flood aeps
lvl0vals = dxcol.columns.get_level_values(0).unique().astype(int).tolist()
#blank holder
res_ser = pd.Series(index = lvl0vals)
#loop and calc sums for each flood
for aep in lvl0vals:
res_ser[aep] = dxcol.loc[:,(aep,'total')].sum()
#add extras
if not self.ead_tot is None:
res_ser['ead_tot'] = self.ead_tot
res_ser['dt'] = self.tstep_o.year
res_ser['sim'] = self.simu_o.ind
lindex = '%s.%s'%(self.simu_o.name, self.tstep_o.name)
hp_pd.write_fly_df(self.fly_res_fpath,res_ser, lindex = lindex,
first = self.write_dmg_fly_first, tag = 'fdmg totals',
db_f = self.db_f, logger=logger) #write results on the fly
self.write_dmg_fly_first = False
return
def get_plot_kids(self): #raise kids for plotting the damage summaries
logger = self.logger.getChild('get_plot_kids')
#=======================================================================
# get slice of aad_fmt_df matching the aad cols
#=======================================================================
aad_fmt_df = self.session.pars_df_d['dmg_sumry_plot'] #pull teh formater pars from the tab
dmgs_df = self.dmgs_df
self.data = dmgs_df
boolidx = aad_fmt_df.loc[:,'name'].isin(dmgs_df.columns) #get just those formatters with data in the aad
aad_fmt_df_slice = aad_fmt_df[boolidx] #get this slice
"""
hp_pd.view_web_df(self.data)
hp_pd.view_web_df(df)
hp_pd.view_web_df(aad_fmt_df_slice)
aad_fmt_df_slice.columns
"""
#=======================================================================
# formatter kids setup
#=======================================================================
"""need to run this every time so the data is updated
TODO: allow some updating here so we don't have to rebuild each time
if self.plotter_kids_dict is None:"""
self.plotr_d = self.raise_children_df(aad_fmt_df_slice, kid_class = hp_data.Data_o)
logger.debug('finished \n')
#===============================================================================
# def plot_dmgs(self, wtf=None, right_nm = None, xaxis = 'ari', logx = True,
# ylims = None, #tuple of min/max values for the y-axis
# ): #plot curve of aad
# """
# see tab 'aad_fmt' to control what is plotted and formatting
# """
# #=======================================================================
# # defaults
# #=======================================================================
# logger = self.logger.getChild('plot_dmgs')
# if wtf == None: wtf = self.session._write_figs
#
# #=======================================================================
# # prechecks
# #=======================================================================
# if self.db_f:
# if self.dmgs_df is None:
# raise IOError
#
#
# #=======================================================================
# # setup
# #=======================================================================
# if not ylims is None:
# try:
# ylims = eval(ylims)
# except:
# pass
#
# #get the plot workers
# if self.plotr_d is None:
# self.get_plot_kids()
#
# kids_d = self.plotr_d
#
# title = '%s-%s-%s EAD-ARI plot on %i objs'%(self.session.tag, self.simu_o.name, self.name, len(self.binv.childmeta_df))
# logger.debug('with \'%s\''%title)
#
# if not self.tstep_o is None:
# title = title + ' for %s'%self.tstep_o.name
#
# #=======================================================================
# # update plotters
# #=======================================================================
# logger.debug('updating plotters with my data')
#
# #get data
# data_og = self.data.copy() #store this for later
#
# if self.dmgs_df_wtail is None:
# df = self.dmgs_df.copy()
# else:
# df = self.dmgs_df_wtail.copy()
#
# df = df.sort_values(xaxis, ascending=True)
#
# #reformat data
# df.set_index(xaxis, inplace = True)
#
# #re set
# self.data = df
#
# #tell kids to refresh their data from here
# for gid, obj in kids_d.items(): obj.data = obj.loadr_vir()
#
# self.data = data_og #reset the data
#
# #=======================================================================
# # get annotation
# #=======================================================================
# val_str = '$' + "{:,.2f}".format(self.ead_tot/1e6)
# #val_str = "{:,.2f}".format(self.ead_tot)
# """
# txt = 'total aad: $%s \n tail kwargs: \'%s\' and \'%s\' \n'%(val_str, self.ca_ltail, self.ca_rtail) +\
# 'binv.cnt = %i, floods.cnt = %i \n'%(self.binv.cnt, len(self.fld_aep_od))"""
#
#
# txt = 'total EAD = %s'%val_str
#
#
# #=======================================================================
# #plot the workers
# #=======================================================================
# #twinx
# if not right_nm is None:
# logger.debug('twinning axis with name \'%s\''%right_nm)
# title = title + '_twin'
# # sort children into left/right buckets by name to plot on each axis
# right_pdb_d, left_pdb_d = self.sort_buckets(kids_d, right_nm)
#
# if self.db_f:
# if len (right_pdb_d) <1: raise IOError
#
# #=======================================================================
# # #send for plotting
# #=======================================================================
# 'this plots both bundles by their data indexes'
# ax1, ax2 = self.plot_twinx(left_pdb_d, right_pdb_d,
# logx=logx, xlab = xaxis, title=title, annot = txt,
# wtf=False)
# 'can't figure out why the annot is plotting twice'
#
# ax2.set_ylim(0, 1) #prob limits
# legon = False
# else:
# logger.debug('single axis')
#
# try:
# del kids_d['prob']
# except:
# pass
#
# pdb = self.get_pdb_dict(list(kids_d.values()))
#
# ax1 = self.plot_bundles(pdb,
# logx=logx, xlab = 'ARI', ylab = 'damage ($ 10^6)', title=title, annot = txt,
# wtf=False)
#
# legon=True
#
# #hatch
# #=======================================================================
# # post formatting
# #=======================================================================
# #set axis limits
# if xaxis == 'ari': ax1.set_xlim(1, 1000) #aep limits
# elif xaxis == 'prob': ax1.set_xlim(0, .6)
#
# if not ylims is None:
# ax1.set_ylim(ylims[0], ylims[1])
#
#
# #ax1.set_ylim(0, ax1.get_ylim()[1]) #$ limits
#
#
# #=======================================================================
# # format y axis labels
# #=======================================================================
# old_tick_l = ax1.get_yticks() #get the old labels
#
# # build the new ticks
# l = []
#
# for value in old_tick_l:
# new_v = '$' + "{:,.0f}".format(value/1e6)
# l.append(new_v)
#
# #apply the new labels
# ax1.set_yticklabels(l)
#
# """
# #add thousands comma
# ax1.get_yaxis().set_major_formatter(
# #matplotlib.ticker.FuncFormatter(lambda x, p: '$' + "{:,.2f}".format(x/1e6)))
#
# matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))"""
#
# if xaxis == 'ari':
# ax1.get_xaxis().set_major_formatter(
# matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
#
#
# if wtf:
# fig = ax1.figure
# savepath_raw = os.path.join(self.outpath,title)
# flag = hp.plot.save_fig(self, fig, savepath_raw=savepath_raw, dpi = self.dpi, legon=legon)
# if not flag: raise IOError
#
#
# #plt.close()
# return
#===============================================================================
class Flood(
hp_dyno.Dyno_wrap,
hp_sim.Sim_o,
hp_oop.Parent, #flood object worker
hp_oop.Child):
#===========================================================================
# program pars
#===========================================================================
gpwr_f = False #grid power flag placeholder
#===========================================================================
# user defined pars
#===========================================================================
ari = None
#loaded from flood table
#area exposure grade. controls the area depth decision algorithm based on the performance of macro structures (e.g. dykes).
area_egrd00 = ''
area_egrd01 = ''
area_egrd02 = ''
area_egrd00_code = None
area_egrd01_code = None
area_egrd02_code = None
#===========================================================================
# calculated pars
#===========================================================================
hdep_avg = 0 #average house depth
#damage properties
total = 0
BS = 0
BC = 0
MS = 0
MC = 0
dmg_gw = 0
dmg_sw = 0
dmg_df_blank =None
wsl_avg = 0
#===========================================================================
# data containers
#===========================================================================
hdmg_cnt = 0
dmg_df = None
dmg_res_df = None
#bsmt_egrd counters. see get_begrd_cnt()
bdry_cnt = 0
bwet_cnt = 0
bdamp_cnt = 0
def __init__(self, parent, *vars, **kwargs):
logger = mod_logger.getChild('Flood')
logger.debug('start _init_')
#=======================================================================
# #attach custom vars
#=======================================================================
self.inherit_parent_ans=set(['mind', 'dmg_types'])
#=======================================================================
# initialize cascade
#=======================================================================
super(Flood, self).__init__(parent, *vars, **kwargs) #initialize the base class
#=======================================================================
# common setup
#=======================================================================
if self.sib_cnt == 0:
#update the resets
pass
#=======================================================================
# unique setup
#=======================================================================
""" handled by the outputr
self.reset_d.update({'hdmg_cnt':0})"""
self.ari = int(self.ari)
self.dmg_res_df = pd.DataFrame() #set as an empty frame for output handling
#=======================================================================
# setup functions
#=======================================================================
self.set_gpwr_f()
logger.debug('set_dmg_df_blank()')
self.set_dmg_df_blank()
logger.debug('get your water levels from the selected wsl table \n')
self.set_wsl_frm_tbl()
logger.debug('set_area_egrd()')
self.set_area_egrd()
logger.debug('get_info_from_binv()')
df = self.get_info_from_binv() #initial run to set blank frame
self.set_wsl_from_egrd(df)
""" moved into set_wsl_frm_tbl()
logger.debug('\n')
self.setup_dmg_df()"""
self.init_dyno()
self.logger.debug('__init___ finished \n')
def set_dmg_df_blank(self):
logger = self.logger.getChild('set_dmg_df_blank')
binv_df = self.model.binv.childmeta_df
colns = OrderedSet(self.model.dmg_df_cols.tolist() + ['wsl', 'area_prot_lvl'])
'wsl should be redundant'
#get boolean
self.binvboolcol = binv_df.columns.isin(colns) #store this for get_info_from_binv()
#get the blank frame
self.dmg_df_blank = pd.DataFrame(columns = colns, index = binv_df.index) #get the blank frame
'this still needs the wsl levels attached based on your area exposure grade'
logger.debug('set dmg_df_blank with %s'%(str(self.dmg_df_blank.shape)))
return
def set_gpwr_f(self): #set your power flag
if self.is_frozen('gpwr_f'): return True#shortcut for frozen
logger = self.logger.getChild('set_gpwr_f')
#=======================================================================
# get based on aep
#=======================================================================
min_aep = int(self.model.gpwr_aep)
if self.ari < min_aep: gpwr_f = True
else: gpwr_f = False
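#worked example (hypothetical numbers): with model.gpwr_aep = 100, a 50-year
# flood (ari = 50 < 100) keeps grid power (gpwr_f = True), while a 200-year
# flood (ari = 200 >= 100) loses it (gpwr_f = False)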
logger.debug('for min_aep = %i, set gpwr_f = %s'%(min_aep, gpwr_f))
#update handler
self.handle_upd('gpwr_f', gpwr_f, proxy(self), call_func = 'set_gpwr_f')
return True
def set_wsl_frm_tbl(self, #build the raw wsl data from the passed flood table
flood_tbl_nm = None, #name of flood table to pull raw data from
#bid_l=None,
):
"""
here we get the raw values
these are later modified by the area_egrd with self.get_wsl_from_egrd()
#=======================================================================
# INPUTS
#=======================================================================
flood_tbl_df_raw: raw df of the classic flood table
columns: count, aep, aep, aep, aep....
real_columns: bldg_id, bid, depth, depth, depth, etc...
index: unique arbitrary
wsl_ser: series of wsl for this flood on each bldg_id
#=======================================================================
# calls
#=======================================================================
dynp handles Fdmg.flood_tbl_nm
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('set_wsl_frm_tbl')
if flood_tbl_nm is None: flood_tbl_nm = self.model.flood_tbl_nm
#=======================================================================
# get data
#=======================================================================
#pull the raw flood tables
ftbl_o = self.model.ftblos_d[flood_tbl_nm]
wsl_d = ftbl_o.wsl_d
df = pd.DataFrame(index = list(wsl_d.values())[0].index) #blank frame from the first entry
#=======================================================================
# loop and apply for each flood type
#=======================================================================
for ftype, df1 in wsl_d.items():
#=======================================================================
# data checks
#=======================================================================
if self.db_f:
if not ftype in ['wet', 'dry', 'damp']:
raise IOError
df_raw =df1.copy()
if not self.ari in df_raw.columns:
logger.error('the flood provided on the \'floods\' tab (\'%s\') does not have a match in the flood table: \n %s'%
(self.ari, self.model.ftblos_d[flood_tbl_nm].filepath))
raise IOError
#=======================================================================
# slice for this flood
#=======================================================================
boolcol = df1.columns == self.ari #slice for this aep
#get the series for this
wsl_ser = df1.loc[:, boolcol].iloc[:,0].astype(float)
#wsl_ser = wsl_ser.rename(ftype) #rename with the aep
'binv slicing moved to Flood_tbl.clean_data()'
#=======================================================================
# checks
#=======================================================================
if self.db_f:
if len(wsl_ser) <1:
raise IOError
""" allowing
#check for nuls
if np.any(pd.isnull(wsl_ser2)):
raise IOError"""
#=======================================================================
# wrap up report and attach
#=======================================================================
df[ftype] = wsl_ser
logger.debug('from \'%s\' for \'%s\' got wsl_ser %s for aep: %i'
%(flood_tbl_nm, ftype, str(wsl_ser.shape), self.ari))
self.wsl_df = df #set this
'not using dynps'
if self.session.state == 'init':
self.reset_d['wsl_df'] = df.copy()
return True
def set_area_egrd(self): #pull your area exposure grade from somewhere
"""
#=======================================================================
# calls
#=======================================================================
self.__init__()
dynp handles: Fdmg.flood_tbl_nm (just in case we are pulling from there)
"""
#=======================================================================
# dependency check
#=======================================================================
if not self.session.state=='init':
dep_l = [([self.model], ['set_area_prot_lvl'])]
if self.deps_is_dated(dep_l, method = 'reque', caller = 'set_area_egrd'):
return False
logger = self.logger.getChild('set_area_egrd')
#=======================================================================
# pull the egrd from another table if asked
#=======================================================================
for cnt in range(0,3,1): #loop through each one
attn = 'area_egrd%02d'%cnt
area_egrd_code = getattr(self, attn + '_code')
if area_egrd_code in ['dry', 'damp', 'wet']:
area_egrd = area_egrd_code
#===================================================================
# pull from the flood table
#===================================================================
elif area_egrd_code == '*ftbl':
ftbl_o = self.model.ftblos_d[self.model.flood_tbl_nm] #get the flood table object
area_egrd = getattr(ftbl_o, attn) #get from the table
#===================================================================
# pull from the model
#===================================================================
elif area_egrd_code == '*model':
area_egrd = getattr(self.model, attn) #get from the model
else:
logger.error('for \'%s\' got unrecognized area_egrd_code: \'%s\''%(attn, area_egrd_code))
raise IOError
#===================================================================
# set these
#===================================================================
self.handle_upd(attn, area_egrd, weakref.proxy(self), call_func = 'set_area_egrd')
'this should trigger generating a new wsl set to the blank_dmg_df'
logger.debug('set \'%s\' from \'%s\' as \'%s\''
%(attn, area_egrd_code,area_egrd))
if self.db_f:
if not area_egrd in ['dry', 'damp', 'wet']:
raise IOError
return True
def set_wsl_from_egrd(self, #calculate the wsl based on the area_egrd
df = None):
"""
This is a partial results retrieval for non damage function results
TODO:
consider checking for dependency on House.area_prot_lvl
#=======================================================================
# calls
#=======================================================================
self.__init__
dynp handles for:
Flood.area_egrd##
Fdmg.flood_tbl_nm
if area_egrd_code == *model, this loop isn't really necessary
"""
#=======================================================================
# check dependencies and frozen
#=======================================================================
if not self.session.state=='init':
dep_l = [([self], ['set_area_egrd', 'set_wsl_frm_tbl'])]
if self.deps_is_dated(dep_l, method = 'reque', caller = 'set_wsl_from_egrd'):
return False
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('set_wsl_from_egrd')
#if wsl_delta is None: wsl_delta = self.model.wsl_delta
#=======================================================================
# get data
#=======================================================================
if df is None: df = self.get_info_from_binv()
'need to have updated area_prot_lvls'
#=======================================================================
# precheck
#=======================================================================
if self.db_f:
if not isinstance(df, pd.DataFrame): raise IOError
if not len(df) > 0: raise IOError
#=======================================================================
# add the wsl for each area_egrd
#=======================================================================
for prot_lvl in range(0,3,1): #loop through each one
#get your grade for this prot_lvl
attn = 'area_egrd%02d'%prot_lvl
area_egrd = getattr(self, attn)
#identify the houses for this protection level
boolidx = df.loc[:,'area_prot_lvl'] == prot_lvl
if boolidx.sum() == 0: continue
#give them the wsl corresponding to this grade
df.loc[boolidx, 'wsl'] = self.wsl_df.loc[boolidx,area_egrd]
#set a tag for the area_egrd
if 'area_egrd' in df.columns:
df.loc[boolidx, 'area_egrd'] = area_egrd
logger.debug('for prot_lvl %i, set %i wsl from \'%s\''%(prot_lvl, boolidx.sum(), area_egrd))
#=======================================================================
# set this
#=======================================================================
self.dmg_df_blank = df
#=======================================================================
# post check
#=======================================================================
logger.debug('set dmg_df_blank with %s'%str(df.shape))
if self.session.state=='init':
self.reset_d['dmg_df_blank'] = df.copy()
if self.db_f:
if np.any(pd.isnull(df['wsl'])):
raise Error('got some wsl nulls')
return True
"""
hp_pd.v(df)
hp_pd.v(self.dmg_df_blank)
"""
def run_fld(self, **kwargs): #shortcut to collect all the functions for a simulation run
self.run_cnt += 1
dmg_df_blank = self.get_info_from_binv()
"""
view(dmg_df_blank)
"""
dmg_df = self.get_dmg_set(dmg_df_blank, **kwargs)
if self.db_f:
self.check_dmg_df(dmg_df)
'leaving this here for simplicity'
self.calc_statres_flood(dmg_df)
return dmg_df
def get_info_from_binv(self):
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('get_info_from_binv')
binv_df = self.model.binv.childmeta_df
#pull static values
binvboolcol = self.binvboolcol
df = self.dmg_df_blank.copy()
'this should have wsl added to it from set_wsl_from_egrd()'
if self.db_f:
if not len(binvboolcol) == len(binv_df.columns):
logger.warning('got length mismatch between binvboolcol (%i) and the binv_df columns (%i)'%
(len(binvboolcol), len(binv_df.columns)))
'pandas will handle this mismatch.. just ignores the end'
#=======================================================================
# #update with values from the binv
#=======================================================================
df.update(binv_df.loc[:,binvboolcol], overwrite=True) #update from all the values in the binv
logger.debug('retrieved %i values from the binv_df on: %s'
%(binv_df.loc[:,binvboolcol].count().count(), binv_df.loc[:,binvboolcol].columns.tolist()))
#=======================================================================
# macro calcs
#=======================================================================
if 'hse_depth' in df.columns:
df['hse_depth'] = df['wsl'] - df['anchor_el']
#ground water damage flag
if 'gw_f' in df.columns:
df.loc[:,'gw_f'] = df['dem_el'] > df['wsl'] #water is below grade
if self.db_f:
if 'bsmt_egrd' in binv_df.columns:
raise IOError
return df
def get_dmg_set(self, #calculate the damage for each house
dmg_df, #pre-filled frame for calculating damage results onto
#dmg_type_list='all',
#bid_l = None,
#wsl_delta = None,
dmg_rat_f =None, #include the damage ratio in results
):
"""
20190521:
I don't really like how this is structured, with one mega for loop trying to grab everything.
Instead, everything should be handled by Fdmg (which really should be wrapped back into the Session)
Each calculation/value (e.g. damage, static values, etc.) should be calculated in a dedicated loop
then we can control logic based on each value type
the controller can collect all of these results during wrap up
rather than trying to pass everything to each loop
#=======================================================================
# INPUTS
#=======================================================================
depth_ser: series of depths (for this flood) with index = bldg_id
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('get_dmg_set(%s)'%self.get_id())
if dmg_rat_f is None: dmg_rat_f = self.model.dmg_rat_f
hse_od = self.model.binv.hse_od #ordered dictionary by bid: hse_dato
#=======================================================================
# pre checks
#=======================================================================
if self.db_f:
if not isinstance(dmg_df, pd.DataFrame):
raise IOError
boolidx = dmg_df.index.isin(list(hse_od.keys()))
if not np.all(boolidx):
logger.error('some of the bldg_ids in the wsl_ser were not found in the binv: \n %s'
%dmg_df.index[~boolidx])
raise IOError
#check the damage columns are empty
boolcol = np.isin(dmg_df.columns, ['MS', 'MC', 'BS', 'BC', 'GS', 'total']) #identify damage columns
if not np.all(pd.isnull(dmg_df.loc[:,boolcol])):
raise IOError
#=======================================================================
# frame setup
#=======================================================================
#identify columns containing damage results
dmgbool = np.logical_or(
dmg_df.columns.isin(self.model.dmg_types), #damages
pd.Series(dmg_df.columns).str.contains('_rat').values
) #damage ratios
#=======================================================================
# get the damage for each house
#=======================================================================
logger.debug('getting damage for %s entries'%(str(dmg_df.shape)))
"""generally no memory added during these
self.session.prof(state='%s.get_dmg_set.loop'%(self.name)) #memory profiling"""
cnt = 0
first = True
for index, row in dmg_df.iterrows(): #loop through each row
#===================================================================
# pre-printouts
#===================================================================
#self.session.prof(state='%s.get_dmg_set.%i'%(self.name, cnt)) #memory profiling
cnt +=1
if cnt%self.session._logstep == 0: logger.info(' (%i/%i)'%(cnt, len(dmg_df)))
#===================================================================
# retrieve info
#===================================================================
hse_obj = hse_od[index] #get this house object by bldg_id
hse_obj.floodo = self #let the house know who is flooding it
logger.debug('on hse \'%s\' '%hse_obj.name)
#===================================================================
# add damage results
#===================================================================
dmg_ser = hse_obj.run_hse(row['wsl'], dmg_rat_f = dmg_rat_f)
row.update(dmg_ser) #add all these entries
#===================================================================
# extract extra attributes from the house
#===================================================================
#find the entries to skip when filling attributes
if first:
boolar = np.invert(np.logical_or( #find entries we don't want to try and get from the house
row.index.isin(['total']), #exclude the total column
np.logical_or(
np.invert(pd.isnull(row)), #exclude reals
dmgbool #exclude damages
)))
logger.debug('retrieving %i (of %i) attribute values on each house: \n %s'
%(boolar.sum(),len(boolar), row.index[boolar].values.tolist()))
first = False
#fill these
for attn, v in row[boolar].items():
row[attn] = getattr(hse_obj, attn)
#===================================================================
# wrap up
#===================================================================
dmg_df.loc[index,:] = row #store this row back into the full results frame
#=======================================================================
# extract secondary attributes
#=======================================================================
"""makes more sense to keep nulls as nulls
as this means something different than a 'zero' damage
#=======================================================================
# set null damages to zero
#=======================================================================
for coln in ['BC', 'BS']:
dmg_df.loc[:,coln] = dmg_df[coln].replace(to_replace=np.nan, value=0)"""
#=======================================================================
# macro stats
#=======================================================================
#total
boolcol = dmg_df.columns.isin(self.model.dmg_types)
dmg_df['total'] = dmg_df.iloc[:,boolcol].sum(axis = 1) #get the sum
#=======================================================================
# closeout and reporting
#=======================================================================
#print out summaries
if not self.db_f:
logger.info('finished for %i houses'%(len(dmg_df.index)))
else:
totdmg = dmg_df['total'].sum()
totdmg_str = '$' + "{:,.2f}".format(totdmg)
logger.info('got totdmg = %s for %i houses'%(totdmg_str,len(dmg_df.index)))
if np.any(pd.isnull(dmg_df)):
"""
allowing this now
view(dmg_df[dmg_df.isna().any(axis=1)])
"""
logger.warning('got %i nulls in the damage results'%dmg_df.isna().sum().sum())
for dmg_type in self.model.dmg_types:
dmg_tot = dmg_df[dmg_type].sum()
dmg_tot_str = '$' + "{:,.2f}".format(dmg_tot)
logger.debug('for dmg_type \'%s\' dmg_tot = %s'%(dmg_type, dmg_tot_str))
return dmg_df
def check_dmg_df(self, df):
logger = self.logger.getChild('check_dmg_df')
#=======================================================================
# check totals
#=======================================================================
boolcol = np.isin(df.columns, ['MS', 'MC', 'BS', 'BC', 'GS']) #identify damage columns
if not round(df['total'].sum(),2) == round(df.loc[:, boolcol].sum().sum(), 2):
logger.error('total sum did not match sum from damages')
raise IOError
def calc_statres_flood(self, df): #calculate your statistics
'running this always'
logger = self.logger.getChild('calc_statres_flood')
s = self.session.outpars_d[self.__class__.__name__]
"""needed?
self.outpath = os.path.join(self.model.outpath, self.name)"""
#=======================================================================
# total damage
#=======================================================================
for dmg_code in list(self.model.dmg_types) + ['total']:
#loop through and see if the user asked for this output
'e.g. MC, MS, BC, BS, total'
if dmg_code in s:
v = df[dmg_code].sum()
setattr(self, dmg_code, v)
logger.debug('set \'%s\' to %.2f'%(dmg_code, v))
#=======================================================================
# by flood type
#=======================================================================
if 'dmg_sw' in s:
self.dmg_sw = df.loc[~df['gw_f'], 'total'].sum() #sum all those with surface water
if 'dmg_gw' in s:
self.dmg_gw = df.loc[df['gw_f'], 'total'].sum() #sum all those with ground water
#=======================================================================
# number of houses with damage
#=======================================================================
if 'hdmg_cnt' in s:
boolidx = df.loc[:, 'total'] > 0
self.hdmg_cnt = boolidx.sum()
#=======================================================================
# average house depth
#=======================================================================
if 'hdep_avg' in s:
self.hdep_avg = np.mean(df.loc[:,'hse_depth'])
#=======================================================================
# wsl average
#=======================================================================
if 'wsl_avg' in s:
self.wsl_avg = np.mean(df.loc[:,'wsl'])
#=======================================================================
# basement exposure grade counts
#=======================================================================
'just calcing all if any of them are requested'
boolar = np.isin(np.array(['bwet_cnt', 'bdamp_cnt', 'bdry_cnt']),
np.array(s))
if np.any(boolar): self.get_begrd_cnt()
#=======================================================================
# plots
#=======================================================================
if 'dmg_res_df' in s:
self.dmg_res_df = df
"""
hp_pd.v(df)
"""
return
def get_begrd_cnt(self):
logger = self.logger.getChild('get_begrd_cnt')
df = self.dmg_res_df
#=======================================================================
# #get egrades
# try:
# ser = df.loc[:,'bsmt_egrd'] #make the slice of interest
# except:
# df.columns.values.tolist()
# raise IOError
#=======================================================================
ser = df.loc[:,'bsmt_egrd'] #make the slice of interest
begrd_l = ser.unique().tolist()
logger.debug('looping through %i bsmt_egrds: %s'%(len(begrd_l), begrd_l))
for bsmt_egrd in begrd_l:
att_n = 'b'+bsmt_egrd+'_cnt'
#count the number of occurrences
boolar = ser == bsmt_egrd
setattr(self, att_n, int(boolar.sum()))
logger.debug('setting \'%s\' = %i'%(att_n, boolar.sum()))
logger.debug('finished \n')
return
#===============================================================================
# def plot_dmg_pie(self, dmg_sum_ser_raw = None,
# exp_str = 1, title = None, wtf=None): #generate a pie chart for the damage
# """
# #=======================================================================
# # INPUTS
# #=======================================================================
# dmg_sum_ser: series of damage values (see calc_summary_ser)
# index: dmg_types
# values: fdmg totals for each type for this flood
#
# exp_main: amount to explode structural damage values by
# """
# #=======================================================================
# # set defaults
# #=======================================================================
# logger = self.logger.getChild('plot_dmg_pie')
# if title == None: title = self.session.tag + ' '+self.name+' ' + 'dmgpie_plot'
# if wtf is None: wtf = self.session._write_figs
#
# if dmg_sum_ser_raw == None: #just calculate
# dmg_sum_ser_raw = self.dmg_res_df[self.dmg_types].sum()
# #dmg_sum_ser_raw = self.calc_summary_ser()
#
# logger.debug('with dmg_sum_ser_raw: \n %s'%dmg_sum_ser_raw)
# #=======================================================================
# # data cleaning
# #=======================================================================
# #drop na
# dmg_sum_ser1 = dmg_sum_ser_raw.dropna()
# #drop zero values
# boolidx = dmg_sum_ser1 == 0
# dmg_sum_ser2 = dmg_sum_ser1[~boolidx]
#
# if np.all(boolidx):
# logger.warning('got zero damages. no pie plot generated')
# return
#
# if boolidx.sum() > 0:
# logger.warning('dmg_pie dropped %s zero totals'%dmg_sum_ser1.index[boolidx].tolist())
#
# dmg_sum_ser = dmg_sum_ser2
# #=======================================================================
# # get data
# #=======================================================================
# #shortcuts
# dmg_types = dmg_sum_ser.index.tolist()
#
# labels = dmg_types
# sizes = dmg_sum_ser.values.tolist()
#
#
# #=======================================================================
# # #get properties list from the dfunc tab
# #=======================================================================
# colors = []
# explode_list = []
# wed_lab_list = []
# dfunc_df = self.session.pars_df_d['dfunc']
#
# for dmg_type in dmg_types:
# boolidx = dfunc_df['dmg_type'] == dmg_type #id this dmg_type
#
# #color
# color = dfunc_df.loc[boolidx,'color'].values[0]
# colors.append(color) #add to the list
#
# #explode
# explode = dfunc_df.loc[boolidx,'explode'].values[0]
# explode_list.append(explode) #add to the list
#
# #wedge_lable
# wed_lab = '$' + "{:,.2f}".format(dmg_sum_ser[dmg_type])
# wed_lab_list.append(wed_lab)
#
#
# import matplotlib.pyplot as plt
# plt.close()
# fig, ax = plt.subplots()
#
#
# wedges = ax.pie(sizes, explode=explode_list, labels=labels, colors = colors,
# autopct=hp.plot.autopct_dollars(sizes),
# shadow=True, startangle=90)
#
# ax.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
#
# ax.set_title(title)
#
# if wtf: #write to file
# filetail = self.session.name + ' '+self.name+' ' + 'dmgpie_plot'
# filename = os.path.join(self.model.outpath, filetail)
# hp.plot.save_fig(self, fig, savepath_raw = filename)
#
# return ax
#
#===============================================================================
#===============================================================================
# def plot_dmg_scatter(self, #scatter plot of damage for each house
# dmg_df_raw=None, yvar = 'hse_depth', xvar = 'total', plot_zeros=True,
# title=None, wtf=None, ax=None,
# linewidth = 0, markersize = 3, marker = 'x',
# **kwargs):
#
# """
# for complex figures, axes should be passed and returned
# #=======================================================================
# # INPUTS
# #=======================================================================
# should really leave this for post processing
# plot_zeros: flag to indicate whether entries with x value = 0 should be included
#
# #=======================================================================
# # TODO
# #=======================================================================
# redo this with the plot worker
# """
#
# #=======================================================================
# # defaults
# #=======================================================================
# logger = self.logger.getChild('plot_dmg_scatter')
# if title == None: title = self.session.tag + ' '+self.name + ' dmg_scatter_plot'
# if wtf is None: wtf = self.session._write_figs
#
#
# if dmg_df_raw == None:
# dmg_res_df_raw = self.dmg_res_df #just use the attached one
#
# if not hp_pd.isdf(dmg_res_df_raw): raise IOError
#
# #=======================================================================
# # manipulate data for plotting
# #=======================================================================
# if plot_zeros:
# dmg_df = dmg_res_df_raw
# else:
# #exclude those entries with zero value on the xvar
# boolidx = dmg_res_df_raw[xvar] == 0
# dmg_df = dmg_res_df_raw[~boolidx]
# self.logger.warning('%s values = zero (%i) excluded from plot'%(xvar, boolidx.sum()))
#
# #=======================================================================
# # setup data plot
# #=======================================================================
# x_ar = dmg_df[xvar].values.tolist() #damage
# xlab = 'damage($)'
# 'could make this more dynamic'
#
# if sum(x_ar) <=0:
# logger.warning('got no damage. no plot generated')
# return
#
# y_ar = dmg_df[yvar].values.tolist() #depth
#
#
# #=======================================================================
# # SEtup defaults
# #=======================================================================
# if ax == None:
# plt.close('all')
# fig = plt.figure(2)
# fig.set_size_inches(9, 6)
# ax = fig.add_subplot(111)
#
# ax.set_title(title)
# ax.set_ylabel(yvar + '(m)')
# ax.set_xlabel(xlab)
#
# #set limits
# #ax.set_xlim(min(x_ar), max(x_ar))
# #ax.set_ylim(min(y_ar), max(y_ar))
# else:
# fig = ax.figure
#
# label = self.name + ' ' + xvar
# #=======================================================================
# send the data for plotting
# #=======================================================================
#
# pline = ax.plot(x_ar,y_ar,
# label = label,
# linewidth = linewidth, markersize = markersize, marker = marker,
# **kwargs)
#
#
#
# #=======================================================================
# # post formatting
# #=======================================================================
# ax.get_xaxis().set_major_formatter(
# matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
#
# """
#
# plt.show()
#
#
# """
#
# if wtf: #trigger for saving the figure
# filetail = title
# filename = os.path.join(self.model.outpath, filetail)
# hp.plot.save_fig(self, fig, savepath_raw = filename, logger=logger)
#
#
# return pline
#
#===============================================================================
class Binv( #class object for a building inventory
hp_data.Data_wrapper,
#hp.plot.Plot_o,
hp_sim.Sim_o,
hp_oop.Parent,
hp_oop.Child):
#===========================================================================
# program pars
#===========================================================================
# legacy index numbers
legacy_ind_d = {0:'ID',1:'address',2:'CPID',10:'class', 11:'struct_type', 13:'gis_area',
18:'bsmt_f', 19:'ff_height', 20:'xcoord',21:'ycoord', 25:'dem_el'}
#column index where the legacy binv transitions to the new binv
legacy_break_ind = 26
#column names expected in the cleaned binv
"""using special types to cut down on the size"""
exp_coltyp_d = {#'name':str,'anchor_el':float, #calculated (adding these in later)
'bid':'uint16', #id for each asset.. generally the mind
'gis_area':'Float32', #eventually we should take this off
'bsmt_f':bool, 'ff_height':'Float32',
'dem_el':'Float32',
'acode_s':str,'acode_c':str,
'parcel_area':'Float32',
#'f1area':'Float32','f0area':'Float32','f1a_uf':'Float32','f0a_uf':'Float32',
'asector':str,
#'lval':'Float32','rval':'Float32' #calculate udev externally
}
#additional column names the binv will accept (but does not require)
alwd_coltyp_d = {'bkflowv_f':bool,'sumpump_f':bool, 'genorat_f':bool,
'B_f_height':'Float32',
'ayoc':int}
#rounding parameters
coln_rnd_d = {'dem_el':2, 'B_f_height':2, 'ff_height':2}
#hse_type_list = ['AA', 'AD', 'BA', 'BC', 'BD', 'CA', 'CC', 'CD'] #classification of building types
#===========================================================================
# user provided
#===========================================================================
legacy_binv_f = True
#===========================================================================
# calculated pars
#===========================================================================
#===========================================================================
# data holders
#===========================================================================
#cnt = 0
hnew_cnt = 0
hAD_cnt = 0
def __init__(self, *vars, **kwargs):
logger = mod_logger.getChild('Binv')
logger.debug('start _init_')
"""Im explicitly attaching the child datobuilder here
dont want to change the syntax of the binv
inspect.isclass(self.kid_class)
"""
self.inherit_parent_ans=set(['mind', 'legacy_binv_f', 'gis_area_max'])
super(Binv, self).__init__(*vars, **kwargs) #initilzie teh baseclass
#=======================================================================
# special inheritance
#=======================================================================
#self.model = self.parent
self.kid_class = House
self.reset_d.update({'hnew_cnt':0, 'hAD_cnt':0})
#=======================================================================
# checks
#=======================================================================
if self.db_f:
if not self.kid_class == House:
raise IOError
if not isinstance(self.reset_d, dict):
raise IOError
if self.model is None:
raise IOError
if not self.model.name == self.parent.name:
raise IOError
#=======================================================================
# special inits
#=======================================================================
if not self.mind in self.exp_coltyp_d:
raise Error('requested mind \'%s\' is not a valid binv column'%self.mind)
"""just require this
self.exepcted_coln = set(self.exepcted_coln + [self.mind]) #expect the mind in the column names as well"""
self.load_data()
logger.debug('finished _init_ \n')
return
def load_data(self): #custom data loader
#=======================================================================
# defaults
#=======================================================================
log = self.logger.getChild('load_data')
#test pars
if self.session._parlo_f:
test_trim_row = self.test_trim_row
else: test_trim_row = None
#=======================================================================
# load the file
#=======================================================================
self.filepath = self.get_filepath()
log.debug('from filepath: %s'%self.filepath)
#load from file
df_raw = hp_pd.load_xls_df(self.filepath,
logger=log,
test_trim_row = test_trim_row,
header = 0, index_col = None)
#=======================================================================
# send for cleaning
#=======================================================================
df1 = hp_pd.clean_datapars(df_raw, logger = log)
"""
hp_pd.v(df3)
"""
#=======================================================================
# clean per the legacy binv
#=======================================================================
if self.legacy_binv_f:
df2 = self.legacy_clean_df(df1)
else:
df2 = df1
#=======================================================================
# standard clean
#=======================================================================
df3 = self.clean_inv_df(df2)
#=======================================================================
# macro data manipulations
#=======================================================================
df4 = self.pre_calc(df3)
#=======================================================================
# checking
#=======================================================================
if self.db_f:
self.check_binv_df(df4)
#=======================================================================
# #shortcut lists
#=======================================================================
self.bid_l = tuple(df4[self.mind].values.tolist())
self.acode_l = self.get_acodes(df4) #make the reference
#=======================================================================
# wrap up
#=======================================================================
self.childmeta_df = df4.copy()
log.info('attached binv_df with %s'%str(df4.shape))
return
"""
view(df4)
"""
def get_acodes(self,
df,
null_val = 'none'): #get the set of requested acodes
log = self.logger.getChild('get_acodes')
s = set()
#loop through the acode columns
for coln in ('acode_s', 'acode_c'):
#find null values
boolidx = df[coln] == null_val
s.update(df.loc[~boolidx, coln].unique().tolist())#add the contents codes also
log.debug('found %i acodes'%len(s))
return tuple(s)
def legacy_clean_df(self, df_raw): #compile data from legacy (rfda) inventory syntax
"""
pulling column headers from the dictionary of location keys
creating some new headers as combinations of this
"""
#=======================================================================
# setup
#=======================================================================
logger = self.logger.getChild('legacy_clean_df')
d = self.legacy_ind_d
#=======================================================================
# split the df into legacy and non
#=======================================================================
df_leg_raw = df_raw.iloc[:,0:self.legacy_break_ind]
df_new = df_raw.iloc[:,self.legacy_break_ind+1:]
#=======================================================================
# clean the legacy frame
#=======================================================================
#change all the column names
df_leg1 = df_leg_raw.copy()
""" couldnt get this to work
df_leg1.rename(mapper=d, index = 'column')"""
for colind, coln in enumerate(df_leg_raw.columns):
if not colind in list(d.keys()):continue
df_leg1.rename(columns = {coln:d[colind]}, inplace=True)
logger.debug('renamed \'%s\' to \'%s\''%(coln,d[colind] ))
#trim down to these useful columns
boolcol = df_leg1.columns.isin(list(d.values())) #identify columns in the translation dictionary
df_leg2 = df_leg1.loc[:,boolcol]
logger.debug('trimmed legacy binv from %i to %i cols'%(len(df_leg_raw.columns), boolcol.sum()))
#=======================================================================
# add back the new frame
#=======================================================================
df_merge = df_leg2.join(df_new)
#=======================================================================
# house type
#=======================================================================
df_merge.loc[:,'acode_s'] = df_leg2.loc[:,'class'] + df_leg2.loc[:,'struct_type']
logger.debug('cleaned the binv from %s to %s'%(str(df_raw.shape), str(df_merge.shape)))
if self.db_f:
if not len(df_merge) == len(df_raw):
raise IOError
if np.any(pd.isnull(df_merge['acode_s'])):
raise IOError
return df_merge
"""
hp_pd.v(df_leg_raw)
hp_pd.v(df_merge)
hp_pd.v(df_raw)
"""
def clean_inv_df(self, #custom binv cleaning
df_raw,
):
"""
consider using the datos_fdmg wraps
"""
logger = self.logger.getChild('clean_inv_df')
#clean with kill_flags
'this makes it easy to trim the data'
df1 = hp_pd.clean_kill_flag(df_raw, logger = logger)
#=======================================================================
# #reindex by a sorted mind (and keep the column)
#=======================================================================
df1.loc[:,self.mind] = df1.loc[:,self.mind].astype(int) #typeset the mind (index) column
df1 = df1.set_index(self.mind, drop=False, verify_integrity=True).sort_index()
#=======================================================================
# mandatory columns
#=======================================================================
#check we got all the columns we require
exp_coln_ar = np.array(list(self.exp_coltyp_d.keys()))
boolar = np.invert(
|
np.isin(exp_coln_ar, df1.columns)
|
numpy.isin
|
################################################################################
# Copyright (C) 2013-2015 <NAME>
#
# This file is licensed under the MIT License.
################################################################################
"""
Unit tests for `gaussian` module.
"""
import numpy as np
from scipy import special
from numpy import testing
from .. import gaussian
from bayespy.nodes import (Gaussian,
GaussianARD,
GaussianGamma,
Gamma,
Wishart,
ConcatGaussian)
from ..wishart import WishartMoments
from ...vmp import VB
from bayespy.utils import misc
from bayespy.utils import linalg
from bayespy.utils import random
from bayespy.utils.misc import TestCase
class TestGaussianFunctions(TestCase):
def test_rotate_covariance(self):
"""
Test the Gaussian array covariance rotation.
"""
# Check matrix
R = np.random.randn(2,2)
Cov = np.random.randn(2,2)
self.assertAllClose(gaussian.rotate_covariance(Cov, R),
np.einsum('ik,kl,lj', R, Cov, R.T))
# Check matrix with plates
R = np.random.randn(2,2)
Cov = np.random.randn(4,3,2,2)
self.assertAllClose(gaussian.rotate_covariance(Cov, R),
np.einsum('...ik,...kl,...lj', R, Cov, R.T))
# Check array, first axis
R = np.random.randn(2,2)
Cov = np.random.randn(2,3,3,2,3,3)
self.assertAllClose(gaussian.rotate_covariance(Cov, R,
ndim=3,
axis=-3),
np.einsum('...ik,...kablcd,...lj->...iabjcd',
R,
Cov,
R.T))
self.assertAllClose(gaussian.rotate_covariance(Cov, R,
ndim=3,
axis=0),
np.einsum('...ik,...kablcd,...lj->...iabjcd',
R,
Cov,
R.T))
# Check array, middle axis
R = np.random.randn(2,2)
Cov = np.random.randn(3,2,3,3,2,3)
self.assertAllClose(gaussian.rotate_covariance(Cov, R,
ndim=3,
axis=-2),
np.einsum('...ik,...akbcld,...lj->...aibcjd',
R,
Cov,
R.T))
self.assertAllClose(gaussian.rotate_covariance(Cov, R,
ndim=3,
axis=1),
np.einsum('...ik,...akbcld,...lj->...aibcjd',
R,
Cov,
R.T))
# Check array, last axis
R = np.random.randn(2,2)
Cov = np.random.randn(3,3,2,3,3,2)
self.assertAllClose(gaussian.rotate_covariance(Cov, R,
ndim=3,
axis=-1),
np.einsum('...ik,...abkcdl,...lj->...abicdj',
R,
Cov,
R.T))
self.assertAllClose(gaussian.rotate_covariance(Cov, R,
ndim=3,
axis=2),
np.einsum('...ik,...abkcdl,...lj->...abicdj',
R,
Cov,
R.T))
# Check array, middle axis with plates
R = np.random.randn(2,2)
Cov = np.random.randn(4,4,3,2,3,3,2,3)
self.assertAllClose(gaussian.rotate_covariance(Cov, R,
ndim=3,
axis=-2),
np.einsum('...ik,...akbcld,...lj->...aibcjd',
R,
Cov,
R.T))
self.assertAllClose(gaussian.rotate_covariance(Cov, R,
ndim=3,
axis=1),
np.einsum('...ik,...akbcld,...lj->...aibcjd',
R,
Cov,
R.T))
pass
class TestGaussianARD(TestCase):
def test_init(self):
"""
Test the constructor of GaussianARD
"""
def check_init(true_plates, true_shape, mu, alpha, **kwargs):
X = GaussianARD(mu, alpha, **kwargs)
self.assertEqual(X.dims, (true_shape, true_shape+true_shape),
msg="Constructed incorrect dimensionality")
self.assertEqual(X.plates, true_plates,
msg="Constructed incorrect plates")
#
# Create from constant parents
#
# Use ndim=0 for constant mu
check_init((),
(),
0,
1)
check_init((3,2),
(),
np.zeros((3,2,)),
np.ones((2,)))
check_init((4,2,2,3),
(),
np.zeros((2,1,3,)),
np.ones((4,1,2,3)))
# Use ndim
check_init((4,2),
(2,3),
np.zeros((2,1,3,)),
np.ones((4,1,2,3)),
ndim=2)
# Use shape
check_init((4,2),
(2,3),
np.zeros((2,1,3,)),
np.ones((4,1,2,3)),
shape=(2,3))
# Use ndim and shape
check_init((4,2),
(2,3),
np.zeros((2,1,3,)),
np.ones((4,1,2,3)),
ndim=2,
shape=(2,3))
#
# Create from node parents
#
# ndim=0 by default
check_init((3,),
(),
GaussianARD(0, 1,
plates=(3,)),
Gamma(1, 1,
plates=(3,)))
check_init((4,2,2,3),
(),
GaussianARD(np.zeros((2,1,3)),
np.ones((2,1,3)),
ndim=3),
Gamma(np.ones((4,1,2,3)),
np.ones((4,1,2,3))))
# Use ndim
check_init((4,),
(2,2,3),
GaussianARD(np.zeros((4,1,2,3)),
np.ones((4,1,2,3)),
ndim=2),
Gamma(np.ones((4,2,1,3)),
np.ones((4,2,1,3))),
ndim=3)
# Use shape
check_init((4,),
(2,2,3),
GaussianARD(np.zeros((4,1,2,3)),
np.ones((4,1,2,3)),
ndim=2),
Gamma(np.ones((4,2,1,3)),
np.ones((4,2,1,3))),
shape=(2,2,3))
# Use ndim and shape
check_init((4,2),
(2,3),
GaussianARD(np.zeros((2,1,3)),
np.ones((2,1,3)),
ndim=2),
Gamma(np.ones((4,1,2,3)),
np.ones((4,1,2,3))),
ndim=2,
shape=(2,3))
# Test for a found bug
check_init((),
(3,),
np.ones(3),
1,
ndim=1)
# Parent mu has more axes
check_init(
(2,),
(3,),
GaussianARD(np.zeros((2,3)),
np.ones((2,3)),
ndim=2),
np.ones((2,3)),
ndim=1
)
# DO NOT add axes if necessary
self.assertRaises(
ValueError,
GaussianARD,
GaussianARD(np.zeros((2,3)),
np.ones((2,3)),
ndim=2),
1,
ndim=3
)
#
# Errors
#
# Inconsistent shapes
self.assertRaises(ValueError,
GaussianARD,
GaussianARD(np.zeros((2,3)),
np.ones((2,3)),
ndim=1),
np.ones((4,3)),
ndim=2)
# Inconsistent dims of mu and alpha
self.assertRaises(ValueError,
GaussianARD,
np.zeros((2,3)),
np.ones((2,)))
# Inconsistent plates of mu and alpha
self.assertRaises(ValueError,
GaussianARD,
GaussianARD(np.zeros((3,2,3)),
np.ones((3,2,3)),
ndim=2),
np.ones((3,4,2,3)),
ndim=3)
# Inconsistent ndim and shape
self.assertRaises(ValueError,
GaussianARD,
np.zeros((2,3)),
np.ones((2,)),
shape=(2,3),
ndim=1)
# Incorrect shape
self.assertRaises(ValueError,
GaussianARD,
GaussianARD(np.zeros((2,3)),
np.ones((2,3)),
ndim=2),
np.ones((2,3)),
shape=(2,2))
pass
def test_message_to_child(self):
"""
Test moments of GaussianARD.
"""
# Check that moments have full shape when broadcasting
X = GaussianARD(np.zeros((2,)),
np.ones((3,2)),
shape=(4,3,2))
(u0, u1) = X._message_to_child()
self.assertEqual(np.shape(u0),
(4,3,2))
self.assertEqual(np.shape(u1),
(4,3,2,4,3,2))
# Check the formula
X = GaussianARD(2, 3)
(u0, u1) = X._message_to_child()
self.assertAllClose(u0, 2)
self.assertAllClose(u1, 2**2 + 1/3)
# Check the formula for multidimensional arrays
X = GaussianARD(2*np.ones((2,1,4)),
3*np.ones((2,3,1)),
ndim=3)
(u0, u1) = X._message_to_child()
self.assertAllClose(u0, 2*np.ones((2,3,4)))
self.assertAllClose(u1,
2**2 * np.ones((2,3,4,2,3,4))
+ 1/3 * misc.identity(2,3,4))
# Check the formula for dim-broadcasted mu
X = GaussianARD(2*np.ones((3,1)),
3*np.ones((2,3,4)),
ndim=3)
(u0, u1) = X._message_to_child()
self.assertAllClose(u0, 2*np.ones((2,3,4)))
self.assertAllClose(u1,
2**2 * np.ones((2,3,4,2,3,4))
+ 1/3 * misc.identity(2,3,4))
# Check the formula for dim-broadcasted alpha
X = GaussianARD(2*np.ones((2,3,4)),
3*np.ones((3,1)),
ndim=3)
(u0, u1) = X._message_to_child()
self.assertAllClose(u0, 2*np.ones((2,3,4)))
self.assertAllClose(u1,
2**2 * np.ones((2,3,4,2,3,4))
+ 1/3 * misc.identity(2,3,4))
# Check the formula for dim-broadcasted mu and alpha
X = GaussianARD(2*np.ones((3,1)),
3*np.ones((3,1)),
shape=(2,3,4))
(u0, u1) = X._message_to_child()
self.assertAllClose(u0, 2*np.ones((2,3,4)))
self.assertAllClose(u1,
2**2 * np.ones((2,3,4,2,3,4))
+ 1/3 * misc.identity(2,3,4))
# Check the formula for dim-broadcasted mu with plates
mu = GaussianARD(2*np.ones((5,1,3,4)),
np.ones((5,1,3,4)),
shape=(3,4),
plates=(5,1))
X = GaussianARD(mu,
3*np.ones((5,2,3,4)),
shape=(2,3,4),
plates=(5,))
(u0, u1) = X._message_to_child()
self.assertAllClose(u0, 2*np.ones((5,2,3,4)))
self.assertAllClose(u1,
2**2 * np.ones((5,2,3,4,2,3,4))
+ 1/3 * misc.identity(2,3,4))
# Check posterior
X = GaussianARD(2, 3)
Y = GaussianARD(X, 1)
Y.observe(10)
X.update()
(u0, u1) = X._message_to_child()
self.assertAllClose(u0,
1/(3+1) * (3*2 + 1*10))
self.assertAllClose(u1,
(1/(3+1) * (3*2 + 1*10))**2 + 1/(3+1))
pass
def test_message_to_parent_mu(self):
"""
Test that GaussianARD computes the message to the 1st parent correctly.
"""
# Check formula with uncertain parent alpha
mu = GaussianARD(0, 1)
alpha = Gamma(2,1)
X = GaussianARD(mu,
alpha)
X.observe(3)
(m0, m1) = mu._message_from_children()
#(m0, m1) = X._message_to_parent(0)
self.assertAllClose(m0,
2*3)
self.assertAllClose(m1,
-0.5*2)
# Check formula with uncertain node
mu = GaussianARD(1, 1e10)
X = GaussianARD(mu, 2)
Y = GaussianARD(X, 1)
Y.observe(5)
X.update()
(m0, m1) = mu._message_from_children()
self.assertAllClose(m0,
2 * 1/(2+1)*(2*1+1*5))
self.assertAllClose(m1,
-0.5*2)
# Check alpha larger than mu
mu = GaussianARD(np.zeros((2,3)), 1e10, shape=(2,3))
X = GaussianARD(mu,
2*np.ones((3,2,3)))
X.observe(3*np.ones((3,2,3)))
(m0, m1) = mu._message_from_children()
self.assertAllClose(m0,
2*3 * 3 * np.ones((2,3)))
self.assertAllClose(m1,
-0.5 * 3 * 2*misc.identity(2,3))
# Check mu larger than alpha
mu = GaussianARD(np.zeros((3,2,3)), 1e10, shape=(3,2,3))
X = GaussianARD(mu,
2*np.ones((2,3)))
X.observe(3*np.ones((3,2,3)))
(m0, m1) = mu._message_from_children()
self.assertAllClose(m0,
2 * 3 * np.ones((3,2,3)))
self.assertAllClose(m1,
-0.5 * 2*misc.identity(3,2,3))
# Check node larger than mu and alpha
mu = GaussianARD(np.zeros((2,3)), 1e10, shape=(2,3))
X = GaussianARD(mu,
2*np.ones((3,)),
shape=(3,2,3))
X.observe(3*np.ones((3,2,3)))
(m0, m1) = mu._message_from_children()
self.assertAllClose(m0,
2*3 * 3*np.ones((2,3)))
self.assertAllClose(m1,
-0.5 * 2 * 3*misc.identity(2,3))
# Check broadcasting of dimensions
mu = GaussianARD(np.zeros((2,1)), 1e10, shape=(2,1))
X = GaussianARD(mu,
2*np.ones((2,3)),
shape=(2,3))
X.observe(3*np.ones((2,3)))
(m0, m1) = mu._message_from_children()
self.assertAllClose(m0,
2*3 * 3*np.ones((2,1)))
self.assertAllClose(m1,
-0.5 * 2 * 3*misc.identity(2,1))
# Check plates for smaller mu than node
mu = GaussianARD(0,1,
shape=(3,),
plates=(4,1,1))
X = GaussianARD(mu,
2*np.ones((3,)),
shape=(2,3),
plates=(4,5))
X.observe(3*np.ones((4,5,2,3)))
(m0, m1) = mu._message_from_children()
self.assertAllClose(m0 * np.ones((4,1,1,3)),
2*3 * 5*2*np.ones((4,1,1,3)))
self.assertAllClose(m1 * np.ones((4,1,1,3,3)),
-0.5*2 * 5*2*misc.identity(3) * np.ones((4,1,1,3,3)))
# Check mask
mu = GaussianARD(np.zeros((2,1,3)), 1e10, shape=(3,))
X = GaussianARD(mu,
2*np.ones((2,4,3)),
shape=(3,),
plates=(2,4,))
X.observe(3*np.ones((2,4,3)), mask=[[True, True, True, False],
[False, True, False, True]])
(m0, m1) = mu._message_from_children()
self.assertAllClose(m0,
(2*3 * np.ones((2,1,3))
* np.array([[[3]], [[2]]])))
self.assertAllClose(m1,
(-0.5*2 * misc.identity(3)
* np.ones((2,1,1,1))
* np.array([[[[3]]], [[[2]]]])))
# Check mask with different shapes
mu = GaussianARD(np.zeros((2,1,3)), 1e10, shape=())
X = GaussianARD(mu,
2*np.ones((2,4,3)),
shape=(3,),
plates=(2,4,))
mask = np.array([[True, True, True, False],
[False, True, False, True]])
X.observe(3*np.ones((2,4,3)), mask=mask)
(m0, m1) = mu._message_from_children()
self.assertAllClose(m0,
2*3 * np.sum(np.ones((2,4,3))*mask[...,None],
axis=-2,
keepdims=True))
self.assertAllClose(m1,
(-0.5*2 * np.sum(np.ones((2,4,3))*mask[...,None],
axis=-2,
keepdims=True)))
# Check non-ARD Gaussian child
mu = np.array([1,2])
Mu = GaussianARD(mu, 1e10, shape=(2,))
alpha = np.array([3,4])
Lambda = np.array([[1, 0.5],
[0.5, 1]])
X = GaussianARD(Mu, alpha, ndim=1)
Y = Gaussian(X, Lambda)
y = np.array([5,6])
Y.observe(y)
X.update()
(m0, m1) = Mu._message_from_children()
mean = np.dot(np.linalg.inv(np.diag(alpha)+Lambda),
np.dot(np.diag(alpha), mu)
+ np.dot(Lambda, y))
self.assertAllClose(m0,
np.dot(np.diag(alpha), mean))
self.assertAllClose(m1,
-0.5*np.diag(alpha))
# Check broadcasted variable axes
mu = GaussianARD(np.zeros(1), 1e10, shape=(1,))
X = GaussianARD(mu,
2,
shape=(3,))
X.observe(3*np.ones(3))
(m0, m1) = mu._message_from_children()
self.assertAllClose(m0,
2*3 * np.sum(np.ones(3), axis=-1, keepdims=True))
self.assertAllClose(m1,
-0.5*2 * np.sum(np.identity(3),
axis=(-1,-2),
keepdims=True))
pass
def test_message_to_parent_alpha(self):
"""
Test the message from GaussianARD to the 2nd parent (alpha).
"""
# Check formula with uncertain parent mu
mu = GaussianARD(1,1)
tau = Gamma(0.5*1e10, 1e10)
X = GaussianARD(mu,
tau)
X.observe(3)
(m0, m1) = tau._message_from_children()
self.assertAllClose(m0,
-0.5*(3**2 - 2*3*1 + 1**2+1))
self.assertAllClose(m1,
0.5)
# Check formula with uncertain node
tau = Gamma(1e10, 1e10)
X = GaussianARD(2, tau)
Y = GaussianARD(X, 1)
Y.observe(5)
X.update()
(m0, m1) = tau._message_from_children()
self.assertAllClose(m0,
-0.5*(1/(1+1)+3.5**2 - 2*3.5*2 + 2**2))
self.assertAllClose(m1,
0.5)
# Check alpha larger than mu
alpha = Gamma(np.ones((3,2,3))*1e10, 1e10)
X = GaussianARD(np.ones((2,3)),
alpha,
ndim=3)
X.observe(2*np.ones((3,2,3)))
(m0, m1) = alpha._message_from_children()
self.assertAllClose(m0 * np.ones((3,2,3)),
-0.5*(2**2 - 2*2*1 + 1**2) * np.ones((3,2,3)))
self.assertAllClose(m1*np.ones((3,2,3)),
0.5*np.ones((3,2,3)))
# Check mu larger than alpha
tau = Gamma(np.ones((2,3))*1e10, 1e10)
X = GaussianARD(np.ones((3,2,3)),
tau,
ndim=3)
X.observe(2*np.ones((3,2,3)))
(m0, m1) = tau._message_from_children()
self.assertAllClose(m0,
-0.5*(2**2 - 2*2*1 + 1**2) * 3 * np.ones((2,3)))
self.assertAllClose(m1 * np.ones((2,3)),
0.5 * 3 * np.ones((2,3)))
# Check node larger than mu and alpha
tau = Gamma(np.ones((3,))*1e10, 1e10)
X = GaussianARD(np.ones((2,3)),
tau,
shape=(3,2,3))
X.observe(2*np.ones((3,2,3)))
(m0, m1) = tau._message_from_children()
self.assertAllClose(m0 * np.ones(3),
-0.5*(2**2 - 2*2*1 + 1**2) * 6 * np.ones((3,)))
self.assertAllClose(m1 * np.ones(3),
0.5 * 6 * np.ones(3))
# Check plates for smaller mu than node
tau = Gamma(np.ones((4,1,2,3))*1e10, 1e10)
X = GaussianARD(GaussianARD(1, 1,
shape=(3,),
plates=(4,1,1)),
tau,
shape=(2,3),
plates=(4,5))
X.observe(2*np.ones((4,5,2,3)))
(m0, m1) = tau._message_from_children()
self.assertAllClose(m0 * np.ones((4,1,2,3)),
(-0.5 * (2**2 - 2*2*1 + 1**2+1)
* 5*np.ones((4,1,2,3))))
self.assertAllClose(m1 * np.ones((4,1,2,3)),
5*0.5 * np.ones((4,1,2,3)))
# Check mask
tau = Gamma(np.ones((4,3))*1e10, 1e10)
X = GaussianARD(np.ones(3),
tau,
shape=(3,),
plates=(2,4,))
X.observe(2*np.ones((2,4,3)), mask=[[True, False, True, False],
[False, True, True, False]])
(m0, m1) = tau._message_from_children()
self.assertAllClose(m0 * np.ones((4,3)),
(-0.5 * (2**2 - 2*2*1 + 1**2)
* np.ones((4,3))
* np.array([[1], [1], [2], [0]])))
self.assertAllClose(m1 * np.ones((4,3)),
0.5 * np.array([[1], [1], [2], [0]]) * np.ones((4,3)))
# Check non-ARD Gaussian child
mu = np.array([1,2])
alpha = np.array([3,4])
Alpha = Gamma(alpha*1e10, 1e10)
Lambda = np.array([[1, 0.5],
[0.5, 1]])
X = GaussianARD(mu, Alpha, ndim=1)
Y = Gaussian(X, Lambda)
y = np.array([5,6])
Y.observe(y)
X.update()
(m0, m1) = Alpha._message_from_children()
Cov = np.linalg.inv(np.diag(alpha)+Lambda)
mean = np.dot(Cov, np.dot(np.diag(alpha), mu)
+ np.dot(Lambda, y))
self.assertAllClose(m0 * np.ones(2),
-0.5 * np.diag(
np.outer(mean, mean) + Cov
- np.outer(mean, mu)
- np.outer(mu, mean)
+ np.outer(mu, mu)))
self.assertAllClose(m1 * np.ones(2),
0.5 * np.ones(2))
pass
def test_message_to_parents(self):
""" Check gradient passed to inputs parent node """
D = 3
X = Gaussian(np.random.randn(D), random.covariance(D))
a = Gamma(np.random.rand(D), np.random.rand(D))
Y = GaussianARD(X, a)
Y.observe(np.random.randn(D))
self.assert_message_to_parent(Y, X)
self.assert_message_to_parent(Y, a)
pass
def test_lowerbound(self):
"""
Test the variational Bayesian lower bound term for GaussianARD.
"""
# Test vector formula with full noise covariance
m = np.random.randn(2)
alpha = np.random.rand(2)
y = np.random.randn(2)
X = GaussianARD(m, alpha, ndim=1)
V = np.array([[3,1],[1,3]])
Y = Gaussian(X, V)
Y.observe(y)
X.update()
Cov = np.linalg.inv(np.diag(alpha) + V)
mu = np.dot(Cov, np.dot(V, y) + alpha*m)
x2 = np.outer(mu, mu) + Cov
logH_X = (+ 2*0.5*(1+np.log(2*np.pi))
+ 0.5*np.log(np.linalg.det(Cov)))
logp_X = (- 2*0.5*np.log(2*np.pi)
+ 0.5*np.log(np.linalg.det(np.diag(alpha)))
- 0.5*np.sum(np.diag(alpha)
* (x2
- np.outer(mu,m)
- np.outer(m,mu)
+ np.outer(m,m))))
self.assertAllClose(logp_X + logH_X,
X.lower_bound_contribution())
def check_lower_bound(shape_mu, shape_alpha, plates_mu=(), **kwargs):
M = GaussianARD(np.ones(plates_mu + shape_mu),
np.ones(plates_mu + shape_mu),
shape=shape_mu,
plates=plates_mu)
if not ('ndim' in kwargs or 'shape' in kwargs):
kwargs['ndim'] = len(shape_mu)
X = GaussianARD(M,
2*np.ones(shape_alpha),
**kwargs)
Y = GaussianARD(X,
3*np.ones(X.get_shape(0)),
**kwargs)
Y.observe(4*np.ones(Y.get_shape(0)))
X.update()
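# Explanatory note (not in the original test): in this scalar setting the prior
# mean node M has expectation 1, X has prior precision 2, and Y observes 4 with
# precision 3, so the posterior of X is Gaussian with Cov = 1/(2+3) and
# mean = Cov*(2*1 + 3*4), which is what the next lines compute.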
Cov = 1/(2+3)
mu = Cov * (2*1 + 3*4)
x2 = mu**2 + Cov
logH_X = (+ 0.5*(1+np.log(2*np.pi))
+ 0.5*np.log(Cov))
logp_X = (- 0.5*np.log(2*np.pi)
+ 0.5*np.log(2)
- 0.5*2*(x2 - 2*mu*1 + 1**2+1))
r = np.prod(X.get_shape(0))
self.assertAllClose(r * (logp_X + logH_X),
X.lower_bound_contribution())
# Test scalar formula
check_lower_bound((), ())
# Test array formula
check_lower_bound((2,3), (2,3))
# Test dim-broadcasting of mu
check_lower_bound((3,1), (2,3,4))
# Test dim-broadcasting of alpha
check_lower_bound((2,3,4), (3,1))
# Test dim-broadcasting of mu and alpha
check_lower_bound((3,1), (3,1),
shape=(2,3,4))
# Test dim-broadcasting of mu with plates
check_lower_bound((), (),
plates_mu=(),
shape=(),
plates=(5,))
# BUG: Scalar parents for array variable caused einsum error
check_lower_bound((), (),
shape=(3,))
# BUG: Log-det was summed over plates
check_lower_bound((), (),
shape=(3,),
plates=(4,))
pass
def test_rotate(self):
"""
Test the rotation of Gaussian ARD arrays.
"""
def check(shape, plates, einsum_x, einsum_xx, axis=-1):
# TODO/FIXME: Improve by having non-diagonal precision/covariance
# parameter for the Gaussian X
D = shape[axis]
X = GaussianARD(np.random.randn(*(plates+shape)),
np.random.rand(*(plates+shape)),
shape=shape,
plates=plates)
(x, xx) = X.get_moments()
R = np.random.randn(D,D)
X.rotate(R, axis=axis)
(rx, rxxr) = X.get_moments()
self.assertAllClose(rx,
np.einsum(einsum_x, R, x))
self.assertAllClose(rxxr,
np.einsum(einsum_xx, R, xx, R))
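# Explanatory note (not in the original test): for the vector case the einsum
# strings above amount to R @ x and R @ xx @ R.T; the array cases apply the
# same contraction, but only along the selected axis.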
pass
# Rotate vector
check((3,), (),
'...jk,...k->...j',
'...mk,...kl,...nl->...mn')
check((3,), (2,4),
'...jk,...k->...j',
'...mk,...kl,...nl->...mn')
# Rotate array
check((2,3,4), (),
'...jc,...abc->...abj',
'...mc,...abcdef,...nf->...abmden',
axis=-1)
check((2,3,4), (5,6),
'...jc,...abc->...abj',
'...mc,...abcdef,...nf->...abmden',
axis=-1)
check((2,3,4), (),
'...jb,...abc->...ajc',
'...mb,...abcdef,...ne->...amcdnf',
axis=-2)
check((2,3,4), (5,6),
'...jb,...abc->...ajc',
'...mb,...abcdef,...ne->...amcdnf',
axis=-2)
check((2,3,4), (),
'...ja,...abc->...jbc',
'...ma,...abcdef,...nd->...mbcnef',
axis=-3)
check((2,3,4), (5,6),
'...ja,...abc->...jbc',
'...ma,...abcdef,...nd->...mbcnef',
axis=-3)
pass
def test_rotate_plates(self):
# Basic test for Gaussian vectors
X = GaussianARD(np.random.randn(3,2),
np.random.rand(3,2),
shape=(2,),
plates=(3,))
(u0, u1) = X.get_moments()
Cov = u1 - linalg.outer(u0, u0, ndim=1)
Q = np.random.randn(3,3)
Qu0 = np.einsum('ik,kj->ij', Q, u0)
QCov = np.einsum('k,kij->kij', np.sum(Q, axis=0)**2, Cov)
Qu1 = QCov + linalg.outer(Qu0, Qu0, ndim=1)
X.rotate_plates(Q, plate_axis=-1)
(u0, u1) = X.get_moments()
self.assertAllClose(u0, Qu0)
self.assertAllClose(u1, Qu1)
# Test full covariance, that is, with observations
X = GaussianARD(np.random.randn(3,2),
|
np.random.rand(3,2)
|
numpy.random.rand
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import *
from keras.models import load_model
import matplotlib.pyplot as plt
# generate training data
x = np.linspace(0.0,2*np.pi,20)
y = np.sin(x)
# save training data to file
data = np.vstack((x,y)).T
|
np.savetxt('train_data.csv',data,header='x,y',comments='',delimiter=',')
|
numpy.savetxt
|
# Copyright (c) 2003-2019 by <NAME>
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import numpy as np
import os
import coord
import time
import treecorr
from test_helper import assert_raises, do_pickle, profile, timer, get_from_wiki
@timer
def test_cat_patches():
# Test the different ways to set patches in the catalog.
# Use the same input as test_radec()
if __name__ == '__main__':
ngal = 10000
npatch = 128
max_top = 7
else:
ngal = 1000
npatch = 8
max_top = 3
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) ) + 100 # Put everything at large y, so smallish angle on sky
z = rng.normal(0,s, (ngal,) )
ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
# cat0 is the base catalog without patches
cat0 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad')
assert len(cat0.patches) == 1
assert cat0.patches[0].ntot == ngal
# 1. Make the patches automatically using kmeans
# Note: If npatch is a power of two, then the patch determination is completely
# deterministic, which is helpful for this test.
cat1 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', npatch=npatch)
p2, cen = cat0.getNField(max_top=max_top).run_kmeans(npatch)
np.testing.assert_array_equal(cat1.patch, p2)
assert len(cat1.patches) == npatch
assert np.sum([p.ntot for p in cat1.patches]) == ngal
# 2. Optionally can use alt algorithm
cat2 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', npatch=npatch,
kmeans_alt=True)
p3, cen = cat0.getNField(max_top=max_top).run_kmeans(npatch, alt=True)
np.testing.assert_array_equal(cat2.patch, p3)
assert len(cat2.patches) == npatch
# 3. Optionally can set different init method
cat3 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', npatch=npatch,
kmeans_init='kmeans++')
# Can't test that this equals a repeat run from cat0, because kmeans++ has a random aspect to it.
# But at least check that it isn't equal to the other two versions.
assert not np.array_equal(cat3.patch, p2)
assert not np.array_equal(cat3.patch, p3)
cat3b = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', npatch=npatch,
kmeans_init='random')
assert not np.array_equal(cat3b.patch, p2)
assert not np.array_equal(cat3b.patch, p3)
assert not np.array_equal(cat3b.patch, cat3.patch)
# 4. Pass in patch array explicitly
cat4 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', patch=p2)
np.testing.assert_array_equal(cat4.patch, p2)
# 5. Read patch from a column in ASCII file
file_name5 = os.path.join('output','test_cat_patches.dat')
cat4.write(file_name5)
cat5 = treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad',
patch_col=3)
assert not cat5.loaded
np.testing.assert_array_equal(cat5.patch, p2)
assert cat5.loaded # Now it's loaded, since we accessed cat5.patch.
# Just load a single patch from an ASCII file with many patches.
for i in range(npatch):
cat = treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad',
patch_col=3, patch=i)
assert cat.patch == cat5.patches[i].patch == i
np.testing.assert_array_equal(cat.x,cat5.patches[i].x)
np.testing.assert_array_equal(cat.y,cat5.patches[i].y)
assert cat == cat5.patches[i]
cata = treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad',
patch_col=3, patch=i, last_row=ngal//2)
catb = treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad',
patch_col=3, patch=i, first_row=ngal//2+1)
assert cata.patch == i
np.testing.assert_array_equal(cata.x,cat5.patches[i].x[:cata.nobj])
np.testing.assert_array_equal(cata.y,cat5.patches[i].y[:cata.nobj])
np.testing.assert_array_equal(catb.x,cat5.patches[i].x[cata.nobj:])
np.testing.assert_array_equal(catb.y,cat5.patches[i].y[cata.nobj:])
# get_patches from a single patch will return a list with just itself.
assert cata.get_patches(False) == [cata]
assert catb.get_patches(True) == [catb]
# Patches start in an unloaded state (by default)
cat5b = treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad',
patch_col=3)
assert not cat5b.loaded
cat5b_patches = cat5b.get_patches(low_mem=True)
assert cat5b.loaded # Needed to load to get number of patches.
cat5b._patches = None # Need this so get_patches doesn't early exit.
cat5b_patches2 = cat5b.get_patches(low_mem=True) # Repeat with loaded cat5b should be equiv.
cat5b._patches = None
cat5b_patches3 = cat5b.get_patches(low_mem=False)
cat5b._patches = None
cat5b_patches4 = cat5b.get_patches() # Default is False
for i in range(4): # Don't bother with all the patches. 4 suffices to check this.
assert not cat5b_patches[i].loaded # But single patch not loaded yet.
assert not cat5b_patches2[i].loaded
assert cat5b_patches3[i].loaded # Unless we didn't ask for low memory.
assert cat5b_patches4[i].loaded
assert np.all(cat5b_patches[i].patch == i) # Triggers load of patch.
np.testing.assert_array_equal(cat5b_patches[i].x, cat5.x[cat5.patch == i])
# Just load a single patch from an ASCII file with many patches.
for i in range(4):
cat = treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad',
patch_col=3, patch=i)
assert cat.patch == cat5.patches[i].patch
np.testing.assert_array_equal(cat.x,cat5.patches[i].x)
np.testing.assert_array_equal(cat.y,cat5.patches[i].y)
assert cat == cat5.patches[i]
assert cat == cat5b_patches[i]
# 6. Read patch from a column in FITS file
try:
import fitsio
except ImportError:
print('Skip fitsio tests of patch_col')
else:
file_name6 = os.path.join('output','test_cat_patches.fits')
cat4.write(file_name6)
cat6 = treecorr.Catalog(file_name6, ra_col='ra', dec_col='dec',
ra_units='rad', dec_units='rad', patch_col='patch')
np.testing.assert_array_equal(cat6.patch, p2)
cat6b = treecorr.Catalog(file_name6, ra_col='ra', dec_col='dec',
ra_units='rad', dec_units='rad', patch_col='patch', patch_hdu=1)
np.testing.assert_array_equal(cat6b.patch, p2)
assert len(cat6.patches) == npatch
assert len(cat6b.patches) == npatch
# Calling get_patches will not force loading of the file.
cat6c = treecorr.Catalog(file_name6, ra_col='ra', dec_col='dec',
ra_units='rad', dec_units='rad', patch_col='patch')
assert not cat6c.loaded
cat6c_patches = cat6c.get_patches(low_mem=True)
assert cat6c.loaded
cat6c._patches = None
cat6c_patches2 = cat6c.get_patches(low_mem=True)
cat6c._patches = None
cat6c_patches3 = cat6c.get_patches(low_mem=False)
cat6c._patches = None
cat6c_patches4 = cat6c.get_patches()
for i in range(4):
assert not cat6c_patches[i].loaded
assert not cat6c_patches2[i].loaded
assert cat6c_patches3[i].loaded
assert cat6c_patches4[i].loaded
assert np.all(cat6c_patches[i].patch == i) # Triggers load of patch.
np.testing.assert_array_equal(cat6c_patches[i].x, cat6.x[cat6.patch == i])
cat = treecorr.Catalog(file_name6, ra_col='ra', dec_col='dec',
ra_units='rad', dec_units='rad', patch_col='patch', patch=i)
assert cat.patch == cat6.patches[i].patch
np.testing.assert_array_equal(cat.x,cat6.patches[i].x)
np.testing.assert_array_equal(cat.y,cat6.patches[i].y)
assert cat == cat6.patches[i]
assert cat == cat6c_patches[i]
cata = treecorr.Catalog(file_name6, ra_col='ra', dec_col='dec', last_row=ngal//2,
ra_units='rad', dec_units='rad', patch_col='patch', patch=i)
catb = treecorr.Catalog(file_name6, ra_col='ra', dec_col='dec', first_row=ngal//2+1,
ra_units='rad', dec_units='rad', patch_col='patch', patch=i)
assert cata.patch == i
np.testing.assert_array_equal(cata.x,cat6.patches[i].x[:cata.nobj])
np.testing.assert_array_equal(cata.y,cat6.patches[i].y[:cata.nobj])
np.testing.assert_array_equal(catb.x,cat6.patches[i].x[cata.nobj:])
np.testing.assert_array_equal(catb.y,cat6.patches[i].y[cata.nobj:])
# get_patches from a single patch will return a list with just itself.
assert cata.get_patches(False) == [cata]
assert catb.get_patches(True) == [catb]
# 7. Set a single patch number
cat7 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', patch=3)
np.testing.assert_array_equal(cat7.patch, 3)
cat8 = treecorr.Catalog(file_name6, ra_col='ra', dec_col='dec',
ra_units='rad', dec_units='rad', patch_col='patch', patch=3)
np.testing.assert_array_equal(cat8.patch, 3)
# low_mem=True works if not from a file, but it's not any different
cat1_patches = cat1.patches
cat1._patches = None
assert cat1.get_patches(low_mem=True) == cat1_patches
cat2_patches = cat2.patches
cat2._patches = None
assert cat2.get_patches(low_mem=True) == cat2_patches
cat9 = treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad')
cat9_patches = cat9.patches
cat9._patches = None
assert cat9.get_patches(low_mem=True) == cat9_patches
# Check serialization with patch
do_pickle(cat2)
do_pickle(cat7)
# Check some invalid parameters
# Can't have both npatch and patch
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', npatch=npatch, patch=p2)
# patch has to have same number of entries
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', patch=p2[:17])
# npatch=0 is not allowed
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', npatch=0)
# bad option names
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', npatch=npatch,
kmeans_init='invalid')
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', npatch=npatch,
kmeans_alt='maybe')
with assert_raises(ValueError):
treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad',
patch_col='invalid')
# bad patch col
with assert_raises(ValueError):
treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad',
patch_col=4)
# cannot give vector for patch when others are from file name
# (Should this be revisited? Allow this?)
with assert_raises(TypeError):
treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad',
patch=p2)
try:
# bad patch hdu
with assert_raises(IOError):
treecorr.Catalog(file_name6, ra_col='ra', dec_col='dec',
ra_units='rad', dec_units='rad', patch_col='patch', patch_hdu=2)
# bad patch col name for fits
with assert_raises(ValueError):
treecorr.Catalog(file_name6, ra_col='ra', dec_col='dec',
ra_units='rad', dec_units='rad', patch_col='patches')
except NameError:
# file_name6 might not exist if skipped above because of fitsio missing.
pass
@timer
def test_cat_centers():
# Test writing patch centers and setting patches from centers.
if __name__ == '__main__':
ngal = 100000
npatch = 128
else:
ngal = 1000
npatch = 8
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) ) + 100 # Put everything at large y, so smallish angle on sky
z = rng.normal(0,s, (ngal,) )
ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
cat1 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', npatch=npatch)
centers = [(c.x.mean(), c.y.mean(), c.z.mean()) for c in cat1.patches]
centers /= np.sqrt(np.sum(np.array(centers)**2,axis=1))[:,np.newaxis]
centers2 = cat1.patch_centers
print('center0 = ',centers[0])
print(' ',centers2[0])
print('center1 = ',centers[1])
print(' ',centers2[1])
print('max center difference = ',np.max(np.abs(centers2-centers)))
for p in range(npatch):
np.testing.assert_allclose(centers2[p], centers[p], atol=1.e-4)
centers3 = cat1.get_patch_centers()
for p in range(npatch):
np.testing.assert_allclose(centers3[p], centers2[p])
# Write the centers to a file
cen_file = os.path.join('output','test_cat_centers.dat')
cat1.write_patch_centers(cen_file)
# Read the centers file
centers3 = cat1.read_patch_centers(cen_file)
np.testing.assert_allclose(centers3, centers2)
# Set patches from a centers dict
cat2 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch_centers=centers2)
np.testing.assert_array_equal(cat2.patch, cat1.patch)
np.testing.assert_array_equal(cat2.patch_centers, centers2)
# Set patches from file
cat3 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch_centers=cen_file)
np.testing.assert_array_equal(cat3.patch, cat1.patch)
np.testing.assert_array_equal(cat3.patch_centers, centers2)
# If doing this from a config dict, patch_centers will be found in the config dict.
config = dict(ra_units='rad', dec_units='rad', patch_centers=cen_file)
cat4 = treecorr.Catalog(config=config, ra=ra, dec=dec)
np.testing.assert_array_equal(cat4.patch, cat1.patch)
np.testing.assert_array_equal(cat4.patch_centers, centers2)
# If the original catalog had manual patches set, it needs to calculate the centers
# after the fact, so things aren't perfect, but should be close.
cat5 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch=cat1.patch)
np.testing.assert_array_equal(cat5.patch, cat1.patch)
np.testing.assert_allclose(cat5.patch_centers, centers2, atol=1.e-4)
cat6 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch_centers=cat5.patch_centers)
print('n diff = ',np.sum(cat6.patch != cat5.patch))
assert np.sum(cat6.patch != cat5.patch) < 10
np.testing.assert_allclose(cat6.patch_centers, cat5.patch_centers)
# The patch centers from the patch sub-catalogs should match.
cen5 = [c.patch_centers[0] for c in cat5.patches]
np.testing.assert_array_equal(cen5, cat5.patch_centers)
# With weights, things can be a bit farther off of course.
w=rng.uniform(1,2,len(ra))
cat7 = treecorr.Catalog(ra=ra, dec=dec, w=w, ra_units='rad', dec_units='rad',
patch=cat1.patch)
cat8 = treecorr.Catalog(ra=ra, dec=dec, w=w, ra_units='rad', dec_units='rad',
patch_centers=cat7.patch_centers)
print('n diff = ',np.sum(cat8.patch != cat7.patch))
assert np.sum(cat8.patch != cat7.patch) < 200
np.testing.assert_allclose(cat8.patch_centers, cat7.patch_centers)
# But given the same patch centers, the weight doesn't change the assigned patches.
cat8b = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch_centers=cat7.patch_centers)
np.testing.assert_array_equal(cat8.patch, cat8b.patch)
np.testing.assert_array_equal(cat8.patch_centers, cat8b.patch_centers)
# Check flat
cat9 = treecorr.Catalog(x=x, y=y, npatch=npatch)
cen_file2 = os.path.join('output','test_cat_centers.txt')
cat9.write_patch_centers(cen_file2)
centers9 = cat9.read_patch_centers(cen_file2)
np.testing.assert_allclose(centers9, cat9.patch_centers)
cat10 = treecorr.Catalog(x=x, y=y, patch_centers=cen_file2)
np.testing.assert_array_equal(cat10.patch, cat9.patch)
np.testing.assert_array_equal(cat10.patch_centers, cat9.patch_centers)
cat11 = treecorr.Catalog(x=x, y=y, patch=cat9.patch)
cat12 = treecorr.Catalog(x=x, y=y, patch_centers=cat11.patch_centers)
print('n diff = ',np.sum(cat12.patch != cat11.patch))
assert np.sum(cat12.patch != cat11.patch) < 10
cat13 = treecorr.Catalog(x=x, y=y, w=w, patch=cat9.patch)
cat14 = treecorr.Catalog(x=x, y=y, w=w, patch_centers=cat13.patch_centers)
print('n diff = ',np.sum(cat14.patch != cat13.patch))
assert np.sum(cat14.patch != cat13.patch) < 200
np.testing.assert_array_equal(cat14.patch_centers, cat13.patch_centers)
# The patch centers from the patch sub-catalogs should match.
cen13 = [c.patch_centers[0] for c in cat13.patches]
np.testing.assert_array_equal(cen13, cat13.patch_centers)
# Using the full patch centers, you can also just load a single patch.
for i in range(npatch):
cat = treecorr.Catalog(x=x, y=y, w=w, patch_centers=cat13.patch_centers, patch=i)
assert cat.patch == cat14.patches[i].patch
np.testing.assert_array_equal(cat.x,cat14.patches[i].x)
np.testing.assert_array_equal(cat.y,cat14.patches[i].y)
assert cat == cat14.patches[i]
# Loading from a file with patch_centers can mean that get_patches won't trigger a load.
file_name15 = os.path.join('output','test_cat_centers_f15.dat')
cat14.write(file_name15)
cat15 = treecorr.Catalog(file_name15, x_col=1, y_col=2, w_col=3,
patch_centers=cat14.patch_centers)
assert not cat15.loaded
cat15_patches = cat15.get_patches(low_mem=True)
assert not cat15.loaded # Unlike above (in test_cat_patches) it's still unloaded.
for i in range(4): # Don't bother with all the patches. 4 suffices to check this.
assert not cat15_patches[i].loaded
assert np.all(cat15_patches[i].patch == i) # Triggers load of patch.
np.testing.assert_array_equal(cat15_patches[i].x, cat15.x[cat15.patch == i])
cat = treecorr.Catalog(file_name15, x_col=1, y_col=2, w_col=3,
patch_centers=cat15.patch_centers, patch=i)
assert cat.patch == cat15.patches[i].patch
np.testing.assert_array_equal(cat.x,cat15_patches[i].x)
np.testing.assert_array_equal(cat.y,cat15_patches[i].y)
assert cat == cat15_patches[i]
assert cat == cat15.patches[i]
# Check fits
try:
import fitsio
except ImportError:
pass
else:
file_name17 = os.path.join('output','test_cat_centers.fits')
cat8.write(file_name17)
cat17 = treecorr.Catalog(file_name17, ra_col='ra', dec_col='dec', w_col='w',
ra_units='rad', dec_units='rad',
patch_centers=cat8.patch_centers)
assert not cat17.loaded
cat17_patches = cat17.get_patches(low_mem=True)
assert not cat17.loaded # Unlike above (in test_cat_patches) it's still unloaded.
for i in range(4): # Don't bother with all the patches. 4 suffices to check this.
assert not cat17_patches[i].loaded
assert np.all(cat17_patches[i].patch == i) # Triggers load of patch.
np.testing.assert_array_equal(cat17_patches[i].ra, cat17.ra[cat17.patch == i])
cat = treecorr.Catalog(file_name17, ra_col='ra', dec_col='dec', w_col='w',
ra_units='rad', dec_units='rad',
patch_centers=cat8.patch_centers, patch=i)
assert cat.patch == cat17.patches[i].patch
np.testing.assert_array_equal(cat.ra,cat17_patches[i].ra)
np.testing.assert_array_equal(cat.dec,cat17_patches[i].dec)
assert cat == cat17_patches[i]
assert cat == cat17.patches[i]
# Check for some invalid values
# Can't have both patch_centers and another patch specification
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch_centers=cen_file, npatch=3)
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch_centers=cen_file, patch=np.ones_like(ra))
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch_centers=cen_file, patch_col=3)
# patch_centers is wrong shape
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch_centers=cen_file2)
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch_centers=cat9.patch_centers)
with assert_raises(ValueError):
treecorr.Catalog(x=x, y=y, patch_centers=cen_file)
with assert_raises(ValueError):
treecorr.Catalog(x=x, y=y, patch_centers=cat1.patch_centers)
# Missing some patch numbers
with assert_raises(RuntimeError):
c=treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch=np.random.uniform(10,20,len(ra)))
c.get_patch_centers()
def generate_shear_field(nside):
# Generate a random shear field with a well-defined power spectrum.
# It generates shears on a grid nside x nside, and returns, x, y, g1, g2
kvals = np.fft.fftfreq(nside) * 2*np.pi
kx,ky = np.meshgrid(kvals,kvals)
k = kx + 1j*ky
ksq = kx**2 + ky**2
# Use a power spectrum with lots of large scale power.
# The rms shape ends up around 0.2 and min/max are around +-1.
# Having a lot more large-scale than small-scale power means that sample variance is
# very important, so the shot noise estimate of the variance is particularly bad.
Pk = 1.e4 * ksq / (1. + 300.*ksq)**2
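# Explanatory note (not in the original code): this P(k) peaks at ksq = 1/300
# (|k| ~ 0.06), far below the maximum |k| ~ pi from fftfreq, so most of the
# power really does sit on large scales.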
# Make complex gaussian field in k-space.
f1 = np.random.normal(size=Pk.shape)
f2 = np.random.normal(size=Pk.shape)
f = (f1 + 1j*f2) * np.sqrt(0.5)
# Make f Hermitian, to correspond to E-mode-only field.
# Hermitian means f(-k) = conj(f(k)).
# Note: this is approximate. It doesn't handle all of the k=0 and k=nside/2 modes correctly.
# But this is good enough for xi- to be not close to zero.
ikxp = slice(1,(nside+1)//2) # kx > 0
ikxn = slice(-1,nside//2,-1) # kx < 0
ikyp = slice(1,(nside+1)//2) # ky > 0
ikyn = slice(-1,nside//2,-1) # ky < 0
f[ikyp,ikxn] = np.conj(f[ikyn,ikxp])
f[ikyn,ikxn] = np.conj(f[ikyp,ikxp])
# Multiply by the power spectrum to get a realization of a field with this P(k)
f *= Pk
# Inverse fft gives the real-space field.
kappa = nside * np.fft.ifft2(f)
# Multiply by exp(2iphi) to get gamma field, rather than kappa.
ksq[0,0] = 1. # Avoid division by zero
exp2iphi = k**2 / ksq
f *= exp2iphi
gamma = nside * np.fft.ifft2(f)
# Generate x,y values for the real-space field
x,y = np.meshgrid(np.linspace(0.,1000.,nside), np.linspace(0.,1000.,nside))
x = x.ravel()
y = y.ravel()
gamma = gamma.ravel()
kappa = np.real(kappa.ravel())
return x, y, np.real(gamma), np.imag(gamma), kappa
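# A minimal sketch (not part of the original test suite) for eyeballing the
# statistics of the generated field; the name and default nside are illustrative.
def _demo_generate_shear_field(nside=64):
    # The comments above suggest an rms shear of roughly 0.2 with min/max
    # around +-1; this just reports those numbers for a small field.
    x, y, g1, g2, kappa = generate_shear_field(nside)
    print('rms shear = ', np.sqrt(np.mean(g1**2 + g2**2)))
    print('min/max g1 = ', np.min(g1), np.max(g1))
    return x, y, g1, g2, kappa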
@timer
def test_gg_jk():
# Test the variance estimate for GG correlation with jackknife (and other) error estimate.
if __name__ == '__main__':
# 1000 x 1000, so 10^6 points. With jackknifing, that gives 10^4 per region.
nside = 1000
npatch = 64
tol_factor = 1
else:
# Use a much smaller grid (and far fewer objects) when running unit tests
nside = 200
npatch = 16
tol_factor = 8
# The full simulation needs to run a lot of times to get a good estimate of the variance,
# but this takes a long time. So we store the results in the repo.
# To redo the simulation, just delete the file data/test_gg_jk_{nside}.npz
file_name = 'data/test_gg_jk_{}.npz'.format(nside)
print(file_name)
if not os.path.isfile(file_name):
nruns = 1000
all_ggs = []
for run in range(nruns):
x, y, g1, g2, _ = generate_shear_field(nside)
print(run,': ',np.mean(g1),np.std(g1),np.min(g1),np.max(g1))
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
gg = treecorr.GGCorrelation(bin_size=0.3, min_sep=10., max_sep=50.)
gg.process(cat)
all_ggs.append(gg)
mean_xip = np.mean([gg.xip for gg in all_ggs], axis=0)
var_xip = np.var([gg.xip for gg in all_ggs], axis=0)
mean_xim = np.mean([gg.xim for gg in all_ggs], axis=0)
var_xim = np.var([gg.xim for gg in all_ggs], axis=0)
mean_varxip = np.mean([gg.varxip for gg in all_ggs], axis=0)
mean_varxim = np.mean([gg.varxim for gg in all_ggs], axis=0)
np.savez(file_name,
mean_xip=mean_xip, mean_xim=mean_xim,
var_xip=var_xip, var_xim=var_xim,
mean_varxip=mean_varxip, mean_varxim=mean_varxim)
data = np.load(file_name)
mean_xip = data['mean_xip']
mean_xim = data['mean_xim']
var_xip = data['var_xip']
var_xim = data['var_xim']
mean_varxip = data['mean_varxip']
mean_varxim = data['mean_varxim']
print('mean_xip = ',mean_xip)
print('mean_xim = ',mean_xim)
print('mean_varxip = ',mean_varxip)
print('mean_varxim = ',mean_varxim)
print('var_xip = ',var_xip)
print('ratio = ',var_xip / mean_varxip)
print('var_xim = ',var_xim)
print('ratio = ',var_xim / mean_varxim)
np.random.seed(1234)
# First run with the normal variance estimate, which is too small.
x, y, g1, g2, _ = generate_shear_field(nside)
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
gg1 = treecorr.GGCorrelation(bin_size=0.3, min_sep=10., max_sep=50.)
t0 = time.time()
gg1.process(cat)
t1 = time.time()
print('Time for non-patch processing = ',t1-t0)
print('weight = ',gg1.weight)
print('xip = ',gg1.xip)
print('xim = ',gg1.xim)
print('varxip = ',gg1.varxip)
print('varxim = ',gg1.varxim)
print('pullsq for xip = ',(gg1.xip-mean_xip)**2/var_xip)
print('pullsq for xim = ',(gg1.xim-mean_xim)**2/var_xim)
print('max pull for xip = ',np.sqrt(np.max((gg1.xip-mean_xip)**2/var_xip)))
print('max pull for xim = ',np.sqrt(np.max((gg1.xim-mean_xim)**2/var_xim)))
np.testing.assert_array_less((gg1.xip - mean_xip)**2/var_xip, 25) # within 5 sigma
np.testing.assert_array_less((gg1.xim - mean_xim)**2/var_xim, 25)
np.testing.assert_allclose(gg1.varxip, mean_varxip, rtol=0.03 * tol_factor)
np.testing.assert_allclose(gg1.varxim, mean_varxim, rtol=0.03 * tol_factor)
# The naive error estimates only include shape noise, so they are an underestimate of
# the full variance, which includes sample variance.
np.testing.assert_array_less(mean_varxip, var_xip)
np.testing.assert_array_less(mean_varxim, var_xim)
np.testing.assert_array_less(gg1.varxip, var_xip)
np.testing.assert_array_less(gg1.varxim, var_xim)
# Now run with patches, but still with shot variance. Should be basically the same answer.
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, npatch=npatch)
gg2 = treecorr.GGCorrelation(bin_size=0.3, min_sep=10., max_sep=50., var_method='shot')
t0 = time.time()
gg2.process(cat)
t1 = time.time()
print('Time for shot processing = ',t1-t0)
print('weight = ',gg2.weight)
print('xip = ',gg2.xip)
print('xim = ',gg2.xim)
print('varxip = ',gg2.varxip)
print('varxim = ',gg2.varxim)
np.testing.assert_allclose(gg2.weight, gg1.weight, rtol=1.e-2*tol_factor)
np.testing.assert_allclose(gg2.xip, gg1.xip, rtol=1.e-2*tol_factor)
np.testing.assert_allclose(gg2.xim, gg1.xim, rtol=3.e-2*tol_factor)
np.testing.assert_allclose(gg2.varxip, gg1.varxip, rtol=1.e-2*tol_factor)
np.testing.assert_allclose(gg2.varxim, gg1.varxim, rtol=1.e-2*tol_factor)
# Can get this as a (diagonal) covariance matrix using estimate_cov
np.testing.assert_allclose(gg2.estimate_cov('shot'),
np.diag(np.concatenate([gg2.varxip, gg2.varxim])))
# Now run with jackknife variance estimate. Should be much better.
gg3 = treecorr.GGCorrelation(bin_size=0.3, min_sep=10., max_sep=50., var_method='jackknife')
t0 = time.time()
gg3.process(cat)
t1 = time.time()
print('Time for jackknife processing = ',t1-t0)
print('xip = ',gg3.xip)
print('xim = ',gg3.xim)
print('varxip = ',gg3.varxip)
print('ratio = ',gg3.varxip / var_xip)
print('varxim = ',gg3.varxim)
print('ratio = ',gg3.varxim / var_xim)
np.testing.assert_allclose(gg3.weight, gg2.weight)
np.testing.assert_allclose(gg3.xip, gg2.xip)
np.testing.assert_allclose(gg3.xim, gg2.xim)
# Not perfect, but within about 30%.
np.testing.assert_allclose(gg3.varxip, var_xip, rtol=0.3*tol_factor)
np.testing.assert_allclose(gg3.varxim, var_xim, rtol=0.3*tol_factor)
# Can get the covariance matrix using estimate_cov, which is also stored as cov attribute
t0 = time.time()
np.testing.assert_allclose(gg3.estimate_cov('jackknife'), gg3.cov)
t1 = time.time()
print('Time to calculate jackknife covariance = ',t1-t0)
# Can also get the shot covariance matrix using estimate_cov
np.testing.assert_allclose(gg3.estimate_cov('shot'),
np.diag(np.concatenate([gg2.varxip, gg2.varxim])))
# And can even get the jackknife covariance from a run that used var_method='shot'
np.testing.assert_allclose(gg2.estimate_cov('jackknife'), gg3.cov)
# Check that cross-covariance between xip and xim is significant.
n = gg3.nbins
print('cross covariance = ',gg3.cov[:n,n:],np.sum(gg3.cov[n:,n:]**2))
# Make cross correlation matrix
c = gg3.cov[:n,n:] / (np.sqrt(gg3.varxip)[:,np.newaxis] * np.sqrt(gg3.varxim)[np.newaxis,:])
print('cross correlation = ',c)
assert np.sum(c**2) > 1.e-2 # Should be significantly non-zero
assert np.all(np.abs(c) < 1.) # And all are between -1 and 1.
# If gg2 and gg3 were two different calculations, can use
# estimate_multi_cov to get combined covariance
t0 = time.time()
cov23 = treecorr.estimate_multi_cov([gg2,gg3], 'jackknife')
t1 = time.time()
print('Time for jackknife cross-covariance = ',t1-t0)
np.testing.assert_allclose(cov23[:2*n,:2*n], gg3.cov)
np.testing.assert_allclose(cov23[2*n:,2*n:], gg3.cov)
# In this case, they aren't different, so they are perfectly correlated.
np.testing.assert_allclose(cov23[:2*n,2*n:], gg3.cov)
np.testing.assert_allclose(cov23[2*n:,:2*n], gg3.cov)
# Check sample covariance estimate
treecorr.GGCorrelation(bin_size=0.3, min_sep=10., max_sep=50., var_method='sample')
t0 = time.time()
cov_sample = gg3.estimate_cov('sample')
t1 = time.time()
print('Time to calculate sample covariance = ',t1-t0)
print('varxip = ',cov_sample.diagonal()[:n])
print('ratio = ',cov_sample.diagonal()[:n] / var_xip)
print('varxim = ',cov_sample.diagonal()[n:])
print('ratio = ',cov_sample.diagonal()[n:] / var_xim)
# It's not too bad at small scales, but at larger scales the variance in the number of pairs
# among the different samples gets bigger (since some are near the edge, and others not).
# So this is only good to a little worse than a factor of 2.
np.testing.assert_allclose(cov_sample.diagonal()[:n], var_xip, rtol=0.5*tol_factor)
np.testing.assert_allclose(cov_sample.diagonal()[n:], var_xim, rtol=0.5*tol_factor)
# Check marked-point bootstrap covariance estimate
treecorr.GGCorrelation(bin_size=0.3, min_sep=10., max_sep=50., var_method='marked_bootstrap')
t0 = time.time()
cov_boot = gg3.estimate_cov('marked_bootstrap')
t1 = time.time()
print('Time to calculate marked_bootstrap covariance = ',t1-t0)
print('varxip = ',cov_boot.diagonal()[:n])
print('ratio = ',cov_boot.diagonal()[:n] / var_xip)
print('varxim = ',cov_boot.diagonal()[n:])
print('ratio = ',cov_boot.diagonal()[n:] / var_xim)
# Not really much better than sample.
np.testing.assert_allclose(cov_boot.diagonal()[:n], var_xip, rtol=0.6*tol_factor)
np.testing.assert_allclose(cov_boot.diagonal()[n:], var_xim, rtol=0.5*tol_factor)
# Check bootstrap covariance estimate.
treecorr.GGCorrelation(bin_size=0.3, min_sep=10., max_sep=50., var_method='bootstrap')
t0 = time.time()
cov_boot = gg3.estimate_cov('bootstrap')
t1 = time.time()
print('Time to calculate bootstrap covariance = ',t1-t0)
print('varxip = ',cov_boot.diagonal()[:n])
print('ratio = ',cov_boot.diagonal()[:n] / var_xip)
print('varxim = ',cov_boot.diagonal()[n:])
print('ratio = ',cov_boot.diagonal()[n:] / var_xim)
np.testing.assert_allclose(cov_boot.diagonal()[:n], var_xip, rtol=0.3*tol_factor)
np.testing.assert_allclose(cov_boot.diagonal()[n:], var_xim, rtol=0.4*tol_factor)
# Check some invalid actions
# Bad var_method
with assert_raises(ValueError):
gg2.estimate_cov('invalid')
# Not run on patches, but need patches
with assert_raises(ValueError):
gg1.estimate_cov('jackknife')
with assert_raises(ValueError):
gg1.estimate_cov('sample')
with assert_raises(ValueError):
gg1.estimate_cov('marked_bootstrap')
with assert_raises(ValueError):
gg1.estimate_cov('bootstrap')
# All of them need to use patches
with assert_raises(ValueError):
treecorr.estimate_multi_cov([gg1, gg2],'jackknife')
with assert_raises(ValueError):
treecorr.estimate_multi_cov([gg2, gg1],'jackknife')
with assert_raises(ValueError):
treecorr.estimate_multi_cov([gg1, gg2],'sample')
with assert_raises(ValueError):
treecorr.estimate_multi_cov([gg2, gg1],'sample')
with assert_raises(ValueError):
treecorr.estimate_multi_cov([gg1, gg2],'marked_bootstrap')
with assert_raises(ValueError):
treecorr.estimate_multi_cov([gg2, gg1],'marked_bootstrap')
with assert_raises(ValueError):
treecorr.estimate_multi_cov([gg1, gg2],'bootstrap')
with assert_raises(ValueError):
treecorr.estimate_multi_cov([gg2, gg1],'bootstrap')
# All need to use the same patches
cat3 = treecorr.Catalog(x=x[:100], y=y[:100], g1=g1[:100], g2=g2[:100], npatch=7)
gg3 = treecorr.GGCorrelation(bin_size=0.3, min_sep=10., max_sep=50.)
gg3.process(cat3)
with assert_raises(RuntimeError):
treecorr.estimate_multi_cov([gg3, gg2],'jackknife')
with assert_raises(RuntimeError):
treecorr.estimate_multi_cov([gg2, gg3],'jackknife')
with assert_raises(RuntimeError):
treecorr.estimate_multi_cov([gg3, gg2],'sample')
with assert_raises(RuntimeError):
treecorr.estimate_multi_cov([gg2, gg3],'sample')
with assert_raises(RuntimeError):
treecorr.estimate_multi_cov([gg3, gg2],'marked_bootstrap')
with assert_raises(RuntimeError):
treecorr.estimate_multi_cov([gg2, gg3],'marked_bootstrap')
with assert_raises(RuntimeError):
treecorr.estimate_multi_cov([gg3, gg2],'bootstrap')
with assert_raises(RuntimeError):
treecorr.estimate_multi_cov([gg2, gg3],'bootstrap')
@timer
def test_ng_jk():
# Test the variance estimate for NG correlation with jackknife error estimate.
if __name__ == '__main__':
# 1000 x 1000, so 10^6 points. With jackknifing, that gives 10^4 per region.
nside = 1000
nlens = 50000
npatch = 64
tol_factor = 1
else:
# If much smaller, then there can be no lenses in some patches, so only 1/4 the galaxies
# and use more than half the number of patches
nside = 200
nlens = 3000
npatch = 8
tol_factor = 4
file_name = 'data/test_ng_jk_{}.npz'.format(nside)
print(file_name)
if not os.path.isfile(file_name):
nruns = 1000
all_ngs = []
for run in range(nruns):
x, y, g1, g2, k = generate_shear_field(nside)
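# Explanatory note (not in the original test): np.partition below picks the
# nlens-th largest kappa value as the threshold, so (barring ties) exactly
# nlens points get w=1 and act as lenses.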
thresh = np.partition(k.flatten(), -nlens)[-nlens]
w = np.zeros_like(k)
w[k>=thresh] = 1.
print(run,': ',np.mean(g1),np.std(g1),np.min(g1),np.max(g1),thresh)
cat1 = treecorr.Catalog(x=x, y=y, w=w)
cat2 = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
ng = treecorr.NGCorrelation(bin_size=0.3, min_sep=10., max_sep=50.)
ng.process(cat1, cat2)
all_ngs.append(ng)
mean_xi = np.mean([ng.xi for ng in all_ngs], axis=0)
var_xi = np.var([ng.xi for ng in all_ngs], axis=0)
mean_varxi = np.mean([ng.varxi for ng in all_ngs], axis=0)
np.savez(file_name,
mean_xi=mean_xi, var_xi=var_xi, mean_varxi=mean_varxi)
data = np.load(file_name)
mean_xi = data['mean_xi']
var_xi = data['var_xi']
mean_varxi = data['mean_varxi']
print('mean_xi = ',mean_xi)
print('mean_varxi = ',mean_varxi)
print('var_xi = ',var_xi)
print('ratio = ',var_xi / mean_varxi)
np.random.seed(1234)
# First run with the normal variance estimate, which is too small.
x, y, g1, g2, k = generate_shear_field(nside)
thresh = np.partition(k.flatten(), -nlens)[-nlens]
w = np.zeros_like(k)
w[k>=thresh] = 1.
cat1 = treecorr.Catalog(x=x, y=y, w=w)
cat2 = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
ng1 = treecorr.NGCorrelation(bin_size=0.3, min_sep=10., max_sep=50.)
t0 = time.time()
ng1.process(cat1, cat2)
t1 = time.time()
print('Time for non-patch processing = ',t1-t0)
print('weight = ',ng1.weight)
print('xi = ',ng1.xi)
print('varxi = ',ng1.varxi)
print('pullsq for xi = ',(ng1.xi-mean_xi)**2/var_xi)
print('max pull for xi = ',np.sqrt(np.max((ng1.xi-mean_xi)**2/var_xi)))
np.testing.assert_array_less((ng1.xi - mean_xi)**2/var_xi, 25) # within 5 sigma
np.testing.assert_allclose(ng1.varxi, mean_varxi, rtol=0.03 * tol_factor)
# The naive error estimates only include shape noise, so they are an underestimate of
# the full variance, which includes sample variance.
np.testing.assert_array_less(mean_varxi, var_xi)
np.testing.assert_array_less(ng1.varxi, var_xi)
# Now run with patches, but still with shot variance. Should be basically the same answer.
# Note: This turns out to work significantly better if cat1 is used to make the patches.
# Otherwise the number of lenses per patch varies a lot, which affects the variance estimate.
# But that means we need to keep the w=0 objects in the catalog, so all objects get a patch.
cat1p = treecorr.Catalog(x=x, y=y, w=w, npatch=npatch, keep_zero_weight=True)
cat2p = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, patch=cat1p.patch)
print('tot w = ',np.sum(w))
print('Patch\tNlens')
for i in range(npatch):
print('%d\t%d'%(i,np.sum(cat2p.w[cat2p.patch==i])))
ng2 = treecorr.NGCorrelation(bin_size=0.3, min_sep=10., max_sep=50., var_method='shot')
t0 = time.time()
ng2.process(cat1p, cat2p)
t1 = time.time()
print('Time for shot processing = ',t1-t0)
print('weight = ',ng2.weight)
print('xi = ',ng2.xi)
print('varxi = ',ng2.varxi)
np.testing.assert_allclose(ng2.weight, ng1.weight, rtol=1.e-2*tol_factor)
np.testing.assert_allclose(ng2.xi, ng1.xi, rtol=3.e-2*tol_factor)
np.testing.assert_allclose(ng2.varxi, ng1.varxi, rtol=1.e-2*tol_factor)
# Can get this as a (diagonal) covariance matrix using estimate_cov
np.testing.assert_allclose(ng2.estimate_cov('shot'), np.diag(ng2.varxi))
np.testing.assert_allclose(ng1.estimate_cov('shot'), np.diag(ng1.varxi))
# Now run with jackknife variance estimate. Should be much better.
ng3 = treecorr.NGCorrelation(bin_size=0.3, min_sep=10., max_sep=50., var_method='jackknife')
t0 = time.time()
ng3.process(cat1p, cat2p)
t1 = time.time()
print('Time for jackknife processing = ',t1-t0)
print('xi = ',ng3.xi)
print('varxi = ',ng3.varxi)
print('ratio = ',ng3.varxi / var_xi)
np.testing.assert_allclose(ng3.weight, ng2.weight)
np.testing.assert_allclose(ng3.xi, ng2.xi)
np.testing.assert_allclose(ng3.varxi, var_xi, rtol=0.3*tol_factor)
# Check using estimate_cov
t0 = time.time()
np.testing.assert_allclose(ng3.estimate_cov('jackknife'), ng3.cov)
t1 = time.time()
print('Time to calculate jackknife covariance = ',t1-t0)
# Check only using patches for one of the two catalogs.
# Not as good as using patches for both, but not much worse.
ng4 = treecorr.NGCorrelation(bin_size=0.3, min_sep=10., max_sep=50., var_method='jackknife')
t0 = time.time()
ng4.process(cat1p, cat2)
t1 = time.time()
print('Time for only patches for cat1 processing = ',t1-t0)
print('weight = ',ng4.weight)
print('xi = ',ng4.xi)
print('varxi = ',ng4.varxi)
np.testing.assert_allclose(ng4.weight, ng1.weight, rtol=1.e-2*tol_factor)
np.testing.assert_allclose(ng4.xi, ng1.xi, rtol=3.e-2*tol_factor)
np.testing.assert_allclose(ng4.varxi, var_xi, rtol=0.5*tol_factor)
ng5 = treecorr.NGCorrelation(bin_size=0.3, min_sep=10., max_sep=50., var_method='jackknife')
t0 = time.time()
ng5.process(cat1, cat2p)
t1 = time.time()
print('Time for only patches for cat2 processing = ',t1-t0)
print('weight = ',ng5.weight)
print('xi = ',ng5.xi)
print('varxi = ',ng5.varxi)
np.testing.assert_allclose(ng5.weight, ng1.weight, rtol=1.e-2*tol_factor)
np.testing.assert_allclose(ng5.xi, ng1.xi, rtol=3.e-2*tol_factor)
np.testing.assert_allclose(ng5.varxi, var_xi, rtol=0.4*tol_factor)
# Check sample covariance estimate
t0 = time.time()
cov_sample = ng3.estimate_cov('sample')
t1 = time.time()
print('Time to calculate sample covariance = ',t1-t0)
print('varxi = ',cov_sample.diagonal())
print('ratio = ',cov_sample.diagonal() / var_xi)
np.testing.assert_allclose(cov_sample.diagonal(), var_xi, rtol=0.5*tol_factor)
cov_sample = ng4.estimate_cov('sample')
print('varxi = ',cov_sample.diagonal())
print('ratio = ',cov_sample.diagonal() / var_xi)
np.testing.assert_allclose(cov_sample.diagonal(), var_xi, rtol=0.5*tol_factor)
cov_sample = ng5.estimate_cov('sample')
print('varxi = ',cov_sample.diagonal())
print('ratio = ',cov_sample.diagonal() / var_xi)
np.testing.assert_allclose(cov_sample.diagonal(), var_xi, rtol=0.5*tol_factor)
# Check marked_bootstrap covariance estimate
t0 = time.time()
cov_boot = ng3.estimate_cov('marked_bootstrap')
t1 = time.time()
print('Time to calculate marked_bootstrap covariance = ',t1-t0)
print('varxi = ',cov_boot.diagonal())
print('ratio = ',cov_boot.diagonal() / var_xi)
np.testing.assert_allclose(cov_boot.diagonal(), var_xi, rtol=0.5*tol_factor)
cov_boot = ng4.estimate_cov('marked_bootstrap')
print('varxi = ',cov_boot.diagonal())
print('ratio = ',cov_boot.diagonal() / var_xi)
np.testing.assert_allclose(cov_boot.diagonal(), var_xi, rtol=0.6*tol_factor)
cov_boot = ng5.estimate_cov('marked_bootstrap')
print('varxi = ',cov_boot.diagonal())
print('ratio = ',cov_boot.diagonal() / var_xi)
np.testing.assert_allclose(cov_boot.diagonal(), var_xi, rtol=0.5*tol_factor)
# Check bootstrap covariance estimate.
t0 = time.time()
cov_boot = ng3.estimate_cov('bootstrap')
t1 = time.time()
print('Time to calculate bootstrap covariance = ',t1-t0)
print('varxi = ',cov_boot.diagonal())
print('ratio = ',cov_boot.diagonal() / var_xi)
np.testing.assert_allclose(cov_boot.diagonal(), var_xi, rtol=0.2*tol_factor)
cov_boot = ng4.estimate_cov('bootstrap')
print('varxi = ',cov_boot.diagonal())
print('ratio = ',cov_boot.diagonal() / var_xi)
np.testing.assert_allclose(cov_boot.diagonal(), var_xi, rtol=0.5*tol_factor)
cov_boot = ng5.estimate_cov('bootstrap')
print('varxi = ',cov_boot.diagonal())
print('ratio = ',cov_boot.diagonal() / var_xi)
np.testing.assert_allclose(cov_boot.diagonal(), var_xi, rtol=0.4*tol_factor)
# Use a random catalog
# In this case the locations of the source catalog are fine to use as our random catalog,
# since they fill the region where the lenses are allowed to be.
rg4 = treecorr.NGCorrelation(bin_size=0.3, min_sep=10., max_sep=50.)
t0 = time.time()
rg4.process(cat2p, cat2p)
t1 = time.time()
print('Time for processing RG = ',t1-t0)
ng4 = ng3.copy()
ng4.calculateXi(rg4)
print('xi = ',ng4.xi)
print('varxi = ',ng4.varxi)
print('ratio = ',ng4.varxi / var_xi)
np.testing.assert_allclose(ng4.weight, ng3.weight, rtol=0.02*tol_factor)
np.testing.assert_allclose(ng4.xi, ng3.xi, rtol=0.02*tol_factor)
np.testing.assert_allclose(ng4.varxi, var_xi, rtol=0.3*tol_factor)
# Check using estimate_cov
t0 = time.time()
cov = ng4.estimate_cov('jackknife')
t1 = time.time()
print('Time to calculate jackknife covariance = ',t1-t0)
# The covariance has more terms that differ. 3x5 is the largest difference, needing rtol=0.4.
# I think this is correct -- mostly this is testing that I didn't totally mess up the
# weight normalization when applying the RG to the patches.
np.testing.assert_allclose(cov, ng3.cov, rtol=0.4*tol_factor, atol=3.e-6*tol_factor)
# Use a random catalog without patches.
rg5 = treecorr.NGCorrelation(bin_size=0.3, min_sep=10., max_sep=50.)
t0 = time.time()
rg5.process(cat2, cat2p)
t1 = time.time()
print('Time for processing RG = ',t1-t0)
ng5 = ng3.copy()
ng5.calculateXi(rg5)
print('xi = ',ng5.xi)
print('varxi = ',ng5.varxi)
print('ratio = ',ng5.varxi / var_xi)
|
np.testing.assert_allclose(ng5.weight, ng3.weight, rtol=0.02*tol_factor)
|
numpy.testing.assert_allclose
|
# plot.py
# Created by <NAME> on 2018-10-19.
# Email: <EMAIL>
# Copyright (c) 2018. All rights reserved.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from ..utils import import_graph, pass_to_ranks
from ..embed import selectSVD
from sklearn.utils import check_array, check_consistent_length
from mpl_toolkits.axes_grid1 import make_axes_locatable
def _check_common_inputs(
figsize=None,
height=None,
title=None,
context=None,
font_scale=None,
legend_name=None,
):
# Handle figsize
if figsize is not None:
if not isinstance(figsize, tuple):
msg = "figsize must be a tuple, not {}.".format(type(figsize))
raise TypeError(msg)
# Handle heights
if height is not None:
if not isinstance(height, (int, float)):
msg = "height must be an integer or float, not {}.".format(type(height))
raise TypeError(msg)
# Handle title
if title is not None:
if not isinstance(title, str):
msg = "title must be a string, not {}.".format(type(title))
raise TypeError(msg)
# Handle context
if context is not None:
if not isinstance(context, str):
msg = "context must be a string, not {}.".format(type(context))
raise TypeError(msg)
elif context not in ["paper", "notebook", "talk", "poster"]:
msg = "context must be one of (paper, notebook, talk, poster), \
not {}.".format(
context
)
raise ValueError(msg)
# Handle font_scale
if font_scale is not None:
if not isinstance(font_scale, (int, float)):
msg = "font_scale must be an integer or float, not {}.".format(
type(font_scale)
)
raise TypeError(msg)
# Handle legend name
if legend_name is not None:
if not isinstance(legend_name, str):
msg = "legend_name must be a string, not {}.".format(type(legend_name))
raise TypeError(msg)
def _transform(arr, method):
if method is not None:
if method == "log":
# arr = np.log(arr, where=(arr > 0))
# hacky, but np.log(arr, where=arr > 0) leaves the unselected entries uninitialized, so do it explicitly
arr = arr.copy()
arr[arr > 0] = np.log(arr[arr > 0])
elif method in ["zero-boost", "simple-all", "simple-nonzero"]:
arr = pass_to_ranks(arr, method=method)
else:
msg = "Transform must be one of {log, zero-boost, simple-all, \
simple-nonzero}, not {}.".format(
method
)
raise ValueError(msg)
return arr
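# A minimal sketch (not part of the original module) of the masked-log
# behaviour above: zeros are preserved while positive entries are logged.
def _demo_masked_log():
    arr = np.array([[0.0, 1.0], [np.e, 0.0]])
    # Expected result: [[0, 0], [1, 0]] (zeros untouched, log applied elsewhere)
    return _transform(arr, "log")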
def heatmap(
X,
transform=None,
figsize=(10, 10),
title=None,
context="talk",
font_scale=1,
xticklabels=False,
yticklabels=False,
cmap="RdBu_r",
center=0,
cbar=True,
inner_hier_labels=None,
outer_hier_labels=None,
):
r"""
Plots a graph as a heatmap.
Parameters
----------
X : nx.Graph or np.ndarray object
Graph or numpy matrix to plot
transform : None, or string {'log', 'zero-boost', 'simple-all', 'simple-nonzero'}
- 'log' :
Plots the log of all nonzero numbers
- 'zero-boost' :
Pass to ranks method. Preserves the edge weight for all 0s, but ranks
the other edges as if the ranks of all 0 edges had been assigned.
- 'simple-all':
Pass to ranks method. Assigns ranks to all non-zero edges, settling
ties using the average. Ranks are then scaled by
:math:`\frac{2 rank(\text{non-zero edges})}{n^2 + 1}`
where n is the number of nodes
- 'simple-nonzero':
Pass to ranks method. Same as simple-all, but ranks are scaled by
:math:`\frac{2 rank(\text{non-zero edges})}{\text{total non-zero edges} + 1}`
figsize : tuple of integers, optional, default: (10, 10)
Width, height in inches.
title : str, optional, default: None
Title of plot.
context : None, or one of {paper, notebook, talk (default), poster}
The name of a preconfigured set.
font_scale : float, optional, default: 1
Separate scaling factor to independently scale the size of the font
elements.
xticklabels, yticklabels : bool or list, optional
If list-like, plot these alternate labels as the ticklabels.
cmap : str, default: 'RdBu_r'
Valid matplotlib color map.
center : float, default: 0
The value at which to center the colormap
cbar : bool, default: True
Whether to draw a colorbar.
inner_hier_labels : array-like, length of X's first dimension, default: None
Categorical labeling of the nodes. If not None, will group the nodes
according to these labels and plot the labels on the marginal
outer_hier_labels : array-like, length of X's first dimension, default: None
Categorical labeling of the nodes, ignored without `inner_hier_labels`
If not None, will plot these labels as the second level of a hierarchy on the
marginals
"""
_check_common_inputs(
figsize=figsize, title=title, context=context, font_scale=font_scale
)
# Handle ticklabels
if isinstance(xticklabels, list):
if len(xticklabels) != X.shape[1]:
msg = "xticklabels must have same length {}.".format(X.shape[1])
raise ValueError(msg)
elif not isinstance(xticklabels, bool):
msg = "xticklabels must be a bool or a list, not {}".format(type(xticklabels))
raise TypeError(msg)
if isinstance(yticklabels, list):
if len(yticklabels) != X.shape[0]:
msg = "yticklabels must have same length {}.".format(X.shape[0])
raise ValueError(msg)
elif not isinstance(yticklabels, bool):
msg = "yticklabels must be a bool or a list, not {}".format(type(yticklabels))
raise TypeError(msg)
# Handle cmap
if not isinstance(cmap, str):
msg = "cmap must be a string, not {}.".format(type(cmap))
raise TypeError(msg)
# Handle center
if center is not None:
if not isinstance(center, (int, float)):
msg = "center must be a integer or float, not {}.".format(type(center))
raise TypeError(msg)
# Handle cbar
if not isinstance(cbar, bool):
msg = "cbar must be a bool, not {}.".format(type(center))
raise TypeError(msg)
check_consistent_length(X, inner_hier_labels, outer_hier_labels)
arr = import_graph(X)
arr = _transform(arr, transform)
if inner_hier_labels is not None:
if outer_hier_labels is None:
arr = _sort_graph(arr, inner_hier_labels, np.ones_like(inner_hier_labels))
else:
arr = _sort_graph(arr, inner_hier_labels, outer_hier_labels)
# Global plotting settings
CBAR_KWS = dict(shrink=0.7)
with sns.plotting_context(context, font_scale=font_scale):
fig, ax = plt.subplots(figsize=figsize)
plot = sns.heatmap(
arr,
cmap=cmap,
square=True,
xticklabels=xticklabels,
yticklabels=yticklabels,
cbar_kws=CBAR_KWS,
center=center,
cbar=cbar,
ax=ax,
)
if title is not None:
plot.set_title(title)
if inner_hier_labels is not None:
if outer_hier_labels is not None:
plot.set_yticklabels([])
plot.set_xticklabels([])
_plot_groups(
plot, arr[0].shape[0], inner_hier_labels, outer_hier_labels
)
else:
_plot_groups(plot, arr[0].shape[0], inner_hier_labels)
return plot
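# A minimal usage sketch (not part of the original module); the random matrix
# here is purely illustrative.
def _demo_heatmap():
    # Plot a small random weighted adjacency matrix with the rank transform.
    A = np.random.rand(10, 10)
    return heatmap(A, transform="simple-all", title="demo heatmap")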
def gridplot(
X,
labels=None,
transform=None,
height=10,
title=None,
context="talk",
font_scale=1,
alpha=0.7,
sizes=(10, 200),
palette="Set1",
legend_name="Type",
inner_hier_labels=None,
outer_hier_labels=None,
):
r"""
Plots multiple graphs as a grid, with intensity denoted by the size
of dots on the grid.
Parameters
----------
X : list of nx.Graph or np.ndarray object
List of nx.Graph or numpy arrays to plot
labels : list of str
List of strings, which are labels for each element in X.
`len(X) == len(labels)`.
transform : None, or string {'log', 'zero-boost', 'simple-all', 'simple-nonzero'}
- 'log' :
Plots the log of all nonzero numbers
- 'zero-boost' :
Pass to ranks method. Preserves the edge weight for all 0s, but ranks
the other edges as if the ranks of all 0 edges had been assigned.
- 'simple-all':
Pass to ranks method. Assigns ranks to all non-zero edges, settling
ties using the average. Ranks are then scaled by
:math:`\frac{2 rank(\text{non-zero edges})}{n^2 + 1}`
where n is the number of nodes
- 'simple-nonzero':
Pass to ranks method. Same as simple-all, but ranks are scaled by
:math:`\frac{2 rank(\text{non-zero edges})}{\text{total non-zero edges} + 1}`
height : int, optional, default: 10
Height of figure in inches.
title : str, optional, default: None
Title of plot.
context : None, or one of {paper, notebook, talk (default), poster}
The name of a preconfigured set.
font_scale : float, optional, default: 1
Separate scaling factor to independently scale the size of the font
elements.
palette : str, dict, optional, default: 'Set1'
Set of colors for mapping the `hue` variable. If a dict, keys should
be values in the hue variable
alpha : float [0, 1], default : 0.7
alpha value of plotted gridplot points
sizes : length 2 tuple, default: (10, 200)
min and max size to plot edge weights
legend_name : string, default: 'Type'
Name to plot above the legend
inner_hier_labels : array-like, length of X's first dimension, default: None
Categorical labeling of the nodes. If not None, will group the nodes
according to these labels and plot the labels on the marginal
outer_hier_labels : array-like, length of X's first dimension, default: None
Categorical labeling of the nodes, ignored without `inner_hier_labels`
If not None, will plot these labels as the second level of a hierarchy on the
marginals
"""
_check_common_inputs(
height=height, title=title, context=context, font_scale=font_scale
)
if isinstance(X, list):
graphs = [import_graph(x) for x in X]
else:
msg = "X must be a list, not {}.".format(type(X))
raise TypeError(msg)
check_consistent_length(X, labels, inner_hier_labels, outer_hier_labels)
graphs = [_transform(arr, transform) for arr in graphs]
if inner_hier_labels is not None:
if outer_hier_labels is None:
graphs = [
_sort_graph(arr, inner_hier_labels, np.ones_like(inner_hier_labels))
for arr in graphs
]
else:
graphs = [
_sort_graph(arr, inner_hier_labels, outer_hier_labels) for arr in graphs
]
if isinstance(palette, str):
palette = sns.color_palette(palette, desat=0.75, n_colors=len(labels))
dfs = []
for idx, graph in enumerate(graphs):
rdx, cdx = np.where(graph > 0)
weights = graph[(rdx, cdx)]
df = pd.DataFrame(
np.vstack([rdx + 0.5, cdx + 0.5, weights]).T,
columns=["rdx", "cdx", "Weights"],
)
df[legend_name] = [labels[idx]] * len(cdx)
dfs.append(df)
df = pd.concat(dfs, axis=0)
with sns.plotting_context(context, font_scale=font_scale):
sns.set_style("white")
plot = sns.relplot(
data=df,
x="cdx",
y="rdx",
hue=legend_name,
size="Weights",
sizes=sizes,
alpha=alpha,
palette=palette,
height=height,
facet_kws={
"sharex": True,
"sharey": True,
"xlim": (0, graph.shape[0] + 1),
"ylim": (0, graph.shape[0] + 1),
},
)
plot.ax.axis("off")
plot.ax.invert_yaxis()
if title is not None:
plot.set(title=title)
if inner_hier_labels is not None:
if outer_hier_labels is not None:
_plot_groups(
plot.ax, graphs[0].shape[0], inner_hier_labels, outer_hier_labels
)
else:
_plot_groups(plot.ax, graphs[0].shape[0], inner_hier_labels)
return plot
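# Illustrative usage sketch (not part of the original module): two random
# weighted graphs plotted on one grid, one colour per graph.
#
#   import numpy as np
#   g1, g2 = np.random.rand(20, 20), np.random.rand(20, 20)
#   gridplot([g1, g2], labels=["G1", "G2"], transform="simple-nonzero")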
# TODO would it be cool if pairplot reduced to single plot
def pairplot(
X,
labels=None,
col_names=None,
title=None,
legend_name=None,
variables=None,
height=2.5,
context="talk",
font_scale=1,
palette="Set1",
alpha=0.7,
size=50,
marker=".",
):
r"""
Plot pairwise relationships in a dataset.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
    labels : array-like or list, shape (n_samples), optional
        Labels that correspond to each sample in X.
col_names : array-like or list, shape (n_features), optional
Names or labels for each feature in X. If not provided, the default
will be `Dimension 1, Dimension 2, etc`.
title : str, optional, default: None
Title of plot.
legend_name : str, optional, default: None
Title of the legend.
variables : list of variable names, optional
Variables to plot based on col_names, otherwise use every column with
a numeric datatype.
    height : int, optional, default: 2.5
        Height (in inches) of each facet.
context : None, or one of {paper, notebook, talk (default), poster}
The name of a preconfigured set.
font_scale : float, optional, default: 1
Separate scaling factor to independently scale the size of the font
elements.
palette : str, dict, optional, default: 'Set1'
Set of colors for mapping the `hue` variable. If a dict, keys should
be values in the hue variable.
alpha : float, optional, default: 0.7
        opacity value of plotted markers between 0 and 1
size : float or int, optional, default: 50
size of plotted markers
marker : string, optional, default: '.'
matplotlib style marker specification
https://matplotlib.org/api/markers_api.html
"""
_check_common_inputs(
height=height,
title=title,
context=context,
font_scale=font_scale,
legend_name=legend_name,
)
# Handle X
if not isinstance(X, (list, np.ndarray)):
msg = "X must be array-like, not {}.".format(type(X))
raise TypeError(msg)
    # Handle labels
    if labels is not None:
        if not isinstance(labels, (list, np.ndarray)):
            msg = "labels must be array-like or list, not {}.".format(type(labels))
            raise TypeError(msg)
        elif X.shape[0] != len(labels):
            msg = "Expected length {}, but got length {} instead for labels.".format(
X.shape[0], len(labels)
)
raise ValueError(msg)
# Handle col_names
if col_names is None:
col_names = ["Dimension {}".format(i) for i in range(1, X.shape[1] + 1)]
elif not isinstance(col_names, list):
msg = "col_names must be a list, not {}.".format(type(col_names))
raise TypeError(msg)
elif X.shape[1] != len(col_names):
msg = "Expected length {}, but got length {} instead for col_names.".format(
X.shape[1], len(col_names)
)
raise ValueError(msg)
# Handle variables
if variables is not None:
if len(variables) > len(col_names):
msg = "variables cannot contain more elements than col_names."
raise ValueError(msg)
else:
for v in variables:
if v not in col_names:
msg = "{} is not a valid key.".format(v)
raise KeyError(msg)
else:
variables = col_names
diag_kind = "auto"
df = pd.DataFrame(X, columns=col_names)
if labels is not None:
if legend_name is None:
legend_name = "Type"
df_labels = pd.DataFrame(labels, columns=[legend_name])
df = pd.concat([df_labels, df], axis=1)
names, counts = np.unique(labels, return_counts=True)
if counts.min() < 2:
diag_kind = "hist"
plot_kws = dict(
alpha=alpha,
s=size,
        # edgecolor=None, # could add this later
linewidth=0,
marker=marker,
)
with sns.plotting_context(context=context, font_scale=font_scale):
if labels is not None:
pairs = sns.pairplot(
df,
hue=legend_name,
vars=variables,
height=height,
palette=palette,
diag_kind=diag_kind,
plot_kws=plot_kws,
)
else:
pairs = sns.pairplot(
df,
vars=variables,
height=height,
palette=palette,
diag_kind=diag_kind,
plot_kws=plot_kws,
)
pairs.set(xticks=[], yticks=[])
pairs.fig.subplots_adjust(top=0.945)
pairs.fig.suptitle(title)
return pairs
def _distplot(
data,
labels=None,
direction="out",
title="",
context="talk",
font_scale=1,
figsize=(10, 5),
palette="Set1",
xlabel="",
ylabel="Density",
):
fig = plt.figure(figsize=figsize)
ax = plt.gca()
palette = sns.color_palette(palette)
plt_kws = {"cumulative": True}
with sns.plotting_context(context=context, font_scale=font_scale):
if labels is not None:
categories, counts = np.unique(labels, return_counts=True)
for i, cat in enumerate(categories):
cat_data = data[np.where(labels == cat)]
if counts[i] > 1 and cat_data.min() != cat_data.max():
x = np.sort(cat_data)
y = np.arange(len(x)) / float(len(x))
plt.plot(x, y, label=cat, color=palette[i])
else:
ax.axvline(cat_data[0], label=cat, color=palette[i])
plt.legend()
else:
if data.min() != data.max():
sns.distplot(data, hist=False, kde_kws=plt_kws)
else:
ax.axvline(data[0])
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
return ax
def degreeplot(
X,
labels=None,
direction="out",
title="Degree plot",
context="talk",
font_scale=1,
figsize=(10, 5),
palette="Set1",
):
r"""
Plots the distribution of node degrees for the input graph.
Allows for sets of node labels, will plot a distribution for each
node category.
Parameters
----------
X : np.ndarray (2D)
input graph
labels : 1d np.ndarray or list, same length as dimensions of X
labels for different categories of graph nodes
direction : string, ('out', 'in')
for a directed graph, whether to plot out degree or in degree
title : string, default : 'Degree plot'
plot title
context : None, or one of {talk (default), paper, notebook, poster}
Seaborn plotting context
font_scale : float, optional, default: 1
Separate scaling factor to independently scale the size of the font
elements.
palette : str, dict, optional, default: 'Set1'
Set of colors for mapping the `hue` variable. If a dict, keys should
be values in the hue variable.
figsize : tuple of length 2, default (10, 5)
size of the figure (width, height)
Returns
-------
ax : matplotlib axis object
"""
_check_common_inputs(
figsize=figsize, title=title, context=context, font_scale=font_scale
)
check_array(X)
if direction == "out":
axis = 0
check_consistent_length((X, labels))
elif direction == "in":
axis = 1
check_consistent_length((X.T, labels))
else:
raise ValueError('direction must be either "out" or "in"')
degrees = np.count_nonzero(X, axis=axis)
ax = _distplot(
degrees,
labels=labels,
title=title,
context=context,
font_scale=font_scale,
figsize=figsize,
palette=palette,
xlabel="Node degree",
)
return ax
def edgeplot(
X,
labels=None,
nonzero=False,
title="Edge plot",
context="talk",
font_scale=1,
figsize=(10, 5),
palette="Set1",
):
r"""
Plots the distribution of edge weights for the input graph.
Allows for sets of node labels, will plot edge weight distribution
for each node category.
Parameters
----------
X : np.ndarray (2D)
input graph
labels : 1d np.ndarray or list, same length as dimensions of X
labels for different categories of graph nodes
nonzero : boolean, default: False
whether to restrict the edgeplot to only the non-zero edges
    title : string, default : 'Edge plot'
plot title
context : None, or one of {talk (default), paper, notebook, poster}
Seaborn plotting context
font_scale : float, optional, default: 1
Separate scaling factor to independently scale the size of the font
elements.
palette : str, dict, optional, default: 'Set1'
Set of colors for mapping the `hue` variable. If a dict, keys should
be values in the hue variable.
figsize : tuple of length 2, default (10, 5)
size of the figure (width, height)
Returns
-------
ax : matplotlib axis object
"""
_check_common_inputs(
figsize=figsize, title=title, context=context, font_scale=font_scale
)
check_array(X)
check_consistent_length((X, labels))
edges = X.ravel()
labels =
|
np.tile(labels, (1, X.shape[1]))
|
numpy.tile
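A minimal, self-contained sketch of what the completion above produces (illustrative values only): np.tile repeats the per-node label vector so that it has one entry per element of the flattened adjacency matrix used by edgeplot.

import numpy as np

X = np.array([[0.0, 1.5],
              [2.0, 0.0]])
labels = np.array(["a", "b"])

edges = X.ravel()                           # shape (4,)
tiled = np.tile(labels, (1, X.shape[1]))    # shape (1, 4): ['a', 'b', 'a', 'b']
print(edges.shape, tiled.ravel().tolist())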
|
"""
Modulo
Neural Modular Networks in PyTorch
train.py
"""
import argparse
import pickle
import os
import torch
from tqdm import tqdm
from torch.nn import BCEWithLogitsLoss
from torch.nn.functional import sigmoid
from torch.autograd import Variable
from torch.nn.utils.clip_grad import clip_grad_norm
import numpy as np
from torch.optim import Adam, SGD, Adadelta, RMSprop
import time
from random import shuffle
from seq2seq import AttentionSeq2Seq
from modules import *
from preprocessing import read_datasets
parser = argparse.ArgumentParser()
# General training arguments
parser.add_argument('--pickle_path', help='Path to pickle file generated '
'during the preprocessing step. '
'This file must be created prior '
'to training.',
type=str, default='modulo.pkl')
parser.add_argument('--epochs', help='Number of epochs to train the model',
type=int, default=1000)
parser.add_argument('--visdom',
help='Boolean flag for using Visdom visualization',
type=bool, default=True)
parser.add_argument('--checkpoint_path', help='Path to store checkpoint TAR '
'file during training',
type=str, default='checkpoint.tar')
parser.add_argument('--checkpoint_freq', help='Save a checkpoint every '
'checkpoint_freq epochs',
type=int, default=100)
parser.add_argument('--optimizer', help='Training optimizer', type=str,
default='adam',
choices={'adam', 'sgd', 'adadelta', 'rmsprop'})
parser.add_argument('--learning_rate', help='Optimizer learning rate',
type=float, default=0.001)
parser.add_argument('--use_weight_decay',
help='Boolean flag for using weight decay',
type=bool, default=True)
parser.add_argument('--weight_decay',
help='Optimizer weight decay (L2 regularization)',
type=float, default=0)
parser.add_argument('--use_gradient_clipping',
help='Boolean flag for using gradient clipping',
type=bool, default=True)
parser.add_argument('--max_grad_norm', help='Max norm for gradient clipping',
type=float, default=10)
# Seq2Seq RNN arguments
parser.add_argument('--word_dim', help='Word embedding dimension',
type=int, default=256)
parser.add_argument('--hidden_dim', help='LSTM hidden dimension',
type=int, default=256)
parser.add_argument('--num_layers', help='Number of LSTM layers for encoder '
'and decoder', type=int, default=2)
parser.add_argument('--use_dropout',
help='Boolean flag for using dropout in LSTM layers '
'(except the final layer as usual)',
type=bool, default=True)
parser.add_argument('--dropout',
help='Dropout ratio in encoder/decoder LSTM',
type=float, default=0.5)
args = parser.parse_args()
assert(os.path.exists(args.pickle_path)), 'Provided pickle path is invalid'
print('Loading preprocessed pickle...')
with open(args.pickle_path, 'rb') as f:
state_dict = pickle.load(f)
print('...done.')
if state_dict['GPU_SUPPORT']:
from torch.cuda import FloatTensor as FloatTensor
else:
from torch import FloatTensor
training_task = SHAPESModuloTask()
param_list = []
for mod in training_task.module_dict.values():
param_list.extend(list(mod.parameters()))
if state_dict['GPU_SUPPORT']:
mod.cuda()
attn_seq2seq = AttentionSeq2Seq(
vocab_size_1=len(state_dict['VOCAB']),
vocab_size_2=len(state_dict['TOKENS']),
word_dim=args.word_dim,
hidden_dim=args.hidden_dim,
batch_size=state_dict['BATCH_SIZE'],
num_layers=args.num_layers,
use_dropout=args.use_dropout,
dropout=args.dropout,
use_cuda=state_dict['GPU_SUPPORT']
)
param_list.extend(list(attn_seq2seq.parameters()))
print('Number of trainable parameters: {}'.format(
sum(param.numel() for param in param_list if param.requires_grad)))
if state_dict['GPU_SUPPORT']:
attn_seq2seq.cuda()
loss = BCEWithLogitsLoss()
if args.optimizer == 'adam':
optimizer = Adam(
params=param_list,
lr=args.learning_rate,
weight_decay=args.weight_decay
)
elif args.optimizer == 'sgd':
optimizer = SGD(
params=param_list,
lr=args.learning_rate,
weight_decay=args.weight_decay
)
elif args.optimizer == 'adadelta':
optimizer = Adadelta(
params=param_list,
lr=args.learning_rate,
weight_decay=args.weight_decay
)
elif args.optimizer == 'rmsprop':
optimizer = RMSprop(
params=param_list,
lr=args.learning_rate,
weight_decay=args.weight_decay
)
else:
raise ValueError('{} is not a supported optimizer'.format(args.optimizer))
train_query_list, train_layout_list, train_answer_list = \
read_datasets(state_dict['QUERY_TRAIN'], state_dict['LAYOUT_TRAIN'],
state_dict['ANSWER_TRAIN'], return_unique=False)
valid_query_list, valid_layout_list, valid_answer_list = \
read_datasets(state_dict['QUERY_VALID'], state_dict['LAYOUT_VALID'],
state_dict['ANSWER_VALID'], return_unique=False)
test_query_list, test_layout_list, test_answer_list = \
read_datasets(state_dict['QUERY_TEST'], state_dict['LAYOUT_TEST'],
state_dict['ANSWER_TEST'], return_unique=False)
train_qbatches = state_dict['TRAIN_QBATCHES']
train_lbatches = state_dict['TRAIN_LBATCHES']
train_obatches = state_dict['TRAIN_OBATCHES']
valid_qbatches = state_dict['VALID_QBATCHES']
valid_lbatches = state_dict['VALID_LBATCHES']
valid_obatches = state_dict['VALID_OBATCHES']
test_qbatches = state_dict['TEST_QBATCHES']
test_lbatches = state_dict['TEST_LBATCHES']
test_obatches = state_dict['TEST_OBATCHES']
if isinstance(training_task, VQAModuloTask):
train_images =
|
np.load(state_dict['IMG_TRAIN'])
|
numpy.load
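For context, a minimal sketch of the .npy round trip implied by the completion above; the file name here is illustrative, not the path used by the original pipeline.

import numpy as np

np.save('img_train_example.npy', np.zeros((4, 64, 64, 3), dtype=np.float32))
train_images = np.load('img_train_example.npy')
print(train_images.shape)  # (4, 64, 64, 3)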
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 Alibaba Group Holding Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pandas as pd
try:
import scipy as sp
import scipy.sparse
except ImportError:
sp = None
import pytest
import vineyard
from vineyard.core import default_builder_context
from vineyard.core import default_resolver_context
from vineyard.data import register_builtin_types
register_builtin_types(default_builder_context, default_resolver_context)
def test_numpy_ndarray(vineyard_client):
arr = np.random.rand(4, 5, 6)
object_id = vineyard_client.put(arr)
np.testing.assert_allclose(arr, vineyard_client.get(object_id))
def test_empty_ndarray(vineyard_client):
arr = np.ones(())
object_id = vineyard_client.put(arr)
np.testing.assert_allclose(arr, vineyard_client.get(object_id))
arr = np.ones((0, 1))
object_id = vineyard_client.put(arr)
np.testing.assert_allclose(arr, vineyard_client.get(object_id))
arr = np.ones((0, 1, 2))
object_id = vineyard_client.put(arr)
np.testing.assert_allclose(arr, vineyard_client.get(object_id))
arr = np.ones((0, 1, 2, 3))
object_id = vineyard_client.put(arr)
np.testing.assert_allclose(arr, vineyard_client.get(object_id))
arr = np.zeros((), dtype='int')
object_id = vineyard_client.put(arr)
np.testing.assert_allclose(arr, vineyard_client.get(object_id))
arr = np.zeros((0, 1), dtype='int')
object_id = vineyard_client.put(arr)
np.testing.assert_allclose(arr, vineyard_client.get(object_id))
arr = np.zeros((0, 1, 2), dtype='int')
object_id = vineyard_client.put(arr)
np.testing.assert_allclose(arr, vineyard_client.get(object_id))
arr = np.zeros((0, 1, 2, 3), dtype='int')
object_id = vineyard_client.put(arr)
np.testing.assert_allclose(arr, vineyard_client.get(object_id))
def test_str_ndarray(vineyard_client):
arr = np.array(['', 'x', 'yz', 'uvw'])
object_id = vineyard_client.put(arr)
np.testing.assert_equal(arr, vineyard_client.get(object_id))
def test_object_ndarray(vineyard_client):
arr = np.array([1, 'x', 3.14, (1, 4)], dtype=object)
object_id = vineyard_client.put(arr)
np.testing.assert_equal(arr, vineyard_client.get(object_id))
arr =
|
np.ones((), dtype='object')
|
numpy.ones
|
import numpy as np
# Local modules
from constants import DEFAULT_N0, DEFAULT_N1, DEFAULT_N2, NO_INTERSECTION
from normal_map import NormalMap
import utils
class Object:
"""
Represent a generic object inside the scene that has a specific position,
material and intersect function.
Attributes:
position(numpy.array): A 3D point that represents the position
material(Material): The material to be rendered for this object
shader_type(string): The type of shader to use for this object
ID(int): The index inside the scene
normal_map(NormalMap): An object that allows you to get normals mapping
points of the object to a texture
"""
def __init__(self, position, material, shader_type):
self.position = position
self.material = material
self.shader_type = shader_type
self.ID = None
self.normal_map = None
def set_id(self, idx):
self.ID = idx
def normal_at(self, p):
"""
Get the normal at point p.
"""
pass
def uvmap(self, p):
"""
Map point p into texture coordinates u, v for this object.
"""
pass
def add_normal_map(self, texture):
self.normal_map = NormalMap(texture, self)
class Sphere(Object):
"""
Represent a Sphere object to be used in a scene.
Attributes:
position(numpy.array): A 3D point inside the plane
material(Material): The material to be rendered for this object
shader_type(string): The type of shader to use for this object
radius(float): The radius of this sphere
        rotation(numpy.array): Rotation around x, y and z (in radians, as used by np.cos/np.sin).
"""
def __init__(
self, position, material, shader_type, radius, rotation=np.zeros(3)
):
Object.__init__(self, position, material, shader_type)
self.radius = radius
self.rotation = rotation
def __str__(self):
return "r: {}, pc: {}, rot: {}".format(
self.radius, self.position, self.rotation
)
def normal_at(self, p):
        # This doesn't validate that p is on the surface
if self.normal_map:
return self.normal_map.get_normal(p)
return (p - self.position) / float(self.radius)
def physical_normal_at(self, p):
return (p - self.position) / float(self.radius)
def rotate_x(self, v):
theta = self.rotation[0]
rot_mat = np.array([
[1, 0, 0],
[0, np.cos(theta), -np.sin(theta)],
[0, np.sin(theta), np.cos(theta)]
])
rotated_v = np.dot(rot_mat, v)
return rotated_v
def rotate_y(self, v):
theta = self.rotation[1]
rot_mat = np.array([
[np.cos(theta), 0, np.sin(theta)],
[0, 1, 0],
[-np.sin(theta), 0, np.cos(theta)]
])
rotated_v = np.dot(rot_mat, v)
return rotated_v
def rotate_z(self, v):
theta = self.rotation[2]
rot_mat = np.array([
[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]
])
rotated_v = np.dot(rot_mat, v)
return rotated_v
def get_orientation(self):
"""
Get the perpendicular unit vectors n0, n1, n2 that define the
orientation of this object
"""
        # Only handles rotation around z for now
n0 = DEFAULT_N0
n1 = DEFAULT_N1
if self.rotation[2] != 0.0:
n0 = self.rotate_z(n0)
n1 = self.rotate_z(n1)
return n0, n1, DEFAULT_N2
def uvmap(self, p):
"""
Map this point into texture coordinates u, v.
Args:
p(numpy.array): Point inside this object that will be transformed
into texture coordinates (u, v)
Returns:
tuple: Point (u, v) in texture coordinates
"""
# local_v is the unit vector that goes in the direction from the center
# of the sphere to the position p
local_v = (p - self.position) / self.radius
n0, n1, n2 = self.get_orientation()
x = np.dot(n0, local_v)
y = np.dot(n1, local_v)
z = np.dot(n2, local_v)
# phi = np.arccos(z)
# v = phi / np.pi
# theta = np.arccos((y / np.sin(phi)).round(4))
# if x < 0:
# theta = 2 * np.pi - theta
# u = theta / (2 * np.pi)
u = 0.5 + np.arctan2(z, x) / (2 * np.pi)
v = 0.5 - np.arcsin(y) / np.pi
v = 1 - v
return u, v
def intersect_sphere_np(self, pr, nr):
pc = self.position
dif = pr - pc
b = np.dot(nr, dif)
c = np.dot(dif, dif) - self.radius ** 2
discriminant = b ** 2 - c
t = -1 * b -
|
np.sqrt(discriminant)
|
numpy.sqrt
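A self-contained numeric check of the quadratic used in intersect_sphere_np; the negative-discriminant handling below is an assumption, since the rest of the method (and the NO_INTERSECTION constant) is not shown in the excerpt.

import numpy as np

pr = np.array([0.0, 0.0, -5.0])    # ray origin
nr = np.array([0.0, 0.0, 1.0])     # unit ray direction
pc = np.array([0.0, 0.0, 0.0])     # sphere centre
radius = 1.0

dif = pr - pc
b = np.dot(nr, dif)
c = np.dot(dif, dif) - radius ** 2
discriminant = b ** 2 - c
if discriminant < 0:
    t = None                        # ray misses the sphere
else:
    t = -1 * b - np.sqrt(discriminant)
print(t)                            # 4.0: first hit at z = -1, four units away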
|
import tensorflow as tf
import numpy as np
import cv2
import random
import PIL.Image
HEIGHT=192
WIDTH=256
NUM_PLANES = 20
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def writeExample(writer, validating, imagePath):
img = cv2.imread(imagePath['image'])
img = cv2.resize(img, (WIDTH, HEIGHT), interpolation=cv2.INTER_LINEAR)
height = img.shape[0]
width = img.shape[1]
img_raw = img.tostring()
normal = np.array(PIL.Image.open(imagePath['normal'])).astype(np.float32) / 255 * 2 - 1
normal = cv2.resize(normal, (WIDTH, HEIGHT), interpolation=cv2.INTER_LINEAR)
norm = np.linalg.norm(normal, 2, 2)
for c in range(3):
normal[:, :, c] /= norm
continue
depth = np.array(PIL.Image.open(imagePath['depth'])).astype(np.float32) / 1000
depth = cv2.resize(depth, (WIDTH, HEIGHT), interpolation=cv2.INTER_LINEAR)
invalid_mask = cv2.imread(imagePath['mask'], 0)
invalid_mask = cv2.resize(invalid_mask, (WIDTH, HEIGHT), interpolation=cv2.INTER_LINEAR)
invalid_mask = (invalid_mask < 128).astype(np.uint8)
invalid_mask_raw = invalid_mask.tostring()
# planeGlobal = np.load(imagePath['plane'])[:, :3]
# numPlanes = planeGlobal.shape[0]
# if numPlanes > NUM_PLANES:
# planeGlobal = planeGlobal[:NUM_PLANES]
# elif numPlanes < NUM_PLANES:
# planeGlobal = np.concatenate([planeGlobal, np.zeros((NUM_PLANES - numPlanes, 3))])
# pass
#masks = np.load(imagePath['masks'])
#masks = cv2.resize(masks, (WIDTH, HEIGHT), interpolation=cv2.INTER_NEAREST)
example = tf.train.Example(features=tf.train.Features(feature={
#'height': _int64_feature([height]),
#'width': _int64_feature([width]),
#'num_planes': _int64_feature([numPlanes]),
'image_path': _bytes_feature(imagePath['image']),
'image_raw': _bytes_feature(img_raw),
'normal': _float_feature(normal.reshape(-1)),
'depth': _float_feature(depth.reshape(-1)),
'invalid_mask_raw': _bytes_feature(invalid_mask_raw),
#'plane': _float_feature(planeGlobal.reshape(-1)),
#'plane_mask': _int64_feature(masks.reshape(-1)),
#'validating': _int64_feature([validating])
}))
writer.write(example.SerializeToString())
return
def loadImagePaths():
image_set_file = '../PythonScripts/SUNCG/image_list_500000.txt'
with open(image_set_file) as f:
filenames = [x.strip().replace('plane_global.npy', '') for x in f.readlines()]
image_paths = [{'image': x + 'mlt.png', 'plane': x + 'plane_global.npy', 'normal': x + 'norm_camera.png', 'depth': x + 'depth.png', 'mask': x + 'valid.png', 'masks': x + 'masks.npy'} for x in filenames]
pass
return image_paths
def readRecordFile():
tfrecords_filename = '../planes.tfrecords'
record_iterator = tf.python_io.tf_record_iterator(path=tfrecords_filename)
for string_record in record_iterator:
example = tf.train.Example()
example.ParseFromString(string_record)
height = int(example.features.feature['height']
.int64_list
.value[0])
width = int(example.features.feature['width']
.int64_list
.value[0])
img_string = (example.features.feature['image_raw']
.bytes_list
.value[0])
plane = (example.features.feature['plane_raw']
.float_list
.value)
plane_mask = (example.features.feature['plane_mask_raw']
.int64_list
.value)
img_1d =
|
np.fromstring(img_string, dtype=np.uint8)
|
numpy.fromstring
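A short sketch of the matching decode step for img_raw; np.fromstring is deprecated for raw byte buffers in current NumPy, so this uses np.frombuffer, which returns the same values.

import numpy as np

HEIGHT, WIDTH = 192, 256
img = np.random.randint(0, 256, (HEIGHT, WIDTH, 3), dtype=np.uint8)
img_string = img.tobytes()           # equivalent to the .tostring() call in writeExample

img_1d = np.frombuffer(img_string, dtype=np.uint8)
decoded = img_1d.reshape((HEIGHT, WIDTH, 3))
print(np.array_equal(decoded, img))  # True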
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rigid-body transformations including velocities and static forces."""
from absl import logging
import numpy as np
# Constants used to determine when a rotation is close to a pole.
_POLE_LIMIT = (1.0 - 1e-6)
_TOL = 1e-10
# Constant used to decide when to use the arccos over the arcsin
_TOL_ARCCOS = 1 / np.sqrt(2)
_IDENTITY_QUATERNION = np.array([1, 0, 0, 0], dtype=np.float64)
def _clip_within_precision(number: float, low: float,
high: float, precision: float = _TOL):
"""Clips input to the range [low, high], checking precision.
Args:
number: Number to be clipped.
low: Lower bound (inclusive).
high: Upper bound (inclusive).
precision: Tolerance.
Returns:
Input clipped to given range.
Raises:
ValueError: If number is outside given range by more than given precision.
"""
if number < low - precision or number > high + precision:
raise ValueError(
'Input {:.12f} not inside range [{:.12f}, {:.12f}] with precision {}'.
format(number, low, high, precision))
return np.clip(number, low, high)
def _batch_mm(m1, m2):
"""Batch matrix multiply.
Args:
m1: input lhs matrix with shape (batch, n, m).
m2: input rhs matrix with shape (batch, m, o).
Returns:
product matrix with shape (batch, n, o).
"""
return np.einsum('bij,bjk->bik', m1, m2)
def _rmat_to_euler_xyz(rmat):
"""Converts a 3x3 rotation matrix to XYZ euler angles."""
# | r00 r01 r02 | | cy*cz -cy*sz sy |
# | r10 r11 r12 | = | cz*sx*sy+cx*sz cx*cz-sx*sy*sz -cy*sx |
# | r20 r21 r22 | | -cx*cz*sy+sx*sz cz*sx+cx*sy*sz cx*cy |
if rmat[0, 2] > _POLE_LIMIT:
logging.log_every_n_seconds(logging.WARNING, 'Angle at North Pole', 60)
z = np.arctan2(rmat[1, 0], rmat[1, 1])
y = np.pi/2
x = 0.0
return np.array([x, y, z])
if rmat[0, 2] < -_POLE_LIMIT:
logging.log_every_n_seconds(logging.WARNING, 'Angle at South Pole', 60)
z = np.arctan2(rmat[1, 0], rmat[1, 1])
y = -np.pi/2
x = 0.0
return np.array([x, y, z])
z = -np.arctan2(rmat[0, 1], rmat[0, 0])
y = np.arcsin(rmat[0, 2])
x = -np.arctan2(rmat[1, 2], rmat[2, 2])
# order of return is the order of input
return np.array([x, y, z])
def _rmat_to_euler_xyx(rmat):
"""Converts a 3x3 rotation matrix to XYX euler angles."""
# | r00 r01 r02 | | cy sy*sx1 sy*cx1 |
# | r10 r11 r12 | = | sy*sx0 cx0*cx1-cy*sx0*sx1 -cy*cx1*sx0-cx0*sx1 |
# | r20 r21 r22 | | -sy*cx0 cx1*sx0+cy*cx0*sx1 cy*cx0*cx1-sx0*sx1 |
if rmat[0, 0] < 1.0:
if rmat[0, 0] > -1.0:
y = np.arccos(_clip_within_precision(rmat[0, 0], -1., 1.))
x0 = np.arctan2(rmat[1, 0], -rmat[2, 0])
x1 = np.arctan2(rmat[0, 1], rmat[0, 2])
return np.array([x0, y, x1])
else:
# Not a unique solution: x1_angle - x0_angle = atan2(-r12,r11)
y = np.pi
x0 = -np.arctan2(-rmat[1, 2], rmat[1, 1])
x1 = 0.0
return np.array([x0, y, x1])
else:
# Not a unique solution: x1_angle + x0_angle = atan2(-r12,r11)
y = 0.0
x0 = -np.arctan2(-rmat[1, 2], rmat[1, 1])
x1 = 0.0
return np.array([x0, y, x1])
def _rmat_to_euler_zyx(rmat):
"""Converts a 3x3 rotation matrix to ZYX euler angles."""
if rmat[2, 0] > _POLE_LIMIT:
logging.warning('Angle at North Pole')
x = np.arctan2(rmat[0, 1], rmat[0, 2])
y = -np.pi/2
z = 0.0
return np.array([z, y, x])
if rmat[2, 0] < -_POLE_LIMIT:
logging.warning('Angle at South Pole')
x = np.arctan2(rmat[0, 1], rmat[0, 2])
y = np.pi/2
z = 0.0
return np.array([z, y, x])
x = np.arctan2(rmat[2, 1], rmat[2, 2])
y = -np.arcsin(rmat[2, 0])
z = np.arctan2(rmat[1, 0], rmat[0, 0])
# order of return is the order of input
return np.array([z, y, x])
def _rmat_to_euler_xzy(rmat):
"""Converts a 3x3 rotation matrix to XZY euler angles."""
if rmat[0, 1] > _POLE_LIMIT:
logging.warning('Angle at North Pole')
y = np.arctan2(rmat[1, 2], rmat[1, 0])
z = -np.pi/2
x = 0.0
return np.array([x, z, y])
if rmat[0, 1] < -_POLE_LIMIT:
logging.warning('Angle at South Pole')
y = np.arctan2(rmat[1, 2], rmat[1, 0])
z = np.pi/2
x = 0.0
return np.array([x, z, y])
y = np.arctan2(rmat[0, 2], rmat[0, 0])
z = -np.arcsin(rmat[0, 1])
x = np.arctan2(rmat[2, 1], rmat[1, 1])
# order of return is the order of input
return np.array([x, z, y])
def _rmat_to_euler_yzx(rmat):
"""Converts a 3x3 rotation matrix to YZX euler angles."""
if rmat[1, 0] > _POLE_LIMIT:
logging.warning('Angle at North Pole')
x = -np.arctan2(rmat[0, 2], rmat[0, 1])
z = np.pi/2
y = 0.0
return np.array([y, z, x])
if rmat[1, 0] < -_POLE_LIMIT:
logging.warning('Angle at South Pole')
x = -np.arctan2(rmat[0, 2], rmat[0, 1])
z = -np.pi/2
y = 0.0
return np.array([y, z, x])
x = -np.arctan2(rmat[1, 2], rmat[1, 1])
z = np.arcsin(rmat[1, 0])
y = -np.arctan2(rmat[2, 0], rmat[0, 0])
# order of return is the order of input
return np.array([y, z, x])
def _rmat_to_euler_zxy(rmat):
"""Converts a 3x3 rotation matrix to ZXY euler angles."""
if rmat[2, 1] > _POLE_LIMIT:
logging.warning('Angle at North Pole')
y = np.arctan2(rmat[0, 2], rmat[0, 0])
x = np.pi/2
z = 0.0
return np.array([z, x, y])
if rmat[2, 1] < -_POLE_LIMIT:
logging.warning('Angle at South Pole')
y = np.arctan2(rmat[0, 2], rmat[0, 0])
x = -np.pi/2
z = 0.0
return np.array([z, x, y])
y = -np.arctan2(rmat[2, 0], rmat[2, 2])
x = np.arcsin(rmat[2, 1])
z = -np.arctan2(rmat[0, 1], rmat[1, 1])
# order of return is the order of input
return np.array([z, x, y])
def _rmat_to_euler_yxz(rmat):
"""Converts a 3x3 rotation matrix to YXZ euler angles."""
if rmat[1, 2] > _POLE_LIMIT:
logging.warning('Angle at North Pole')
z = -np.arctan2(rmat[0, 1], rmat[0, 0])
x = -np.pi/2
y = 0.0
return np.array([y, x, z])
if rmat[1, 2] < -_POLE_LIMIT:
logging.warning('Angle at South Pole')
z = -np.arctan2(rmat[0, 1], rmat[0, 0])
x = np.pi/2
y = 0.0
return np.array([y, x, z])
z = np.arctan2(rmat[1, 0], rmat[1, 1])
x = -np.arcsin(rmat[1, 2])
y = np.arctan2(rmat[0, 2], rmat[2, 2])
# order of return is the order of input
return np.array([y, x, z])
def _rmat_to_euler_xzx(rmat):
"""Converts a 3x3 rotation matrix to XZX euler angles."""
# | r00 r01 r02 | | cz -sz*cx1 sz*sx1 |
# | r10 r11 r12 | = | cx0*sz cx0*cz*cx1-sx0*sx1 -sx0*cx1-cx0*cz*sx1 |
# | r20 r21 r22 | | sx0*sz sx0*cz*cx1+cx0*sx1 cx0*cx1-sx0*cz*sx1 |
if rmat[0, 0] < 1.0:
if rmat[0, 0] > -1.0:
z = np.arccos(_clip_within_precision(rmat[0, 0], -1., 1.))
x0 = np.arctan2(rmat[2, 0], rmat[1, 0])
x1 = np.arctan2(rmat[0, 2], -rmat[0, 1])
return np.array([x0, z, x1])
else:
# Not a unique solution: x0_angle - x1_angle = atan2(r12,r11)
z = np.pi
x0 = np.arctan2(rmat[1, 2], rmat[1, 1])
x1 = 0.0
return np.array([x0, z, x1])
else:
# Not a unique solution: x0_angle + x1_angle = atan2(-r12, r11)
z = 0.0
x0 = np.arctan2(-rmat[1, 2], rmat[1, 1])
x1 = 0.0
return np.array([x0, z, x1])
def _rmat_to_euler_yxy(rmat):
"""Converts a 3x3 rotation matrix to YXY euler angles."""
# | r00 r01 r02 | = | -sy0*sy1*cx+cy0*cy1 sx*sy0 sy0*cx*cy1+sy1*cy0 |
# | r10 r11 r12 | = | sx*sy1, cx -sx*cy1 |
# | r20 r21 r22 | = | -sy0*cy1-sy1*cx*cy0 sx*cy0 -sy0*sy1+cx*cy0*cy1 |
if rmat[1, 1] < 1.0:
if rmat[1, 1] > -1.0:
x = np.arccos(_clip_within_precision(rmat[1, 1], -1., 1.))
y0 = np.arctan2(rmat[0, 1], rmat[2, 1])
y1 = np.arctan2(rmat[1, 0], -rmat[1, 2])
return np.array([y0, x, y1])
else:
# Not a unique solution: y0_angle - y1_angle = atan2(r02, r22)
x = np.pi
y0 = np.arctan2(rmat[0, 2], rmat[2, 2])
y1 = 0.0
return np.array([y0, x, y1])
else:
# Not a unique solution: y0_angle + y1_angle = atan2(r02, r22)
x = 0.0
y0 = np.arctan2(rmat[0, 2], rmat[2, 2])
y1 = 0.0
return np.array([y0, x, y1])
def _rmat_to_euler_yzy(rmat):
"""Converts a 3x3 rotation matrix to YZY euler angles."""
# | r00 r01 r02 | = | -sy0*sy1+cy0*cy1*cz -sz*cy0 sy0*cy1+sy1*cy0*cz |
# | r10 r11 r12 | = | sz*cy1 cz sy1*sz |
# | r20 r21 r22 | = | -sy0*cy1*cz-sy1*cy0 sy0*sz -sy0*sy1*cz+cy0*cy1 |
if rmat[1, 1] < 1.0:
if rmat[1, 1] > -1.0:
z = np.arccos(_clip_within_precision(rmat[1, 1], -1., 1.))
y0 = np.arctan2(rmat[2, 1], -rmat[0, 1])
y1 = np.arctan2(rmat[1, 2], rmat[1, 0])
return np.array([y0, z, y1])
else:
# Not a unique solution: y0_angle - y1_angle = atan2(r02, r22)
z = np.pi
y0 = np.arctan2(rmat[0, 2], rmat[2, 2])
y1 = 0.0
return np.array([y0, z, y1])
else:
# Not a unique solution: y0_angle + y1_angle = atan2(r02, r22)
z = 0.0
y0 = np.arctan2(rmat[0, 2], rmat[2, 2])
y1 = 0.0
return np.array([y0, z, y1])
def _rmat_to_euler_zxz(rmat):
"""Converts a 3x3 rotation matrix to ZXZ euler angles."""
# | r00 r01 r02 | = | -sz0*sz1*cx+cz0*cz1 -sz0*cx*cz1-sz1*cz0 sx*sz0 |
# | r10 r11 r12 | = | sz0*cz1+sz1*cx*cz0 -sz0*sz1+cx*cz0*cz1 -sx*cz0 |
# | r20 r21 r22 | = | sx*sz1 sx*cz1 cx |
if rmat[2, 2] < 1.0:
if rmat[2, 2] > -1.0:
x = np.arccos(_clip_within_precision(rmat[2, 2], -1., 1.))
z0 = np.arctan2(rmat[0, 2], -rmat[1, 2])
z1 = np.arctan2(rmat[2, 0], rmat[2, 1])
return np.array([z0, x, z1])
else:
# Not a unique solution: z0_angle - z1_angle = atan2(r10, r00)
x = np.pi
z0 = np.arctan2(rmat[1, 0], rmat[0, 0])
z1 = 0.0
return np.array([z0, x, z1])
else:
# Not a unique solution: z0_angle + z1_angle = atan2(r10, r00)
x = 0.0
z0 = np.arctan2(rmat[1, 0], rmat[0, 0])
z1 = 0.0
return np.array([z0, x, z1])
def _rmat_to_euler_zyz(rmat):
"""Converts a 3x3 rotation matrix to ZYZ euler angles."""
# | r00 r01 r02 | = | -sz0*sz1+cy*cz0*cz1 -sz0*cz1-sz1*cy*cz0 sy*cz0 |
# | r10 r11 r12 | = | sz0*cy*cz1+sz1*cz0 -sz0*sz1*cy+cz0*cz1 sy*sz0 |
# | r20 r21 r22 | = | -sy*cz1 sy*sz1 cy |
if rmat[2, 2] < 1.0:
if rmat[2, 2] > -1.0:
y = np.arccos(_clip_within_precision(rmat[2, 2], -1., 1.))
z0 = np.arctan2(rmat[1, 2], rmat[0, 2])
z1 = np.arctan2(rmat[2, 1], -rmat[2, 0])
return np.array([z0, y, z1])
else:
# Not a unique solution: z0_angle - z1_angle = atan2(r10, r00)
y = np.pi
z0 = np.arctan2(rmat[1, 0], rmat[0, 0])
z1 = 0.0
return np.array([z0, y, z1])
else:
# Not a unique solution: z0_angle + z1_angle = atan2(r10, r00)
y = 0.0
z0 = np.arctan2(rmat[1, 0], rmat[0, 0])
z1 = 0.0
return np.array([z0, y, z1])
def _axis_rotation(theta, full: bool):
"""Returns the theta dim, cos and sin, and blank matrix for axis rotation."""
n = 1 if np.isscalar(theta) else len(theta)
ct = np.cos(theta)
st = np.sin(theta)
if full:
rmat = np.zeros((n, 4, 4))
rmat[:, 3, 3] = 1.
else:
rmat = np.zeros((n, 3, 3))
return n, ct, st, rmat
# map from full rotation orderings to euler conversion functions
_eulermap = {
'XYZ': _rmat_to_euler_xyz,
'XYX': _rmat_to_euler_xyx,
'XZY': _rmat_to_euler_xzy,
'ZYX': _rmat_to_euler_zyx,
'YZX': _rmat_to_euler_yzx,
'ZXY': _rmat_to_euler_zxy,
'YXZ': _rmat_to_euler_yxz,
'XZX': _rmat_to_euler_xzx,
'YXY': _rmat_to_euler_yxy,
'YZY': _rmat_to_euler_yzy,
'ZXZ': _rmat_to_euler_zxz,
'ZYZ': _rmat_to_euler_zyz,
}
def cross_mat_from_vec3(v):
"""Returns the skew-symmetric matrix cross-product operator.
Args:
v: A 3x1 vector.
Returns:
A matrix cross-product operator P (3x3) for the vector v = [x,y,z]^T,
such that v x b = Pb for any 3-vector b
"""
x, y, z = v[0], v[1], v[2]
return np.array([[0, -z, y],
[z, 0, -x],
[-y, x, 0]])
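# Quick check of the property stated in the docstring (illustrative, not part
# of the original module): v x b equals P @ b for the matrix built above.
#
#   v, b = np.array([1., 2., 3.]), np.array([4., 5., 6.])
#   np.allclose(np.cross(v, b), cross_mat_from_vec3(v) @ b)   # True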
def axisangle_to_euler(axisangle, ordering: str = 'XYZ'):
"""Returns euler angles corresponding to the exponential coordinates.
Args:
axisangle: A 3x1 numpy array describing the axis of rotation, with angle
encoded by its length.
ordering: Desired euler angle ordering.
Returns: A euler triple
"""
rmat = axisangle_to_rmat(axisangle)
return rmat_to_euler(rmat, ordering)
def axisangle_to_rmat(axisangle):
"""Returns rotation matrix corresponding to the exponential coordinates.
See Murray1994: A Mathematical Introduction to Robotic Manipulation
Args:
axisangle: A 3x1 numpy array describing the axis of rotation, with angle
encoded by its length.
  Returns:
    R: a 3x3 numpy array describing the rotation
"""
theta = np.linalg.norm(axisangle)
if np.allclose(theta, 0):
s_theta = cross_mat_from_vec3(axisangle)
return np.eye(3) + s_theta + s_theta.dot(s_theta) * 0.5
else:
wn = axisangle / theta
s = cross_mat_from_vec3(wn)
return np.eye(3) + s * np.sin(theta) + s.dot(s) * (1-np.cos(theta))
def axisangle_to_quat(axisangle):
"""Returns the quaternion corresponding to the provided axis-angle vector.
Args:
axisangle: A 3x1 numpy array describing the axis of rotation, with angle
encoded by its length
Returns:
quat: A quaternion [w, i, j, k]
"""
theta = np.linalg.norm(axisangle)
if np.allclose(theta, 0):
return _IDENTITY_QUATERNION
else:
wn = axisangle/theta
return np.hstack([np.cos(theta/2), wn * np.sin(theta/2)])
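# Example (illustrative, not part of the original module): a rotation of pi/2
# about the z axis gives the expected half-angle quaternion.
#
#   axisangle_to_quat(np.array([0., 0., np.pi / 2]))
#   # -> array([0.70710678, 0.        , 0.        , 0.70710678])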
def euler_to_axisangle(euler_vec, ordering: str = 'XYZ'):
"""Returns the euler angles corresponding to the provided axis-angle vector.
Args:
euler_vec: The euler angle rotations.
ordering: Desired euler angle ordering.
Returns:
axisangle: A 3x1 numpy array describing the axis of rotation, with angle
encoded by its length
"""
rmat = euler_to_rmat(euler_vec, ordering=ordering)
return rmat_to_axisangle(rmat)
def euler_to_quat(euler_vec, ordering: str = 'XYZ'):
"""Returns the quaternion corresponding to the provided euler angles.
Args:
euler_vec: The euler angle rotations.
ordering: Desired euler angle ordering.
Returns:
quat: A quaternion [w, i, j, k]
"""
mat = euler_to_rmat(euler_vec, ordering=ordering)
return mat_to_quat(mat)
def euler_to_rmat(euler_vec, ordering: str = 'ZXZ',
full: bool = False, extrinsic: bool = False):
"""Returns rotation matrix (or transform) for the given Euler rotations.
Euler*** methods compose a Rotation matrix corresponding to the given
rotations r1, r2, r3 following the given rotation ordering.
This operation follows the INTRINSIC rotation convention, i.e. defined w.r.t
the axes of the rotating system. Intrinsic rotations are evaluated in the
order provided. E.g. for XYZ we return rotX(r1) * rotY(r2) * rotZ(r3).
This is equivalent to ZYX extrinsic, because rotZ is evaluated first in the
fixed frame, which is then transformed by rotY and rotX.
From Wikipedia: http://en.wikipedia.org/wiki/Euler_angles
  Any extrinsic rotation is equivalent to an intrinsic rotation by the same
  angles but with inverted order of elemental rotations, and vice-versa. For
  instance, the intrinsic rotations x-y'-z" by angles alpha, beta, gamma are
  equivalent to the extrinsic rotations z-y-x by angles gamma, beta, alpha.
Args:
euler_vec: The euler angle rotations.
ordering: euler angle ordering string (see _euler_orderings).
full: If true, returns a full 4x4 transform.
extrinsic: Whether to use the extrinsic or intrinsic rotation convention.
Returns:
The rotation matrix or homogeneous transform corresponding to the given
Euler rotation.
"""
# map from partial rotation orderings to rotation functions
rotmap = {'X': rotation_x_axis, 'Y': rotation_y_axis, 'Z': rotation_z_axis}
rotations = [rotmap[c] for c in ordering]
if extrinsic:
rotations.reverse()
euler_vec = np.atleast_2d(euler_vec)
rots = []
for i in range(len(rotations)):
rots.append(rotations[i](euler_vec[:, i], full))
if rots[0].ndim == 3:
result = _batch_mm(_batch_mm(rots[0], rots[1]), rots[2])
return result.squeeze()
else:
return (rots[0].dot(rots[1])).dot(rots[2])
def positive_leading_quat(quat):
"""Returns the positive leading version of the quaternion.
This function supports inputs with or without leading batch dimensions.
Args:
quat: A quaternion [w, i, j, k].
Returns:
The equivalent quaternion [w, i, j, k] with w > 0.
"""
# Ensure quat is an np.array in case a tuple or a list is passed
quat = np.asarray(quat)
quat = np.where(np.tile(quat[..., 0:1] < 0, quat.shape[-1]), -quat, quat)
return quat
def quat_conj(quat):
"""Return conjugate of quaternion.
This function supports inputs with or without leading batch dimensions.
Args:
quat: A quaternion [w, i, j, k].
Returns:
A quaternion [w, -i, -j, -k] representing the inverse of the rotation
defined by `quat` (not assuming normalization).
"""
# Ensure quat is an np.array in case a tuple or a list is passed
quat = np.asarray(quat)
return np.stack(
[quat[..., 0], -quat[..., 1],
-quat[..., 2], -quat[..., 3]], axis=-1).astype(np.float64)
def quat_inv(quat):
"""Return inverse of quaternion.
This function supports inputs with or without leading batch dimensions.
Args:
quat: A quaternion [w, i, j, k].
Returns:
A quaternion representing the inverse of the original rotation.
"""
# Ensure quat is an np.array in case a tuple or a list is passed
quat =
|
np.asarray(quat)
|
numpy.asarray
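The body of quat_inv is cut off above; as a reference only, a standalone sketch of the usual inverse (conjugate divided by the squared norm), which reduces to the conjugate for unit quaternions.

import numpy as np

def quat_inv_reference(quat):
    # q^-1 = conj(q) / |q|^2 for quaternions stored as [w, i, j, k]
    quat = np.asarray(quat, dtype=np.float64)
    conj = quat * np.array([1.0, -1.0, -1.0, -1.0])
    return conj / np.dot(quat, quat)

q = np.array([np.cos(0.3), 0.0, 0.0, np.sin(0.3)])   # unit rotation about z
print(np.allclose(quat_inv_reference(q), q * np.array([1.0, 1.0, 1.0, -1.0])))  # True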
|
import torch
from options import TestOptions
from dataset import dataset_single, dataset_unpair
from model import DCDA
from saver import save_imgs
import os
import numpy as np
from numpy import mean, std
import pandas as pd
import scipy.spatial
import surface_distance
def getDSC(testImage, resultImage):
"""Compute the Dice Similarity Coefficient."""
testArray = testImage.flatten()
resultArray = resultImage.flatten()
return 1.0 - scipy.spatial.distance.dice(testArray, resultArray)
def getJaccard(testImage, resultImage):
    """Compute the Jaccard similarity coefficient."""
testArray = testImage.flatten()
resultArray = resultImage.flatten()
return 1.0 - scipy.spatial.distance.jaccard(testArray, resultArray)
def getPrecisionAndRecall(testImage, resultImage):
testArray = testImage.flatten()
resultArray = resultImage.flatten()
TP = np.sum(testArray*resultArray)
FP = np.sum((1-testArray)*resultArray)
FN = np.sum(testArray*(1-resultArray))
precision = TP/(TP+FP)
recall = TP/(TP+FN)
return precision, recall
def getHD_ASSD(seg_preds, seg_labels):
label_seg = np.array(seg_labels, dtype=bool)
predict = np.array(seg_preds, dtype=bool)
surface_distances = surface_distance.compute_surface_distances(
label_seg, predict, spacing_mm=(1, 1))
HD = surface_distance.compute_robust_hausdorff(surface_distances, 95)
distances_gt_to_pred = surface_distances["distances_gt_to_pred"]
distances_pred_to_gt = surface_distances["distances_pred_to_gt"]
surfel_areas_gt = surface_distances["surfel_areas_gt"]
surfel_areas_pred = surface_distances["surfel_areas_pred"]
ASSD = (np.sum(distances_pred_to_gt * surfel_areas_pred) + np.sum(distances_gt_to_pred * surfel_areas_gt))/(
|
np.sum(surfel_areas_gt)
|
numpy.sum
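The denominator of the ASSD expression is cut off above; the usual average symmetric surface distance weights both directed distance sets by their surface element areas, roughly as in this toy sketch (all values made up).

import numpy as np

distances_gt_to_pred = np.array([1.0, 2.0])
distances_pred_to_gt = np.array([0.5, 1.5, 1.0])
surfel_areas_gt = np.array([1.0, 1.0])
surfel_areas_pred = np.array([1.0, 1.0, 2.0])

assd = (np.sum(distances_pred_to_gt * surfel_areas_pred)
        + np.sum(distances_gt_to_pred * surfel_areas_gt)) / (
            np.sum(surfel_areas_gt) + np.sum(surfel_areas_pred))
print(assd)  # 7.0 / 6.0 ~= 1.167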
|
import numpy as np
from scipy.stats import ortho_group
def quad_func_params(lambda_1, lambda_2, m):
"""
Create function arguments for the quadratic function.
Parameters
----------
lambda_1 : integer
Smallest eigenvalue of diagonal matrix.
lambda_2 : integer
Largest eigenvalue of diagonal matrix.
m : integer
Number of variables.
Returns
----------
matrix_test : 2-D array
Positive definite matrix.
"""
if lambda_1 != lambda_2:
diag_vals = np.zeros(m)
diag_vals[:2] =
|
np.array([lambda_1, lambda_2])
|
numpy.array
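The rest of quad_func_params is not included above; one hedged way to finish it, consistent with its docstring and the ortho_group import, is sketched below (an illustrative completion, not the original implementation).

import numpy as np
from scipy.stats import ortho_group

def quad_func_params_sketch(lambda_1, lambda_2, m):
    # Illustrative only: fill the remaining eigenvalues and rotate the
    # diagonal by a random orthogonal matrix to get a positive definite matrix.
    diag_vals = np.zeros(m)
    diag_vals[:2] = np.array([lambda_1, lambda_2])
    diag_vals[2:] = np.random.uniform(lambda_1, lambda_2, m - 2)
    q = ortho_group.rvs(m)
    return q @ np.diag(diag_vals) @ q.T

matrix_test = quad_func_params_sketch(1, 10, 4)
print(np.all(np.linalg.eigvalsh(matrix_test) > 0))  # True: positive definite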
|
import sys
import numpy as np
from itertools import combinations
from pyemto.utilities.utils import rotation_matrix
import spglib as spg
try:
from pymatgen import Lattice, Structure
from pymatgen.vis.structure_vtk import StructureVis
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.util.coord import get_angle
except ImportError:
# pymatgen has not been installed
raise ImportError('emto_input_generator requires pymatgen>=4.4.0 to be installed!')
import os
import pyemto
import pyemto.common.common as common
class EMTO:
"""This class can be used to create EMTO input files from
an arbitrary structure. What is needed as input:
-primitive lattice vectors,
-basis vectors,
-list of atomic species that occupy the basis sites.
"""
def __init__(self, folder=None, EMTOdir=None):
""" """
if folder is None:
self.folder = os.getcwd()
else:
self.folder = folder
if EMTOdir is None:
self.EMTOdir = '/home/EMTO'
else:
self.EMTOdir = EMTOdir
self.sg2ibz = {1:14, 2:14, 3:12, 4:12, 5:13, 6:12, 7:12, 8:13, 9:13, 10:12,
11:12, 12:13, 13:12, 14:12, 15:13, 16:8, 17:8, 18:8, 19:8, 20:9,
21:9, 22:11, 23:10, 24:10, 25:8, 26:8, 27:8, 28:8, 29:8, 30:8,
31:8, 32:8, 33:8, 34:8, 35:9, 36:9, 37:9, 38:9, 39:9, 40:9,
41:9, 42:11, 43:11, 44:10, 45:10, 46:10, 47:8, 48:8, 49:8, 50:8,
51:8, 52:8, 53:8, 54:8, 55:8, 56:8, 57:8, 58:8, 59:8, 60:8,
61:8, 62:8, 63:9, 64:9, 65:9, 66:9, 67:9, 68:9, 69:11, 70:11,
71:10, 72:10, 73:10, 74:10, 75:5, 76:5, 77:5, 78:5, 79:6, 80:6,
81:5, 82:6, 83:5, 84:5, 85:5, 86:5, 87:6, 88:6, 89:5, 90:5,
91:5, 92:5, 93:5, 94:5, 95:5, 96:5, 97:6, 98:6, 99:5, 100:5,
101:5, 102:5, 103:5, 104:5, 105:5, 106:5, 107:6, 108:6, 109:6, 110:6,
111:5, 112:5, 113:5, 114:5, 115:5, 116:5, 117:5, 118:5, 119:6, 120:6,
121:6, 122:6, 123:5, 124:5, 125:5, 126:5, 127:5, 128:5, 129:5, 130:5,
131:5, 132:5, 133:5, 134:5, 135:5, 136:5, 137:5, 138:5, 139:6, 140:6,
141:6, 142:6, 143:4, 144:4, 145:4, 146:7, 147:4, 148:7, 149:4, 150:4,
151:4, 152:4, 153:4, 154:4, 155:7, 156:4, 157:4, 158:4, 159:4, 160:7,
161:7, 162:4, 163:4, 164:4, 165:4, 166:7, 167:7, 168:4, 169:4, 170:4,
171:4, 172:4, 173:4, 174:4, 175:4, 176:4, 177:4, 178:4, 179:4, 180:4,
181:4, 182:4, 183:4, 184:4, 185:4, 186:4, 187:4, 188:4, 189:4, 190:4,
191:4, 192:4, 193:4, 194:4, 195:1, 196:2, 197:3, 198:1, 199:3, 200:1,
201:1, 202:2, 203:2, 204:3, 205:1, 206:3, 207:1, 208:1, 209:2, 210:2,
211:3, 212:1, 213:1, 214:3, 215:1, 216:2, 217:3, 218:1, 219:2, 220:3,
221:1, 222:1, 223:1, 224:1, 225:2, 226:2, 227:2, 228:2, 229:3, 230:3}
self.sg2bl = {1:'simple triclinic', 2:'simple triclinic',
3:'simple monoclinic', 4:'simple monoclinic',
5:'base-centered monoclinic', 6:'simple monoclinic',
7:'simple monoclinic', 8:'base-centered monoclinic',
9:'base-centered monoclinic', 10:'simple monoclinic',
11:'simple monoclinic', 12:'base-centered monoclinic',
13:'simple monoclinic', 14:'simple monoclinic',
15:'base-centered monoclinic', 16:'simple orthorhombic',
17:'simple orthorhombic', 18:'simple orthorhombic',
19:'simple orthorhombic', 20:'base-centered orthorhombic',
21:'base-centered orthorhombic', 22:'face-centered orthorhombic',
23:'body-centered orthorhombic', 24:'body-centered orthorhombic',
25:'simple orthorhombic', 26:'simple orthorhombic',
27:'simple orthorhombic', 28:'simple orthorhombic',
29:'simple orthorhombic', 30:'simple orthorhombic',
31:'simple orthorhombic', 32:'simple orthorhombic',
33:'simple orthorhombic', 34:'simple orthorhombic',
35:'base-centered orthorhombic', 36:'base-centered orthorhombic',
37:'base-centered orthorhombic', 38:'base-centered orthorhombic',
39:'base-centered orthorhombic', 40:'base-centered orthorhombic',
41:'base-centered orthorhombic', 42:'face-centered orthorhombic',
43:'face-centered orthorhombic', 44:'body-centered orthorhombic',
45:'body-centered orthorhombic', 46:'body-centered orthorhombic',
47:'simple orthorhombic', 48:'simple orthorhombic',
49:'simple orthorhombic', 50:'simple orthorhombic',
51:'simple orthorhombic', 52:'simple orthorhombic',
53:'simple orthorhombic', 54:'simple orthorhombic',
55:'simple orthorhombic', 56:'simple orthorhombic',
57:'simple orthorhombic', 58:'simple orthorhombic',
59:'simple orthorhombic', 60:'simple orthorhombic',
61:'simple orthorhombic', 62:'simple orthorhombic',
63:'base-centered orthorhombic', 64:'base-centered orthorhombic',
65:'base-centered orthorhombic', 66:'base-centered orthorhombic',
67:'base-centered orthorhombic', 68:'base-centered orthorhombic',
69:'face-centered orthorhombic', 70:'face-centered orthorhombic',
71:'body-centered orthorhombic', 72:'body-centered orthorhombic',
73:'body-centered orthorhombic', 74:'body-centered orthorhombic',
75:'simple tetragonal', 76:'simple tetragonal',
77:'simple tetragonal', 78:'simple tetragonal',
79:'body-centered tetragonal', 80:'body-centered tetragonal',
81:'simple tetragonal', 82:'body-centered tetragonal',
83:'simple tetragonal', 84:'simple tetragonal',
85:'simple tetragonal', 86:'simple tetragonal',
87:'body-centered tetragonal', 88:'body-centered tetragonal',
89:'simple tetragonal', 90:'simple tetragonal',
91:'simple tetragonal', 92:'simple tetragonal',
93:'simple tetragonal', 94:'simple tetragonal',
95:'simple tetragonal', 96:'simple tetragonal',
97:'body-centered tetragonal', 98:'body-centered tetragonal',
99:'simple tetragonal', 100:'simple tetragonal',
101:'simple tetragonal', 102:'simple tetragonal',
103:'simple tetragonal', 104:'simple tetragonal',
105:'simple tetragonal', 106:'simple tetragonal',
107:'body-centered tetragonal', 108:'body-centered tetragonal',
109:'body-centered tetragonal', 110:'body-centered tetragonal',
111:'simple tetragonal', 112:'simple tetragonal',
113:'simple tetragonal', 114:'simple tetragonal',
115:'simple tetragonal', 116:'simple tetragonal',
117:'simple tetragonal', 118:'simple tetragonal',
119:'body-centered tetragonal', 120:'body-centered tetragonal',
121:'body-centered tetragonal', 122:'body-centered tetragonal',
123:'simple tetragonal', 124:'simple tetragonal',
125:'simple tetragonal', 126:'simple tetragonal',
127:'simple tetragonal', 128:'simple tetragonal',
129:'simple tetragonal', 130:'simple tetragonal',
131:'simple tetragonal', 132:'simple tetragonal',
133:'simple tetragonal', 134:'simple tetragonal',
135:'simple tetragonal', 136:'simple tetragonal',
137:'simple tetragonal', 138:'simple tetragonal',
139:'body-centered tetragonal', 140:'body-centered tetragonal',
141:'body-centered tetragonal', 142:'body-centered tetragonal',
143:'hexagonal', 144:'hexagonal',
145:'hexagonal', 146:'rhombohedral',
147:'hexagonal', 148:'rhombohedral',
149:'hexagonal', 150:'hexagonal',
151:'hexagonal', 152:'hexagonal',
153:'hexagonal', 154:'hexagonal',
155:'rhombohedral', 156:'hexagonal',
157:'hexagonal', 158:'hexagonal',
159:'hexagonal', 160:'rhombohedral',
161:'rhombohedral', 162:'hexagonal',
163:'hexagonal', 164:'hexagonal',
165:'hexagonal', 166:'rhombohedral',
167:'rhombohedral', 168:'hexagonal',
169:'hexagonal', 170:'hexagonal',
171:'hexagonal', 172:'hexagonal',
173:'hexagonal', 174:'hexagonal',
175:'hexagonal', 176:'hexagonal',
177:'hexagonal', 178:'hexagonal',
179:'hexagonal', 180:'hexagonal',
181:'hexagonal', 182:'hexagonal',
183:'hexagonal', 184:'hexagonal',
185:'hexagonal', 186:'hexagonal',
187:'hexagonal', 188:'hexagonal',
189:'hexagonal', 190:'hexagonal',
191:'hexagonal', 192:'hexagonal',
193:'hexagonal', 194:'hexagonal',
195:'simple cubic', 196:'face-centered cubic',
197:'body-centered cubic', 198:'simple cubic',
199:'body-centered cubic', 200:'simple cubic',
201:'simple cubic', 202:'face-centered cubic',
203:'face-centered cubic', 204:'body-centered cubic',
205:'simple cubic', 206:'body-centered cubic',
207:'simple cubic', 208:'simple cubic',
209:'face-centered cubic', 210:'face-centered cubic',
211:'body-centered cubic', 212:'simple cubic',
213:'simple cubic', 214:'body-centered cubic',
215:'simple cubic', 216:'face-centered cubic',
217:'body-centered cubic', 218:'simple cubic',
219:'face-centered cubic', 220:'body-centered cubic',
221:'simple cubic', 222:'simple cubic',
223:'simple cubic', 224:'simple cubic',
225:'face-centered cubic', 226:'face-centered cubic',
227:'face-centered cubic', 228:'face-centered cubic',
229:'body-centered cubic', 230:'body-centered cubic'}
# BMDL, KSTR, SHAPE, KGRN and KFCD class instances
self.input_system = pyemto.System(folder=self.folder, EMTOdir=self.EMTOdir)
#
self.fit_angle_tol = 5e-6
self.fit_norm_ratio_tol = 5e-6
return
def calc_ws_radius(self, struct):
bohr2angst = 0.52917721
vol_unit = struct.volume/struct.num_sites
sws = (3*vol_unit/4.0/np.pi)**(1.0/3)/bohr2angst
return sws
def make_basis_array(self, struct):
"""Returns a 2D numpy array of the basis atom coordinates
in !!Cartesian!! coordinates.
"""
len_basis = struct.num_sites
emto_basis = np.zeros((len_basis, 3))
for i in range(len_basis):
emto_basis[i, :] = struct.sites[i].coords
return emto_basis
def make_sites_array(self, struct):
len_basis = struct.num_sites
emto_sites = []
for i in range(len_basis):
emto_sites.append(struct.sites[i].specie.number)
return emto_sites
def make_cpa_sites_array(self, struct):
len_basis = struct.num_sites
self.atoms_cpa = []
self.concs_cpa = []
self.splts_cpa = []
self.fixs_cpa = []
for i in range(len_basis):
atom_number = struct.sites[i].specie.number
for j in range(len(self.pmg_species)):
if atom_number == self.pmg_species[j]:
self.atoms_cpa.append(self.species[j])
self.concs_cpa.append(self.concs[j])
self.splts_cpa.append(self.splts[j])
self.fixs_cpa.append(self.fixs[j])
break
def get_equivalent_sites(self):
"""Find all the sites that have exactly the same species,
concentrations, and magnetic moments"""
splt_tol = 1e-6
conc_tol = 1e-6
species_sorted = []
splts_sorted = []
concs_sorted = []
for i in range(len(self.species)):
tmp1 = []
tmp2 = []
tmp3 = []
ind_sorted = np.argsort(self.species[i])
for ind in ind_sorted:
tmp1.append(self.species[i][ind])
tmp2.append(self.splts[i][ind])
tmp3.append(self.concs[i][ind])
species_sorted.append(tmp1)
splts_sorted.append(tmp2)
concs_sorted.append(tmp3)
eqv_sites = np.zeros((len(species_sorted), len(species_sorted)), dtype=np.int) + 9999
for i in range(len(species_sorted)-1):
for j in range(i+1, len(species_sorted)):
eqv_sites[i,j] = 1
if len(species_sorted[i]) != len(species_sorted[j]):
                    # Sites i and j contain different amounts of atoms.
# For now, take them to be non-equivalent, although
# they could still be equivalent in the case that
# some element has been split into two or more parts
# concentration-wise (whole and the parts should have
# identical magnetic moments).
eqv_sites[i, j] = 0
else:
for a1, a2, splt1, splt2, conc1, conc2 in zip(species_sorted[i], species_sorted[j],
splts_sorted[i], splts_sorted[j], concs_sorted[i], concs_sorted[j]):
if a1 != a2 or np.abs(splt1 - splt2) > splt_tol or np.abs(conc1 - conc2) > conc_tol:
# Some pair of atoms (in the sorted lists) were not
# the same => sites i and j are not equivalent.
eqv_sites[i, j] = 0
break
output_sites = np.ones(len(species_sorted), dtype=np.int) * 9999
next_available = 1
for i in range(len(species_sorted)-1):
if output_sites[i] == 9999:
output_sites[i] = next_available
next_available += 1
for j in range(i+1, len(species_sorted)):
if eqv_sites[i, j] == 1:
output_sites[j] = output_sites[i]
if output_sites[-1] == 9999:
output_sites[-1] = next_available
return output_sites
def prepare_input_files(self, prims=None, basis=None, latpath=None,
coords_are_cartesian=False, latname=None,
species=None, find_primitive=True,
concs=None, splts=None, its=None, ws_wsts=None,
make_supercell=None, fixs=None,
**kwargs):
if prims is None:
sys.exit('EMTO.init_structure(): \'prims\' has to be given!')
if basis is None:
sys.exit('EMTO.init_structure(): \'basis\' has to be given!')
if latpath is None:
self.latpath = os.getcwd()
else:
self.latpath = latpath
if latname is None:
self.latname = 'structure'
else:
self.latname = latname
self.prims = np.array(prims)
self.basis = np.array(basis)
self.len_basis = len(self.basis[:, 0])
if species is None:
sys.exit('EMTO.init_structure(): \'species\' has to be given!')
else:
self.species = []
for i in range(len(species)):
if isinstance(species[i], list):
tmp = []
for j in range(len(species[i])):
tmp.append(species[i][j])
self.species.append(tmp)
else:
self.species.append([species[i]])
if splts is None:
# Assume a zero moments array
self.splts = []
for i in range(len(self.species)):
if isinstance(self.species[i], list):
tmp = []
for j in range(len(self.species[i])):
tmp.append(0.0)
self.splts.append(tmp)
else:
self.splts.append([0.0])
else:
self.splts = []
for i in range(len(splts)):
if isinstance(splts[i], list):
tmp = []
for j in range(len(splts[i])):
tmp.append(splts[i][j])
self.splts.append(tmp)
else:
self.splts.append([splts[i]])
if fixs is None:
            # Assume no moments are fixed ('N' for every atom)
self.fixs = []
for i in range(len(self.species)):
if isinstance(self.species[i], list):
tmp = []
for j in range(len(self.species[i])):
tmp.append('N')
self.fixs.append(tmp)
else:
self.fixs.append(['N'])
else:
self.fixs = []
for i in range(len(fixs)):
if isinstance(fixs[i], list):
tmp = []
for j in range(len(fixs[i])):
tmp.append(fixs[i][j])
self.fixs.append(tmp)
else:
self.fixs.append([fixs[i]])
if concs is None:
            # Assume equal concentrations for all atoms on each site
self.concs = []
for i in range(len(self.species)):
if isinstance(self.species[i], list):
tmp = []
for j in range(len(self.species[i])):
tmp.append(1.0/len(self.species[i]))
self.concs.append(tmp)
else:
self.concs.append([1.0])
else:
self.concs = []
for i in range(len(concs)):
if isinstance(concs[i], list):
tmp = []
tmp_sum = 0.0
for j in range(len(concs[i])):
tmp.append(concs[i][j])
tmp_sum += concs[i][j]
                    print('Sum of concentrations for site {0}: {1}'.format(i+1, tmp_sum))
if tmp_sum < 1.1:
if np.abs(tmp_sum - 1.0) > 1.e-6:
sys.exit('Concentrations {0} for site {1} do not add up to 1.0!!!'.format(concs[i], i+1))
else:
if np.abs(tmp_sum - 100.0) > 1.e-3:
sys.exit('Concentrations {0} for site {1} do not add up to 100!!!'.format(concs[i], i+1))
self.concs.append(tmp)
else:
self.concs.append([concs[i]])
        # Check that the basis, species, concs, splts, and fixs arrays all have the same length
for a, b in combinations([self.basis, self.species, self.concs, self.splts, self.fixs], 2):
if len(a) != len(b):
print(a, 'len = ', len(a))
print(b, 'len = ', len(b))
sys.exit('The above input arrays have inconsistent lengths!!!')
for a, b in combinations([self.species, self.concs, self.splts, self.fixs], 2):
for sublist1, sublist2 in zip(a, b):
if len(sublist1) != len(sublist2):
print(sublist1, 'len = ', len(sublist1))
print(sublist2, 'len = ', len(sublist2))
sys.exit('The above input array elements have inconsistent lengths!!!')
self.find_primitive = find_primitive
if self.find_primitive:
self.pmg_species = self.get_equivalent_sites()
else:
            self.pmg_species = np.linspace(1, len(self.species), len(self.species), dtype=int)
#
self.coords_are_cartesian = coords_are_cartesian
self.ibz = None
self.make_supercell = make_supercell
#
self.pmg_input_lattice = Lattice(self.prims)
self.pmg_input_struct = Structure(self.pmg_input_lattice, self.pmg_species, self.basis,
coords_are_cartesian=self.coords_are_cartesian)
#
if self.make_supercell is not None:
self.pmg_input_struct.make_supercell(self.make_supercell)
#
self.sws = self.calc_ws_radius(self.pmg_input_struct)
#
self.finder = SpacegroupAnalyzer(self.pmg_input_struct, symprec=0.0001, angle_tolerance=0.0001)
self.stm = StructureMatcher(ltol=0.001, stol=0.001, angle_tol=0.001, attempt_supercell=True)
#
print("Input structure information:")
print(self.pmg_input_struct)
print("Volume: ", self.pmg_input_struct.volume)
print("Lattice vectors:")
print(self.pmg_input_struct.lattice.matrix)
print("")
#
# spglib
spg_cell = (
self.pmg_input_lattice.matrix,
self.pmg_input_struct.frac_coords,
self.pmg_species
)
self.spg_space_group = spg.get_spacegroup(spg_cell)
self.spg_space_group_number = int(self.spg_space_group.split()[-1].lstrip('(').rstrip(')'))
self.spg_space_group_symbol = self.spg_space_group
self.spg_prim_lat, self.spg_prim_pos, self.spg_prim_species = spg.standardize_cell(spg_cell,
to_primitive=True)
self.prim_struct = Structure(Lattice(self.spg_prim_lat), self.spg_prim_species, self.spg_prim_pos)
self.spg_ibz = self.sg2ibz[self.spg_space_group_number]
self.ibz = self.spg_ibz
mesh = [kwargs['nkx'], kwargs['nky'], kwargs['nkz']]
#print()
#print('#'*60)
mapping, grid = spg.get_ir_reciprocal_mesh(mesh, spg_cell, is_time_reversal=True, is_shift=(0, 0, 0))
uniques, counts = np.unique(mapping, return_counts=True)
all_weights = []
kpoints = []
weights = []
for xx in mapping:
all_weights.append(counts[np.argwhere(uniques == xx).flatten()[0]])
for xx, yy in zip(uniques, counts):
kpoints.append(grid[np.argwhere(mapping == xx).flatten()[0]])
weights.append(yy)
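        # Note: spglib's get_ir_reciprocal_mesh returns, for every grid point, the index of the
        # irreducible k-point it folds onto ('mapping'); 'counts' is therefore the multiplicity
        # of each irreducible point and is used as its k-point weight above.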
#for xx, yy, zz in zip(mapping, grid, all_weights):
# print(xx, yy, zz)
#print()
#for kp, ww in zip(kpoints, weights):
# print(kp, ww)
#print()
#print('NKVEC = ', len(kpoints))
#print('#'*60)
#print()
#print(spg_prim_pos)
#print(spg_prim_species)
#
#print("Detected standard conventional structure:")
#print(self.conv_struct)
#print("Volume: ",self.conv_struct.volume)
#print("Lattice vectors:")
#print(self.conv_struct.lattice.matrix)
#print("")
print("Detected standardized structure:")
print(self.prim_struct)
print("Volume: ", self.prim_struct.volume)
print("Lattice vectors:")
print(self.prim_struct.lattice.matrix)
print("")
#
self.primaa = self.prim_struct.lattice.matrix[0, :]
self.primbb = self.prim_struct.lattice.matrix[1, :]
self.primcc = self.prim_struct.lattice.matrix[2, :]
self.output_basis = self.make_basis_array(self.prim_struct)
# Below we calculate the transformation that maps
        # the primitive vectors (self.primaa, self.primbb, self.primcc) to the lattice vectors used by EMTO.
# This transform depends on the type of the Bravais lattice,
# so each case must be treated separately.
if self.spg_ibz == 1:
norm_tmp = np.linalg.norm(self.primaa)
self.output_prima = self.primaa/norm_tmp
self.output_primb = self.primbb/norm_tmp
self.output_primc = self.primcc/norm_tmp
# Apply transformation on the basis atoms
self.output_basis = self.output_basis/norm_tmp
self.output_boa = 0.0
self.output_coa = 0.0
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
self.emto_prima = np.array([1, 0, 0])
self.emto_primb = np.array([0, 1, 0])
self.emto_primc = np.array([0, 0, 1])
self.emto_basis = self.output_basis
elif self.spg_ibz == 2:
norm_tmp = 2*self.primaa[1]
self.output_prima = self.primcc/norm_tmp
self.output_primb = self.primaa/norm_tmp
self.output_primc = self.primbb/norm_tmp
# Apply transformation on the basis atoms
self.output_basis = self.output_basis/norm_tmp
self.output_boa = 0.0
self.output_coa = 0.0
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
self.emto_prima = np.array([0.5, 0.5, 0])
self.emto_primb = np.array([0, 0.5, 0.5])
self.emto_primc = np.array([0.5, 0, 0.5])
self.emto_basis = self.output_basis
elif self.spg_ibz == 3:
norm_tmp = 2*self.primaa[1]
self.output_prima = self.primcc/norm_tmp
self.output_primb = self.primaa/norm_tmp
self.output_primc = self.primbb/norm_tmp
# Apply transformation on the basis atoms
self.output_basis = self.output_basis/norm_tmp
self.output_boa = 0.0
self.output_coa = 0.0
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
self.emto_prima = np.array([0.5, 0.5, -0.5])
self.emto_primb = np.array([-0.5, 0.5, 0.5])
self.emto_primc = np.array([0.5, -0.5, 0.5])
self.emto_basis = self.output_basis
elif self.spg_ibz == 4:
rot1 = rotation_matrix([0.0, 0.0, 1.0], 0./180*np.pi)
self.output_prima = np.dot(rot1, self.primaa)
self.output_primb = np.dot(rot1, self.primbb)
self.output_primc = np.dot(rot1, self.primcc)
for i in range(len(self.output_basis[:, 0])):
self.output_basis[i, :] = np.dot(rot1, self.output_basis[i, :])
self.output_boa = 0.0
self.output_coa = self.output_primc[2]
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
# EMTO convention:
self.emto_prima = np.array([1., 0, 0])
self.emto_primb = np.array([-0.5, np.sqrt(3.)/2, 0])
self.emto_primc = np.array([0., 0, self.output_coa])
self.emto_basis = self.output_basis
elif self.spg_ibz == 5:
norm_tmp = self.primaa[0]
self.output_prima = self.primaa/norm_tmp
self.output_primb = self.primbb/norm_tmp
self.output_primc = self.primcc/norm_tmp
# Apply transformation on the basis atoms
self.output_basis = self.output_basis/norm_tmp
self.output_boa = 0.0
self.output_coa = self.output_primc[2]
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
self.emto_prima = np.array([1.0, 0.0, 0.0])
self.emto_primb = np.array([0.0, 1.0, 0.0])
self.emto_primc = np.array([0.0, 0.0, self.output_coa])
self.emto_basis = self.output_basis
elif self.spg_ibz == 6:
self.output_prima = self.primbb
self.output_primb = self.primcc
self.output_primc = self.primaa
# Apply transformation on the basis atoms
self.output_basis = self.output_basis
self.output_boa = 0.0
self.output_coa = 2*self.output_prima[2]
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
self.emto_prima = np.array([0.5, -0.5, self.output_coa/2])
self.emto_primb = np.array([0.5, 0.5, -self.output_coa/2])
self.emto_primc = np.array([-0.5, 0.5, self.output_coa/2])
self.emto_basis = self.output_basis
elif self.spg_ibz == 7:
alpha = self.prim_struct.lattice.alpha
kulma = np.arctan((self.primaa[0]+self.primbb[0]+self.primcc[0])/
(self.primaa[2]+self.primbb[2]+self.primcc[2]))
rot1 = rotation_matrix([0.0, -1.0, 0.0], kulma)
rot2 = np.array([[-np.sqrt(3.0)/2, -0.5, 0.0],
[0.5, -np.sqrt(3.0)/2, 0.0],
[0.0, 0.0, 1.0]])
self.output_prima = np.dot(rot2, np.dot(rot1, self.primaa))
self.output_primb = np.dot(rot2, np.dot(rot1, self.primbb))
self.output_primc = np.dot(rot2, np.dot(rot1, self.primcc))
scale_a = self.output_prima[1]
print('scale_a = ',scale_a)
self.output_prima = self.output_prima/scale_a
self.output_primb = self.output_primb/scale_a
self.output_primc = self.output_primc/scale_a
# Apply transformation on the basis atoms
for i in range(len(self.output_basis[:, 0])):
self.output_basis[i,:] = np.dot(rot2, np.dot(rot1, self.output_basis[i, :]))/scale_a
self.output_boa = 1.0
self.output_coa = self.output_prima[2]
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
self.emto_prima = np.array([0.0, 1.0, self.output_coa])
self.emto_primb = np.array([-np.sqrt(3.)/2, -0.5, self.output_coa])
self.emto_primc = np.array([np.sqrt(3.)/2, -0.5, self.output_coa])
self.emto_basis = self.output_basis
elif self.spg_ibz == 8:
if (np.abs(self.primaa[0]) < np.abs(self.primbb[1])) and \
(np.abs(self.primbb[1]) < np.abs(self.primcc[2])):
norm_tmp = self.primaa[0]
self.output_prima = self.primaa/norm_tmp
self.output_primb = self.primbb/norm_tmp
self.output_primc = self.primcc/norm_tmp
# Apply transformation on the basis atoms
self.output_basis = self.output_basis/norm_tmp
elif np.abs(np.abs(self.primaa[0]) - np.abs(self.primbb[1])) < 1.e-6 and \
np.abs(self.primbb[1]) < np.abs(self.primcc[2]):
norm_tmp = self.primaa[0]
self.output_prima = self.primaa/norm_tmp
self.output_primb = self.primbb/norm_tmp
self.output_primc = self.primcc/norm_tmp
# Apply transformation on the basis atoms
self.output_basis = self.output_basis/norm_tmp
elif np.abs(self.primaa[0]) < np.abs(self.primcc[2]):
norm_tmp = self.primcc[2]
rot1 = rotation_matrix([0.0, 0.0, 1.0], -90./180*np.pi)
rot2 = rotation_matrix([-1.0, 0.0, 0.0], 90./180*np.pi)
self.output_prima = np.dot(rot2, np.dot(rot1, self.primbb))/norm_tmp
self.output_primb = np.dot(rot2, np.dot(rot1, self.primcc))/norm_tmp
self.output_primc = np.dot(rot2, np.dot(rot1, self.primaa))/norm_tmp
print(self.output_prima)
print(self.output_primb)
print(self.output_primc)
# Apply transformation on the basis atoms
for i in range(len(self.output_basis[:, 0])):
self.output_basis[i,:] = np.dot(rot2, np.dot(rot1, self.output_basis[i, :]))/norm_tmp
else:
norm_tmp = self.primaa[0]
self.output_prima = self.primaa/norm_tmp
self.output_primb = self.primbb/norm_tmp
self.output_primc = self.primcc/norm_tmp
# Apply transformation on the basis atoms
self.output_basis = self.output_basis/norm_tmp
#
self.output_boa = self.output_primb[1]
self.output_coa = self.output_primc[2]
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
self.emto_prima = np.array([1.0, 0.0, 0.0])
self.emto_primb = np.array([0.0, self.output_boa, 0.0])
self.emto_primc = np.array([0.0, 0.0 ,self.output_coa])
self.emto_basis = self.output_basis
elif self.spg_ibz == 9:
if np.abs(self.primbb[1] - 0.5) < 1e-12 and \
np.abs(self.primcc[1] + 0.5) < 1e-12:
rot1 = rotation_matrix([0.0, 1.0, 0.0], 90./180*np.pi)
self.output_prima = np.dot(rot1, self.primaa)
self.output_primb = np.dot(rot1, self.primbb)
self.output_primc = np.dot(rot1, self.primcc)
# Redefine lattice vectors
tmp = np.copy(self.output_prima)
self.output_prima[:] = self.output_primc[:]
self.output_primc[:] = tmp
# Mirror along the xy-plane
self.output_primc *= -1
# Scale lattice vectors so that a1 and a2 x-components are 0.5
norm_tmp = 2*self.output_prima[0]
self.output_prima /= norm_tmp
self.output_primb /= norm_tmp
self.output_primc /= norm_tmp
# Apply transformation on the basis atoms
for i in range(len(self.output_basis[:, 0])):
self.output_basis[i,:] = np.dot(rot1, self.output_basis[i, :])
for i in range(len(self.output_basis[:, 0])):
self.output_basis[i,2] *= -1
self.output_basis /= norm_tmp
#print(self.output_prima)
#print(self.output_primb)
#print(self.output_primc)
else:
norm_tmp = 2*self.primaa[0]
self.output_prima = self.primaa/norm_tmp
self.output_primb = self.primbb/norm_tmp
self.output_primc = self.primcc/norm_tmp
# Apply transformation on the basis atoms
self.output_basis = self.output_basis/norm_tmp
self.output_boa = 2*self.output_primb[1]
self.output_coa = self.output_primc[2]
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
# EMTO convention:
self.emto_prima = np.array([0.5, -self.output_boa/2, 0])
self.emto_primb = np.array([0.5, self.output_boa/2, 0])
self.emto_primc = np.array([0, 0, self.output_coa])
self.emto_basis = self.output_basis
elif self.spg_ibz == 10:
self.output_prima = np.zeros_like(self.primaa)
self.output_primb = np.zeros_like(self.primbb)
self.output_primc = np.zeros_like(self.primcc)
self.output_prima[0] = self.primaa[1]
self.output_prima[1] = self.primaa[0]
self.output_prima[2] = self.primaa[2]
self.output_primb[0] = self.primcc[1]
self.output_primb[1] = self.primcc[0]
self.output_primb[2] = self.primcc[2]
self.output_primc[0] = self.primbb[1]
self.output_primc[1] = self.primbb[0]
self.output_primc[2] = self.primbb[2]
norm_tmp = 2*self.output_prima[0]
self.output_prima /= norm_tmp
self.output_primb /= norm_tmp
self.output_primc /= norm_tmp
# Apply transformation on the basis atoms
basis_tmp = np.copy(self.output_basis)
self.output_basis[:, 0] = basis_tmp[:, 1]
self.output_basis[:, 1] = basis_tmp[:, 0]
self.output_basis = self.output_basis/norm_tmp
self.output_boa = 2*self.output_primc[1]
self.output_coa = 2*self.output_primc[2]
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
self.emto_prima = np.array([0.5, -self.output_boa/2, self.output_coa/2])
self.emto_primb = np.array([0.5, self.output_boa/2, -self.output_coa/2])
self.emto_primc = np.array([-0.5, self.output_boa/2, self.output_coa/2])
self.emto_basis = self.output_basis
elif self.spg_ibz == 11:
rot1 = rotation_matrix([1, 1, 1], 120./180*np.pi)
self.output_prima = np.dot(rot1, self.primaa)
self.output_primb = np.dot(rot1, self.primbb)
self.output_primc = np.dot(rot1, self.primcc)
norm_tmp = 2*self.output_prima[0]
self.output_prima /= norm_tmp
self.output_primb /= norm_tmp
self.output_primc /= norm_tmp
# Apply transformation on the basis atoms
for i in range(len(self.output_basis[:, 0])):
self.output_basis[i, :] = np.dot(rot1, self.output_basis[i, :])
self.output_basis /= norm_tmp
self.output_boa = 2*self.output_primc[1]
self.output_coa = 2*self.output_primc[2]
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = 0.0
# EMTO convention:
self.emto_prima = np.array([0.5, 0, self.output_coa/2])
self.emto_primb = np.array([0.5, self.output_boa/2, 0])
self.emto_primc = np.array([0, self.output_boa/2, self.output_coa/2])
self.emto_basis = self.output_basis
elif self.spg_ibz == 12:
bc_norm = np.linalg.norm(self.primaa)
            # Rotate 90 degrees counterclockwise around the x-axis
rot1 = rotation_matrix([1, 0, 0], -90./180*np.pi)
self.output_prima = np.dot(rot1, self.primaa/bc_norm)
self.output_primb = np.dot(rot1, self.primcc/bc_norm)
self.output_primc = np.dot(rot1, self.primbb/bc_norm)
# Mirror a3 from negative z-axis to positive side
self.output_primc *= -1.0
            # spg uses gamma > 90, so we redefine the a2 lattice vector so that
# gamma < 90:
self.output_primb[0] *= -1.0
gamma = get_angle(self.output_prima, self.output_primb)
y_fac = self.output_primb[1]
shift = np.abs(2*self.output_primb[0])
#
# Apply transformation on the basis atoms
for i in range(len(self.output_basis[:, 0])):
self.output_basis[i, :] = np.dot(rot1, self.output_basis[i, :])/bc_norm
# Transform basis because self.output_primc was mirrored:
for i in range(len(self.output_basis[:, 0])):
self.output_basis[i, 2] *= -1.0
# Transform basis because gamma was changed above:
for i in range(len(self.output_basis[:, 0])):
#self.output_basis[i, :] = np.dot(shift_mat, self.output_basis[i, :])
if self.output_basis[i, 1] > 0:
self.output_basis[i, 0] += shift * np.abs(self.output_basis[i, 1] / y_fac)
else:
self.output_basis[i, 0] -= shift * np.abs(self.output_basis[i, 1] / y_fac)
self.output_boa = np.linalg.norm(self.output_primb)
self.output_coa = np.linalg.norm(self.output_primc)
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = gamma
self.emto_prima = np.array([1.0, 0, 0])
self.emto_primb = np.array([self.output_boa*np.cos(np.radians(self.output_gamma)),
self.output_boa*np.sin(np.radians(self.output_gamma)), 0])
self.emto_primc = np.array([0, 0, self.output_coa])
self.emto_basis = self.output_basis
elif self.spg_ibz == 13:
gamma = get_angle(self.primcc, self.primaa+self.primbb)
switch_x_y = np.array([[0, -1, 0],
[1, 0, 0],
[0, 0, 1]])
rot1 = np.array([[1.0,0.0,0.0],
[0.0,np.cos(np.radians(180-gamma)),-np.sin(np.radians(180-gamma))],
[0.0,np.sin(np.radians(180-gamma)),np.cos(np.radians(180-gamma))]])
rot2 = np.array([[0.0,0.0,1.0],
[0.0,1.0,0.0],
[-1.0,0.0,0.0]])
bc_norm = np.linalg.norm(self.primaa+self.primbb)
self.output_prima = np.dot(rot2, np.dot(rot1, np.dot(switch_x_y, self.primcc)))/bc_norm
self.output_primb = np.dot(rot2, np.dot(rot1, np.dot(switch_x_y, self.primaa)))/bc_norm
self.output_primc = np.dot(rot2, np.dot(rot1, np.dot(switch_x_y, self.primbb)))/bc_norm
# Apply transformation on the basis atoms
for i in range(len(self.output_basis[:, 0])):
self.output_basis[i, :] = np.dot(rot2, np.dot(rot1, np.dot(switch_x_y, self.output_basis[i, :])))/bc_norm
self.output_boa = np.abs(self.output_prima[1])
self.output_coa = np.abs(2*self.output_primc[2])
self.output_alpha = 0.0
self.output_beta = 0.0
self.output_gamma = gamma
self.emto_prima = np.array([0.0, -self.output_boa, 0])
self.emto_primb = np.array([0.5*np.sin(
|
np.radians(self.output_gamma)
|
numpy.radians
|
"""
Preprocesses data.
"""
import os
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
def dataset_specific(random_state, test_size):
"""
Put dataset specific processing here.
"""
    # column names for the adult dataset
columns = ['age', 'workclass', 'fnlwgt', 'education', 'education-num', 'marital-status',
'occupation', 'relationship', 'race', 'sex', 'capital-gain', 'capital-loss',
'hours-per-week', 'native-country', 'label']
# retrieve dataset
train_df = pd.read_csv('adult.data', header=None, names=columns)
test_df = pd.read_csv('adult.test', header=None, names=columns)
# remove select columns
remove_cols = ['education-num']
if len(remove_cols) > 0:
train_df = train_df.drop(columns=remove_cols)
test_df = test_df.drop(columns=remove_cols)
columns = [x for x in columns if x not in remove_cols]
# remove nan rows
train_nan_rows = train_df[train_df.isnull().any(axis=1)]
test_nan_rows = test_df[test_df.isnull().any(axis=1)]
print('train nan rows: {}'.format(len(train_nan_rows)))
print('test nan rows: {}'.format(len(test_nan_rows)))
train_df = train_df.dropna()
test_df = test_df.dropna()
    # fix label column: adult.test labels carry a trailing '.' (e.g. '>50K.'), strip it so they match adult.data
test_df['label'] = test_df['label'].apply(lambda x: x.replace('.', ''))
# categorize attributes
label = ['label']
numeric = ['age', 'fnlwgt', 'capital-gain', 'capital-loss', 'hours-per-week']
categorical = list(set(columns) - set(numeric) - set(label))
return train_df, test_df, label, numeric, categorical
def main(random_state=1, test_size=0.2, n_bins=5):
train_df, test_df, label, numeric, categorical = dataset_specific(random_state=random_state,
test_size=test_size)
# binarize inputs
ct = ColumnTransformer([('kbd', KBinsDiscretizer(n_bins=n_bins, encode='onehot-dense'), numeric),
('ohe', OneHotEncoder(sparse=False, handle_unknown='ignore'), categorical)])
train = ct.fit_transform(train_df)
test = ct.transform(test_df)
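    # After this transform every feature is binary: each numeric column becomes n_bins one-hot
    # indicator columns (KBinsDiscretizer with encode='onehot-dense') and each categorical column
    # becomes one indicator column per category.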
# binarize outputs
le = LabelEncoder()
train_label = le.fit_transform(train_df[label].to_numpy().ravel()).reshape(-1, 1)
test_label = le.transform(test_df[label].to_numpy().ravel()).reshape(-1, 1)
# combine binarized data
train = np.hstack([train, train_label]).astype(np.int32)
test = np.hstack([test, test_label]).astype(np.int32)
print('train.shape: {}, label sum: {}'.format(train.shape, train[:, -1].sum()))
print('test.shape: {}, label sum: {}'.format(test.shape, test[:, -1].sum()))
# save to numpy format
print('saving...')
np.save('train.npy', train)
|
np.save('test.npy', test)
|
numpy.save
|
from __future__ import division
from .mesh import MeshVisual
import numpy as np
from numpy.linalg import norm
from ..util.transforms import rotate
from ..color import ColorArray
class TubeVisual(MeshVisual):
"""Displays a tube around a piecewise-linear path.
The tube mesh is corrected following its Frenet curvature and
torsion such that it varies smoothly along the curve, including if
the tube is closed.
Parameters
----------
points : ndarray
An array of (x, y, z) points describing the path along which the
tube will be extruded.
radius : float
The radius of the tube. Defaults to 1.0.
closed : bool
Whether the tube should be closed, joining the last point to the
first. Defaults to False.
color : Color | ColorArray
The color(s) to use when drawing the tube. The same color is
applied to each vertex of the mesh surrounding each point of
the line. If the input is a ColorArray, the argument will be
cycled; for instance if 'red' is passed then the entire tube
will be red, or if ['green', 'blue'] is passed then the points
will alternate between these colours. Defaults to 'purple'.
tube_points : int
The number of points in the circle-approximating polygon of the
tube's cross section. Defaults to 8.
shading : str | None
Same as for the `MeshVisual` class. Defaults to 'smooth'.
vertex_colors: ndarray | None
Same as for the `MeshVisual` class.
face_colors: ndarray | None
Same as for the `MeshVisual` class.
mode : str
Same as for the `MeshVisual` class. Defaults to 'triangles'.
"""
def __init__(self, points, radius=1.0,
closed=False,
color='purple',
tube_points=8,
shading='smooth',
vertex_colors=None,
face_colors=None,
mode='triangles'):
points = np.array(points)
tangents, normals, binormals = _frenet_frames(points, closed)
segments = len(points) - 1
# get the positions of each vertex
grid = np.zeros((len(points), tube_points, 3))
for i in range(len(points)):
pos = points[i]
normal = normals[i]
binormal = binormals[i]
# Add a vertex for each point on the circle
v = np.arange(tube_points,
                          dtype=float) / tube_points * 2 * np.pi
cx = -1. * radius * np.cos(v)
cy = radius * np.sin(v)
grid[i] = (pos + cx[:, np.newaxis]*normal +
cy[:, np.newaxis]*binormal)
# construct the mesh
indices = []
for i in range(segments):
for j in range(tube_points):
ip = (i+1) % segments if closed else i+1
jp = (j+1) % tube_points
index_a = i*tube_points + j
index_b = ip*tube_points + j
index_c = ip*tube_points + jp
index_d = i*tube_points + jp
indices.append([index_a, index_b, index_d])
indices.append([index_b, index_c, index_d])
vertices = grid.reshape(grid.shape[0]*grid.shape[1], 3)
color = ColorArray(color)
if vertex_colors is None:
point_colors = np.resize(color.rgba,
(len(points), 4))
vertex_colors = np.repeat(point_colors, tube_points, axis=0)
indices = np.array(indices, dtype=np.uint32)
MeshVisual.__init__(self, vertices, indices,
vertex_colors=vertex_colors,
face_colors=face_colors,
shading=shading,
mode=mode)
def draw(self, transforms):
"""Draw the visual
Parameters
----------
transforms : instance of TransformSystem
The transforms to use.
"""
MeshVisual.draw(self, transforms)
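# Minimal usage sketch (illustrative; assumes this visual is attached to a vispy canvas/scene
# elsewhere, e.g. through a scene-node wrapper):
#   t = np.linspace(0, 4 * np.pi, 200)
#   points = np.column_stack([np.cos(t), np.sin(t), t / (2 * np.pi)])  # a helix
#   tube = TubeVisual(points, radius=0.2, color='red', tube_points=16)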
def _frenet_frames(points, closed):
'''Calculates and returns the tangents, normals and binormals for
the tube.'''
tangents = np.zeros((len(points), 3))
normals = np.zeros((len(points), 3))
epsilon = 0.0001
# Compute tangent vectors for each segment
tangents = np.roll(points, -1, axis=0) - np.roll(points, 1, axis=0)
if not closed:
tangents[0] = points[1] - points[0]
tangents[-1] = points[-1] - points[-2]
mags = np.sqrt(np.sum(tangents * tangents, axis=1))
tangents /= mags[:, np.newaxis]
# Get initial normal and binormal
t = np.abs(tangents[0])
smallest = np.argmin(t)
normal =
|
np.zeros(3)
|
numpy.zeros
|
import numpy as np
def make_sample_her_transitions(replay_strategy, replay_k, reward_fun):
"""Creates a sample function that can be used for HER experience replay.
Args:
replay_strategy (in ['future', 'none']): the HER replay strategy; if set to 'none',
regular DDPG experience replay is used
replay_k (int): the ratio between HER replays and regular replays (e.g. k = 4 -> 4 times
as many HER replays as regular replays are used)
reward_fun (function): function to re-compute the reward with substituted goals
"""
if replay_strategy == 'future':
future_p = 1 - (1. / (1 + replay_k))
else: # 'replay_strategy' == 'none'
future_p = 0
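    # E.g. replay_k = 4 gives future_p = 1 - 1/5 = 0.8, i.e. roughly 80% of the sampled
    # transitions are HER-relabelled with a future achieved goal and 20% are kept unchanged.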
def _sample_her_transitions(episode_batch, batch_size_in_transitions):
"""episode_batch is {key: array(buffer_size x T x dim_key)}
"""
T = episode_batch['u'].shape[1]
rollout_batch_size = episode_batch['u'].shape[0]
batch_size = batch_size_in_transitions
# Select which episodes and time steps to use.
episode_idxs = np.random.randint(0, rollout_batch_size, batch_size)
t_samples =
|
np.random.randint(T, size=batch_size)
|
numpy.random.randint
|
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module implements ``Wasserstein Adversarial Examples via Projected Sinkhorn Iterations`` as evasion attack.
| Paper link: https://arxiv.org/abs/1902.07906
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from typing import Optional, TYPE_CHECKING
import numpy as np
from scipy.special import lambertw
from tqdm.auto import trange
from art.config import ART_NUMPY_DTYPE
from art.estimators.estimator import BaseEstimator, LossGradientsMixin
from art.estimators.classification.classifier import ClassifierMixin
from art.attacks.attack import EvasionAttack
from art.utils import compute_success, get_labels_np_array, check_and_transform_label_format
if TYPE_CHECKING:
from art.utils import CLASSIFIER_LOSS_GRADIENTS_TYPE
logger = logging.getLogger(__name__)
EPS_LOG = 10 ** -10
class Wasserstein(EvasionAttack):
"""
Implements ``Wasserstein Adversarial Examples via Projected Sinkhorn Iterations`` as evasion attack.
| Paper link: https://arxiv.org/abs/1902.07906
"""
attack_params = EvasionAttack.attack_params + [
"targeted",
"regularization",
"p",
"kernel_size",
"eps_step",
"norm",
"ball",
"eps",
"eps_iter",
"eps_factor",
"max_iter",
"conjugate_sinkhorn_max_iter",
"projected_sinkhorn_max_iter",
"batch_size",
"verbose",
]
_estimator_requirements = (BaseEstimator, LossGradientsMixin, ClassifierMixin)
def __init__(
self,
estimator: "CLASSIFIER_LOSS_GRADIENTS_TYPE",
targeted: bool = False,
regularization: float = 3000.0,
p: int = 2,
kernel_size: int = 5,
eps_step: float = 0.1,
norm: str = "wasserstein",
ball: str = "wasserstein",
eps: float = 0.3,
eps_iter: int = 10,
eps_factor: float = 1.1,
max_iter: int = 400,
conjugate_sinkhorn_max_iter: int = 400,
projected_sinkhorn_max_iter: int = 400,
batch_size: int = 1,
verbose: bool = True,
):
"""
Create a Wasserstein attack instance.
:param estimator: A trained estimator.
:param targeted: Indicates whether the attack is targeted (True) or untargeted (False).
:param regularization: Entropy regularization.
:param p: The p-wasserstein distance.
:param kernel_size: Kernel size for computing the cost matrix.
:param eps_step: Attack step size (input variation) at each iteration.
:param norm: The norm of the adversarial perturbation. Possible values: `inf`, `1`, `2` or `wasserstein`.
:param ball: The ball of the adversarial perturbation. Possible values: `inf`, `1`, `2` or `wasserstein`.
:param eps: Maximum perturbation that the attacker can introduce.
:param eps_iter: Number of iterations to increase the epsilon.
:param eps_factor: Factor to increase the epsilon.
:param max_iter: The maximum number of iterations.
:param conjugate_sinkhorn_max_iter: The maximum number of iterations for the conjugate sinkhorn optimizer.
:param projected_sinkhorn_max_iter: The maximum number of iterations for the projected sinkhorn optimizer.
:param batch_size: Size of batches.
:param verbose: Show progress bars.
"""
super().__init__(estimator=estimator)
self._targeted = targeted
self.regularization = regularization
self.p = p # pylint: disable=C0103
self.kernel_size = kernel_size
self.eps_step = eps_step
self.norm = norm
self.ball = ball
self.eps = eps
self.eps_iter = eps_iter
self.eps_factor = eps_factor
self.max_iter = max_iter
self.conjugate_sinkhorn_max_iter = conjugate_sinkhorn_max_iter
self.projected_sinkhorn_max_iter = projected_sinkhorn_max_iter
self.batch_size = batch_size
self.verbose = verbose
self._check_params()
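    # Illustrative usage sketch (assumes `classifier` is a trained ART classifier with loss
    # gradients, e.g. a PyTorchClassifier, and `x_test` is an array of inputs):
    #   attack = Wasserstein(estimator=classifier, eps=0.3, eps_step=0.1, max_iter=400)
    #   x_adv = attack.generate(x=x_test)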
def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:
"""
Generate adversarial samples and return them in an array.
:param x: An array with the original inputs.
:param y: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)` or indices of shape
(nb_samples,). Only provide this parameter if you'd like to use true labels when crafting adversarial
samples. Otherwise, model predictions are used as labels to avoid the "label leaking" effect
(explained in this paper: https://arxiv.org/abs/1611.01236). Default is `None`.
:param cost_matrix: A non-negative cost matrix.
:type cost_matrix: `np.ndarray`
:return: An array holding the adversarial examples.
"""
y = check_and_transform_label_format(y, self.estimator.nb_classes)
x_adv = x.copy().astype(ART_NUMPY_DTYPE)
if y is None:
# Throw error if attack is targeted, but no targets are provided
if self.targeted:
raise ValueError("Target labels `y` need to be provided for a targeted attack.")
# Use model predictions as correct outputs
targets = get_labels_np_array(self.estimator.predict(x, batch_size=self.batch_size))
else:
targets = y
if self.estimator.nb_classes == 2 and targets.shape[1] == 1:
raise ValueError(
"This attack has not yet been tested for binary classification with a single output classifier."
)
# Compute the cost matrix if needed
cost_matrix = kwargs.get("cost_matrix")
if cost_matrix is None:
cost_matrix = self._compute_cost_matrix(self.p, self.kernel_size)
# Compute perturbation with implicit batching
nb_batches = int(np.ceil(x.shape[0] / float(self.batch_size)))
for batch_id in trange(nb_batches, desc="Wasserstein", disable=not self.verbose):
logger.debug("Processing batch %i out of %i", batch_id, nb_batches)
batch_index_1, batch_index_2 = batch_id * self.batch_size, (batch_id + 1) * self.batch_size
batch = x_adv[batch_index_1:batch_index_2]
batch_labels = targets[batch_index_1:batch_index_2]
x_adv[batch_index_1:batch_index_2] = self._generate_batch(batch, batch_labels, cost_matrix)
logger.info(
"Success rate of attack: %.2f%%",
100 * compute_success(self.estimator, x, y, x_adv, self.targeted, batch_size=self.batch_size),
)
return x_adv
def _generate_batch(self, x: np.ndarray, targets: np.ndarray, cost_matrix: np.ndarray) -> np.ndarray:
"""
Generate a batch of adversarial samples and return them in an array.
:param x: An array with the original inputs.
:param targets: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)`.
:param cost_matrix: A non-negative cost matrix.
:return: Adversarial examples.
"""
adv_x = x.copy().astype(ART_NUMPY_DTYPE)
adv_x_best = x.copy().astype(ART_NUMPY_DTYPE)
if self.targeted:
err = np.argmax(self.estimator.predict(adv_x, batch_size=x.shape[0]), axis=1) == np.argmax(targets, axis=1)
else:
err = np.argmax(self.estimator.predict(adv_x, batch_size=x.shape[0]), axis=1) != np.argmax(targets, axis=1)
err_best = err
eps_ = np.ones(x.shape[0]) * self.eps
for i in range(self.max_iter):
adv_x = self._compute(adv_x, x, targets, cost_matrix, eps_, err)
if self.targeted:
err = np.argmax(self.estimator.predict(adv_x, batch_size=x.shape[0]), axis=1) == np.argmax(
targets, axis=1
)
else:
err = np.argmax(self.estimator.predict(adv_x, batch_size=x.shape[0]), axis=1) != np.argmax(
targets, axis=1
)
if np.mean(err) > np.mean(err_best):
err_best = err
adv_x_best = adv_x.copy()
if np.mean(err) == 1:
break
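            # Every eps_iter iterations, grow the budget of still-unsuccessful samples by
            # eps_factor; e.g. with eps=0.3, eps_factor=1.1, eps_iter=10, a sample that is still
            # not adversarial after 20 iterations has its budget raised to 0.3 * 1.1**2 = 0.363.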
if (i + 1) % self.eps_iter == 0:
eps_[~err] *= self.eps_factor
return adv_x_best
def _compute(
self,
x_adv: np.ndarray,
x_init: np.ndarray,
y: np.ndarray,
cost_matrix: np.ndarray,
eps: np.ndarray,
err: np.ndarray,
) -> np.ndarray:
"""
Compute adversarial examples for one iteration.
:param x_adv: Current adversarial examples.
:param x_init: An array with the original inputs.
:param y: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)` or indices of shape
(nb_samples,). Only provide this parameter if you'd like to use true labels when crafting adversarial
samples. Otherwise, model predictions are used as labels to avoid the "label leaking" effect
(explained in this paper: https://arxiv.org/abs/1611.01236). Default is `None`.
:param cost_matrix: A non-negative cost matrix.
:param eps: Maximum perturbation that the attacker can introduce.
:param err: Current successful adversarial examples.
:return: Adversarial examples.
"""
# Compute and apply perturbation
x_adv[~err] = self._compute_apply_perturbation(x_adv, y, cost_matrix)[~err]
# Do projection
x_adv[~err] = self._apply_projection(x_adv, x_init, cost_matrix, eps)[~err]
# Clip x_adv
if self.estimator.clip_values is not None:
clip_min, clip_max = self.estimator.clip_values
x_adv = np.clip(x_adv, clip_min, clip_max)
return x_adv
def _compute_apply_perturbation(self, x: np.ndarray, y: np.ndarray, cost_matrix: np.ndarray) -> np.ndarray:
"""
Compute and apply perturbations.
:param x: Current adversarial examples.
:param y: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)` or indices of shape
(nb_samples,). Only provide this parameter if you'd like to use true labels when crafting adversarial
samples. Otherwise, model predictions are used as labels to avoid the "label leaking" effect
(explained in this paper: https://arxiv.org/abs/1611.01236). Default is `None`.
:param cost_matrix: A non-negative cost matrix.
:return: Adversarial examples.
"""
# Pick a small scalar to avoid division by 0
tol = 10e-8
# Get gradient wrt loss; invert it if attack is targeted
grad = self.estimator.loss_gradient(x, y) * (1 - 2 * int(self.targeted))
# Apply norm bound
if self.norm == "inf":
grad = np.sign(grad)
x_adv = x + self.eps_step * grad
elif self.norm == "1":
ind = tuple(range(1, len(x.shape)))
grad = grad / (np.sum(np.abs(grad), axis=ind, keepdims=True) + tol)
x_adv = x + self.eps_step * grad
elif self.norm == "2":
ind = tuple(range(1, len(x.shape)))
grad = grad / (np.sqrt(np.sum(np.square(grad), axis=ind, keepdims=True)) + tol)
x_adv = x + self.eps_step * grad
elif self.norm == "wasserstein":
x_adv = self._conjugate_sinkhorn(x, grad, cost_matrix)
else:
raise NotImplementedError(
"Values of `norm` different from `1`, `2`, `inf` and `wasserstein` are currently not supported."
)
return x_adv
def _apply_projection(
self, x: np.ndarray, x_init: np.ndarray, cost_matrix: np.ndarray, eps: np.ndarray
) -> np.ndarray:
"""
Apply projection on the ball of size `eps`.
:param x: Current adversarial examples.
:param x_init: An array with the original inputs.
:param cost_matrix: A non-negative cost matrix.
:param eps: Maximum perturbation that the attacker can introduce.
:return: Adversarial examples.
"""
# Pick a small scalar to avoid division by 0
tol = 10e-8
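        # For the 1/2/inf balls the perturbation delta = x - x_init is pulled back towards the
        # eps-ball around x_init: scaled by min(1, eps/||delta||_p) for p in {1, 2} and clipped
        # element-wise for inf; the Wasserstein ball is handled by the projected Sinkhorn
        # iteration below.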
if self.ball == "2":
values = x - x_init
values_tmp = values.reshape((values.shape[0], -1))
values_tmp = values_tmp * np.expand_dims(
np.minimum(1.0, eps / (np.linalg.norm(values_tmp, axis=1) + tol)), axis=1
)
values = values_tmp.reshape(values.shape)
x_adv = values + x_init
elif self.ball == "1":
values = x - x_init
values_tmp = values.reshape((values.shape[0], -1))
values_tmp = values_tmp * np.expand_dims(
np.minimum(1.0, eps / (np.linalg.norm(values_tmp, axis=1, ord=1) + tol)), axis=1
)
values = values_tmp.reshape(values.shape)
x_adv = values + x_init
elif self.ball == "inf":
values = x - x_init
values_tmp = values.reshape((values.shape[0], -1))
values_tmp = np.sign(values_tmp) * np.minimum(abs(values_tmp), np.expand_dims(eps, -1))
values = values_tmp.reshape(values.shape)
x_adv = values + x_init
elif self.ball == "wasserstein":
x_adv = self._projected_sinkhorn(x, x_init, cost_matrix, eps)
else:
raise NotImplementedError(
"Values of `ball` different from `1`, `2`, `inf` and `wasserstein` are currently not supported."
)
return x_adv
def _conjugate_sinkhorn(self, x: np.ndarray, grad: np.ndarray, cost_matrix: np.ndarray) -> np.ndarray:
"""
        The conjugate Sinkhorn optimizer.
:param x: Current adversarial examples.
:param grad: The loss gradients.
:param cost_matrix: A non-negative cost matrix.
:return: Adversarial examples.
"""
# Normalize inputs
normalization = x.reshape(x.shape[0], -1).sum(-1).reshape(x.shape[0], 1, 1, 1)
x = x.copy() / normalization
# Dimension size for each example
m = np.prod(x.shape[1:])
# Initialize
alpha = np.log(np.ones(x.shape) / m) + 0.5
exp_alpha = np.exp(-alpha)
beta = -self.regularization * grad
beta = beta.astype(np.float64)
exp_beta = np.exp(-beta)
# Check for overflow
if (exp_beta == np.inf).any():
raise ValueError("Overflow error in `_conjugate_sinkhorn` for exponential beta.")
cost_matrix_new = cost_matrix.copy() + 1
cost_matrix_new = np.expand_dims(np.expand_dims(cost_matrix_new, 0), 0)
i_nonzero = self._batch_dot(x, self._local_transport(cost_matrix_new, grad, self.kernel_size)) != 0
i_nonzero_ = np.zeros(alpha.shape).astype(bool)
i_nonzero_[:, :, :, :] = np.expand_dims(np.expand_dims(np.expand_dims(i_nonzero, -1), -1), -1)
psi = np.ones(x.shape[0])
var_k = np.expand_dims(np.expand_dims(np.expand_dims(psi, -1), -1), -1)
var_k = np.exp(-var_k * cost_matrix - 1)
convergence = -np.inf
for _ in range(self.conjugate_sinkhorn_max_iter):
# Block coordinate descent iterates
x[x == 0.0] = EPS_LOG # Prevent divide by zero in np.log
alpha[i_nonzero_] = (np.log(self._local_transport(var_k, exp_beta, self.kernel_size)) - np.log(x))[
i_nonzero_
]
exp_alpha = np.exp(-alpha)
# Newton step
var_g = -self.eps_step + self._batch_dot(
exp_alpha, self._local_transport(cost_matrix * var_k, exp_beta, self.kernel_size)
)
var_h = -self._batch_dot(
exp_alpha, self._local_transport(cost_matrix * cost_matrix * var_k, exp_beta, self.kernel_size)
)
delta = var_g / var_h
# Ensure psi >= 0
tmp = np.ones(delta.shape)
neg = psi - tmp * delta < 0
while neg.any() and np.min(tmp) > 1e-2:
tmp[neg] /= 2
neg = psi - tmp * delta < 0
psi[i_nonzero] = np.maximum(psi - tmp * delta, 0)[i_nonzero]
# Update K
var_k = np.expand_dims(np.expand_dims(np.expand_dims(psi, -1), -1), -1)
var_k = np.exp(-var_k * cost_matrix - 1)
# Check for convergence
next_convergence = self._conjugated_sinkhorn_evaluation(x, alpha, exp_alpha, exp_beta, psi, var_k)
if (np.abs(convergence - next_convergence) <= 1e-4 + 1e-4 * np.abs(next_convergence)).all():
break
convergence = next_convergence
result = exp_beta * self._local_transport(var_k, exp_alpha, self.kernel_size)
result[~i_nonzero] = 0
result *= normalization
return result
def _projected_sinkhorn(
self, x: np.ndarray, x_init: np.ndarray, cost_matrix: np.ndarray, eps: np.ndarray
) -> np.ndarray:
"""
The projected sinkhorn_optimizer.
:param x: Current adversarial examples.
:param x_init: An array with the original inputs.
:param cost_matrix: A non-negative cost matrix.
:param eps: Maximum perturbation that the attacker can introduce.
:return: Adversarial examples.
"""
# Normalize inputs
normalization = x_init.reshape(x.shape[0], -1).sum(-1).reshape(x.shape[0], 1, 1, 1)
x = x.copy() / normalization
x_init = x_init.copy() / normalization
# Dimension size for each example
m = np.prod(x_init.shape[1:])
# Initialize
beta = np.log(np.ones(x.shape) / m)
exp_beta = np.exp(-beta)
psi = np.ones(x.shape[0])
var_k = np.expand_dims(np.expand_dims(np.expand_dims(psi, -1), -1), -1)
var_k = np.exp(-var_k * cost_matrix - 1)
convergence = -np.inf
for _ in range(self.projected_sinkhorn_max_iter):
# Block coordinate descent iterates
x_init[x_init == 0.0] = EPS_LOG # Prevent divide by zero in np.log
alpha = np.log(self._local_transport(var_k, exp_beta, self.kernel_size)) - np.log(x_init)
exp_alpha = np.exp(-alpha)
beta = (
self.regularization
* np.exp(self.regularization * x)
* self._local_transport(var_k, exp_alpha, self.kernel_size)
)
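            # The beta-update of projected Sinkhorn has the form beta = W(c * exp(reg * x)) - reg * x,
            # where W is the Lambert W function; since W(z) is approximately z for very small z,
            # lambertw is only applied to entries above a small threshold and the rest are left as-is.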
beta[beta > 1e-10] = np.real(lambertw(beta[beta > 1e-10]))
beta -= self.regularization * x
exp_beta = np.exp(-beta)
# Newton step
var_g = -eps + self._batch_dot(
exp_alpha, self._local_transport(cost_matrix * var_k, exp_beta, self.kernel_size)
)
var_h = -self._batch_dot(
exp_alpha, self._local_transport(cost_matrix * cost_matrix * var_k, exp_beta, self.kernel_size)
)
delta = var_g / var_h
# Ensure psi >= 0
tmp = np.ones(delta.shape)
neg = psi - tmp * delta < 0
while neg.any() and np.min(tmp) > 1e-2:
tmp[neg] /= 2
neg = psi - tmp * delta < 0
psi = np.maximum(psi - tmp * delta, 0)
# Update K
var_k = np.expand_dims(np.expand_dims(np.expand_dims(psi, -1), -1), -1)
var_k = np.exp(-var_k * cost_matrix - 1)
# Check for convergence
next_convergence = self._projected_sinkhorn_evaluation(
x,
x_init,
alpha,
exp_alpha,
beta,
exp_beta,
psi,
var_k,
eps,
)
if (np.abs(convergence - next_convergence) <= 1e-4 + 1e-4 *
|
np.abs(next_convergence)
|
numpy.abs
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import cv2
import scipy.io as sio
import numpy as np
import PIL.Image as Image
import PIL.ImageDraw as ImageDraw
import cPickle
import torch
import time
import json as json
from libs.datasets.dataloader import sDataLoader
import libs.configs.config as cfg
import libs.boxes.cython_bbox as cython_bbox
from libs.nets.utils import everything2tensor
from libs.datasets.cityscapesscripts.helpers.annotation import Annotation
from libs.datasets.cityscapesscripts.helpers.labels import name2label
from libs.datasets.citypersons_eval import COCO, COCOeval
from libs.layers.data_layer import data_layer_keep_aspect_ratio, \
data_layer_keep_aspect_ratio_batch
"""
dir structure:
./data/
data/citypersons
data/citypersons/cityscape
data/citypersons/cityscape/leftImg8bit/{train|val|test}
data/citypersons/cityscape/gtFine/{train|val|test}
"""
class citypersons_extend(object):
block_threshold = 0.3
block_threshold_low = 0.05
def __init__(self, data_root, split, is_training=True):
assert split in ['train', 'val', 'test'], \
            'unknown citypersons split setting: {}'.format(split)
self.data_root = data_root
self.annot_path = os.path.join(data_root, 'annotations', 'anno_' + split + '.mat')
assert os.path.exists(self.annot_path), \
'{} not found'.format(self.annot_path)
self.cityscape_path = os.path.join(data_root, 'cityscape')
self.split = split
self.extend_dir = os.path.join(self.data_root, 'extend', self.split)
self.vis_dir = os.path.join(self.data_root, 'visualization', self.split)
if not os.path.exists(self.extend_dir):
os.makedirs(self.extend_dir)
if not os.path.exists(self.vis_dir):
os.makedirs(self.vis_dir)
self.gt_annots = []
self.extend_annots = []
self.classes = ('__background__', # always index 0
'pedestrian', 'rider', 'sitting', 'unusual', 'group')
# self.classes = ('__background__', # always index 0
# 'pedestrian')
self._is_training = is_training
self.patch_path = os.path.join(data_root, 'patches')
if not os.path.exists(self.patch_path):
os.makedirs(self.patch_path)
self.all_instances = []
self.load_gt()
# self._build_p_anchors()
def load_gt(self):
annots = sio.loadmat(self.annot_path)
annots = annots['anno_'+self.split+'_aligned'].reshape([-1])
for ann in annots:
ann = ann.reshape([-1])
city_name, image_name, bbs = ann[0][0][0], ann[0][1][0], ann[0][2]
city_name = 'tubingen' if city_name == 'tuebingen' else city_name
json_name = image_name.replace('_leftImg8bit.png', '_gtFine_polygons.json')
img_name = os.path.join(self.cityscape_path, 'leftImg8bit', self.split, city_name, image_name)
json_name = os.path.join(self.cityscape_path, 'gtFine', self.split, city_name, json_name)
if not os.path.exists(img_name):
raise ValueError('image {} not found'.format(img_name))
bbs = bbs.astype(np.float32)
gt_classes, gt_boxes = bbs[:, 0], bbs[:, 1:5]
gt_boxes_vis = bbs[:, 6:10]
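            # Boxes are stored as [x, y, w, h]; convert them (and the visible boxes) to corner
            # format [x1, y1, x2, y2] by adding the width/height to the top-left corner.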
gt_boxes[:, 2:4] += gt_boxes[:, 0:2]
gt_boxes_vis[:, 2:4] += gt_boxes_vis[:, 0:2]
self.gt_annots.append({
'img_name': img_name,
'json_name': json_name,
'gt_boxes': gt_boxes.astype(np.float32),
'gt_classes': gt_classes,
'gt_boxes_vis': gt_boxes_vis.astype(np.float32),
})
def get_inst_mask(self, gt_annot, min_overlap=0.8, min_visible=0.8, min_height=150, min_width=50):
json_name = gt_annot['json_name']
gt_boxes_vis = gt_annot['gt_boxes_vis']
annotation = Annotation()
annotation.fromJsonFile(json_name)
size = (annotation.imgWidth, annotation.imgHeight)
background = name2label['unlabeled'].id
inst_img = Image.new("L", size, background)
drawer_whole = ImageDraw.Draw(inst_img)
cnt = 1
instances = []
for obj in annotation.objects:
label = obj.label
polygon = obj.polygon
# polygon = np.array(polygon, dtype=np.int32)
if label not in ['person']:
continue
x1, y1 = np.array(polygon).min(axis=0)
x2, y2 = np.array(polygon).max(axis=0)
# def PolyArea(x, y):
# return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
# polygon_np = np.array(polygon)
# area = PolyArea(polygon_np[:, 0], polygon_np[:, 1])
if obj.deleted:
continue
if name2label[label].id < 0:
continue
# id = name2label[label].id # 25, 26 for person and rider
cnt += 1
drawer_whole.polygon(polygon, fill=cnt)
I = np.asarray(inst_img)
area = (I == cnt).sum()
# print (area, area_)
idx, max_ov= get_corresponding_gt_box(gt_annot['gt_boxes'], box=
|
np.array([x1, y1, x2, y2], dtype=np.float32)
|
numpy.array
|
import itertools
import logging
import numpy as np
from scipy import sparse
from ahmia import utils
from ahmia.models import PagePopScore, PagePopStats
from ahmia.validators import is_valid_full_onion_url, is_valid_onion
logger = logging.getLogger('ahmia')
def is_legit_link(link, entry):
"""
Estimate whether a link is legit or not (check: hidden links, etc)
TODO: ignore hidden links or any other tricky / promoting links like:
TODO: links to mirror sites (performance issue for local papepop)
TODO: Dead links (performance issue ~ resolve via the crawler?)
"""
    # ignore any links without text; note that some entries don't have a 'link_name' key, so:
if 'link_name' in link and not link['link_name']:
return False
return True
class PagePopHandler(object):
def __init__(self, entries=None, domains=None, beta=0.85, epsilon=10 ** -6):
"""
        Handler for the PagePop algorithm. It uses the dict ``domains_idxs`` to
        assign each domain an id, which is the corresponding index into the
        ``scores`` array that holds the PagePop scores.
        ``self.num_domains`` is used as a counter while building the
        adjacency matrix, and ``domains_idxs`` is also stored as an
        instance attribute for statistics purposes.
todo: profile code to reduce execution time (critical for local pagepop)
todo: simplify by merging domain_idxs and scores?
:param entries: An iterable to fetch entries from. It should be a
generator in order to reduce memory used.
:param domains: If non-empty then inspect only the links that point
to webpages under these domains
:param beta: 1-teleportation probability.
:param epsilon: stop condition. Minimum allowed amount of change
in the PagePops between iterations.
"""
self.entries = entries
self.domains = domains
self.beta = beta
self.epsilon = epsilon
self.domains_idxs = {}
self.scores = None
self.num_domains = 0
self.num_links = None
self.num_edges = None
def get_scores_as_dict(self):
"""
        Associates each onion with its score and returns the corresponding dict
:rtype dict
"""
objs = {}
for onion, index in self.domains_idxs.items():
objs[onion] = float(self.scores[index])
return objs
def get_scores(self):
"""
        Associates each onion with its score and returns a list of PagePopScore objs
:rtype list
"""
objs = []
for onion, index in self.domains_idxs.items():
kwargs = {
'onion': onion,
'score': self.scores[index],
}
new_obj = PagePopScore(**kwargs)
objs.append(new_obj)
return objs
def get_stats(self):
"""
Return number of domains (nodes), links (total links),
edges (unique inter-site links) as a dict
:rtype: ``dict``
"""
ret = {
'num_nodes': self.num_domains,
'num_links': self.num_links,
'num_edges': self.num_edges
}
return ret
def save(self):
"""
        Associates each onion with its score, using the parallel
        structures self.domains_idxs and self.scores, and stores the
        results in the DB via the PagePopScore model
"""
# Bulk Delete
PagePopScore.objects.all().delete()
# Associate onion with its score and create PagePopScore objs
objs = self.get_scores()
# Bulk Save the objects into the DB
PagePopScore.objects.bulk_create(objs)
# save stats
self._store_page_pop_stats()
def build_pagescores(self, entries=None, domains=None):
"""
Calculate the popularity of each domain and save scores to
`self.scores`, and the respecting indices in `self.domain_idxs`.
:param entries: An iterable that yields all the ES entries.
If not provided, the instance (class) attribute is used.
:param domains: If non-empty then inspect only the links that point
to webpages under these domains
"""
es_entries = entries or self.entries
domains = domains or self.domains
adj_graph = self._build_adjacency_graph(es_entries, domains)
matrix = self._build_sparse_matrix(adj_graph)
_ = self._compute_page_pop(matrix)
def _build_adjacency_graph(self, entries, domains):
"""
        Constructs and returns the adjacency graph of outgoing links.
todo: mv ES entries/documents parsing in separate function?
:param entries: An iterable that contains the ES entries.
Preferably a generator to reduce RAM usage.
:param domains: If non-empty then inspect only the links that point
to webpages under these domains
:return: adjacency matrix
:rtype: ``list`` of tuples
"""
entries = entries or self.entries
adj_graph = []
for e in entries:
if '_source' in e:
# if called by `calc_page_pop.py` then `e` is an ES document
e = e['_source']
if 'domain' in e:
# entry from crawled page
origin = e['domain']
elif 'source' in e:
# entry from anchor text
origin = e['source']
else:
logger.warning('rank_pages: Unable to process: %s' % e)
continue
origin = utils.extract_domain_from_url(origin)
if is_valid_onion(origin):
origin_idx = self._get_domain_idx(origin)
links = e.get('links', []) # crawled case
if 'target' in e: # anchor case
links.append({'link': e['target']})
for l in links:
# ignore SE-spam links
if is_legit_link(l, e):
url = l['link']
if is_valid_full_onion_url(url):
destiny = utils.extract_domain_from_url(url)
# if domains non-empty ignore any other origins
if not domains or destiny in domains:
if destiny != origin:
destiny_idx = self._get_domain_idx(destiny)
adj_graph.append((origin_idx, destiny_idx))
self.num_links = len(adj_graph) # total links
adj_graph = set(adj_graph) # count only 1 edge of source->destiny
self.num_edges = len(adj_graph) # unique links
return adj_graph
def _build_sparse_matrix(self, adj_graph, num_nodes=None):
"""
Builds a sparse matrix needed by compute_page_pop()
        :param adj_graph: A directed adjacency graph as an iterable structure
:param num_nodes: The number of nodes referenced in adj_graph
:return: A sparse boolean matrix representing a link map
:rtype: ``scipy.sparse.csr.csr_matrix``
"""
num_nodes = num_nodes or self.num_domains
row = [edge[1] for edge in adj_graph] # destinies
col = [edge[0] for edge in adj_graph] # origins
# if number of nodes not supplied count distinctly the nodes
num_nodes = num_nodes or len(set(itertools.chain(row, col)))
# print("nodes counted: %s" % len(set(itertools.chain(row, col))))
return sparse.csr_matrix(
([True] * len(adj_graph), (row, col)),
shape=(num_nodes, num_nodes))
def _compute_page_pop(self, adj, beta=None, epsilon=None):
"""
Efficient computation of the PagePop values using a sparse adjacency
matrix and the iterative power method.
based on https://is.gd/weFAC1 (blog.samuelmh.com)
:param adj: boolean adjacency matrix. If adj_j,i is True,
then there is a link from i to j
:type adj: scipy.sparse.csr.csr_matrix
:param beta: 1-teleportation probability.
:param epsilon: stop condition. Minimum allowed amount of
change in the PagePops between iterations.
:return: PagePop array normalized
:rtype: ``numpy.ndarray``
"""
beta = beta or self.beta
epsilon = epsilon or self.epsilon
n, _ = adj.shape
# Test adjacency matrix is OK
assert (adj.shape == (n, n))
# Constants Speed-UP
deg_out_beta = adj.sum(axis=0).T / beta # vector
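        # Dividing the out-degrees by beta up-front makes adj.dot(scores / deg_out_beta) equal to
        # beta * M @ scores, where M is the column-stochastic transition matrix; the remaining
        # (1 - beta) teleportation mass plus any mass leaked through dangling nodes is
        # redistributed uniformly inside the loop below.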
# Initialize
scores = np.ones((n, 1)) / n # vector
iterations = 0
flag = True
while flag:
iterations += 1
with np.errstate(divide='ignore'):
# Ignore division by 0 on scores/deg_out_beta
new_scores = adj.dot((scores / deg_out_beta)) # vector
# Leaked PagePop
new_scores += (1 - new_scores.sum()) / n
# Stop condition
if
|
np.linalg.norm(scores - new_scores, ord=1)
|
numpy.linalg.norm
|
import argparse
from functools import partial
import hashlib
import json
import logging
from pathlib import Path
import pickle
from typing import Callable, List, Tuple
import warnings
import faiss
import numpy as np
import spacy
from tqdm import tqdm
from text_to_uri import english_filter, replace_numbers
CACHE_DIR = Path('.cache/')
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
SPACY_MODEL='en_core_web_lg'
def init_cache():
if not CACHE_DIR.exists():
logger.debug(f'Creating cache dir at: {CACHE_DIR}')
CACHE_DIR.mkdir(parents=True)
def _cache_path(fn, args, kwargs):
fn_name = fn.__name__
args_string = ','.join(str(arg) for arg in args)
kwargs_string = json.dumps(kwargs)
byte_string = fn_name + args_string + kwargs_string
hash_object = hashlib.sha1(byte_string.encode())
return CACHE_DIR / hash_object.hexdigest()
def cache():
def decorator(fn):
def load_cached_if_available(*args, **kwargs):
path = _cache_path(fn, args, kwargs)
if path.exists():
logger.debug(f'Loading `{fn.__name__}` output from cache')
with open(path, 'rb') as f:
return pickle.load(f)
output = fn(*args, **kwargs)
with open(path, 'wb') as f:
pickle.dump(output, f)
return output
return load_cached_if_available
return decorator
class Vocab:
def __init__(self, words) -> None:
self.idx_to_word = words
self.word_to_idx = {word: idx for idx, word in enumerate(words)}
@cache()
def read_embedding_file(embedding_file: Path) -> Tuple[Vocab, np.ndarray]:
logger.debug(f'Reading embeddings from {embedding_file}')
with open(embedding_file, 'r') as f:
info = next(f)
shape = tuple(int(x) for x in info.split())
embeddings = np.zeros(shape, dtype=np.float32)
words = []
for i, line in tqdm(enumerate(f), total=shape[0]):
word, *embedding = line.split()
embedding = np.array([float(x) for x in embedding])
words.append(word)
embeddings[i] = embedding
vocab = Vocab(words)
return vocab, embeddings
def build_index(metric: str, embeddings: np.ndarray) -> faiss.swigfaiss_avx2.Index:
logger.debug(f'Building search index')
if metric == 'cosine':
index = faiss.IndexFlatIP(embeddings.shape[-1])
elif metric == 'l2':
index = faiss.IndexFlatL2(embeddings.shape[-1])
else:
raise ValueError(f'Bad metric: {metric}')
index.add(embeddings)
return index
def generate_instances(dataset: Path):
with open(dataset, 'r') as f:
for line in f:
yield(json.loads(line))
def get_extraction_fn(extraction_strategy: str,
ngram_length: int) -> Callable[[List[str], Vocab], List[str]]:
if extraction_strategy == 'exhaustive':
return partial(exhaustive_extraction, ngram_length=ngram_length)
elif extraction_strategy == 'greedy':
return partial(greedy_extraction, ngram_length=ngram_length)
elif extraction_strategy == 'root':
return partial(root_extraction, ngram_length=ngram_length)
else:
raise ValueError(f'Bad extraction strategy: {extraction_strategy}')
def exhaustive_extraction(phrase: List[str],
vocab: Vocab,
ngram_length: int) -> List[str]:
tokens = english_filter([x.lower() for x in phrase])
num_tokens = len(tokens)
out = []
for n in range(1, ngram_length + 1):
for i in range(num_tokens - n + 1):
concept = replace_numbers('_'.join(tokens[i: i+n]))
if concept in vocab.word_to_idx:
out.append(concept)
return out
def greedy_extraction(phrase: List[str],
vocab: Vocab,
ngram_length: int) -> List[str]:
tokens = english_filter([x.lower() for x in phrase])
out = []
while len(tokens) > 0:
for n in range(ngram_length + 1, 0, -1):
concept = replace_numbers('_'.join(tokens[:n]))
if concept in vocab.word_to_idx:
out.append(concept)
tokens = tokens[n:]
break
elif n == 1:
tokens = tokens[n:]
return out
def root_extraction(phrase: List[str],
vocab: Vocab,
ngram_length: int) -> List[str]:
doc = nlp(' '.join(phrase))
# Logic for noun phrases
for chunk in doc.noun_chunks:
if chunk.root.dep_ == 'ROOT':
# Return entire chunk if in vocab
tokens = english_filter([token.text.lower() for token in chunk])
concept = replace_numbers('_'.join(tokens))
if concept in vocab.word_to_idx:
return [concept]
# Otherwise return just the root token
root = replace_numbers(chunk.root.text.lower())
if root in vocab.word_to_idx:
return [root]
# Logic for other types of roots
for token in doc:
if token.dep_ == 'ROOT':
concept = replace_numbers(token.text.lower())
if concept in vocab.word_to_idx:
return [concept]
return []
def link(graphs: List,
output: Path = None,
embedding_file: Path = Path('../numberbatch-en-19.08.txt'),
metric: str = 'cosine',
extraction_strategy: str = 'greedy',
ngram_length: int = 3,
num_candidates: int = 5,
debug: bool = False) -> List:
"""
Browse the top-k ConceptNet candidates for each node.
Parameters
==========
graphs : List
A list containing parsed alpha NLI graphs.
output : Path
Jsonl file to serialize output to.
embedding_file : Path
A txt file containing the embeddings.
metric: str
Similarity metric. One of: 'cosine', 'l2'
extraction_strategy: str
Approach for extracting concepts from mentions. One of: 'exhaustive', 'greedy', 'root'
ngram_length: int
Max length of n-grams to consider during concept extraction.
num_candidates : int
Number of candidates to return.
"""
assert metric in {'cosine', 'l2'}
assert extraction_strategy in {'exhaustive', 'greedy', 'root'}
if debug:
logger.setLevel(logging.DEBUG)
global nlp
nlp = spacy.load(SPACY_MODEL)
init_cache()
vocab, embeddings = read_embedding_file(embedding_file)
index = build_index(metric, embeddings)
extraction_fn = get_extraction_fn(extraction_strategy, ngram_length)
if output:
output_file = open(output, 'w')
output_instances = []
for instance in graphs:
output_instance = instance.copy()
for uri, node in instance['nodes'].items():
# Extract concept tokens from phrase
phrase = node['phrase']
concepts = extraction_fn(phrase, vocab)
concept_ids = np.array([vocab.word_to_idx[concept] for concept in concepts])
if len(concept_ids) > 0:
if len(concept_ids) > 1:
mean_embedding =
|
np.mean(embeddings[concept_ids], axis=0, keepdims=True)
|
numpy.mean
|
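The long prompt above extracts ConceptNet concepts from a phrase and, when several concepts match, averages their Numberbatch embeddings before querying a FAISS index; the completion is the numpy.mean call with keepdims=True. The sketch below shows how that averaged vector feeds the index; the random embedding matrix, the concept ids, and the variable names are illustrative assumptions, not the real numberbatch-en-19.08.txt data.

import faiss
import numpy as np

# Toy stand-ins for the embedding matrix and the extracted concept ids.
rng = np.random.default_rng(0)
embeddings = rng.standard_normal((1000, 300)).astype(np.float32)
concept_ids = np.array([3, 17, 42])
num_candidates = 5

# Average the embeddings of all extracted concepts; keepdims=True preserves the
# (1, d) shape that Index.search expects for a single query vector.
mean_embedding = np.mean(embeddings[concept_ids], axis=0, keepdims=True)

index = faiss.IndexFlatIP(embeddings.shape[-1])   # inner-product index
index.add(embeddings)
scores, neighbours = index.search(np.ascontiguousarray(mean_embedding), num_candidates)
print(neighbours[0])   # row indices of the top candidate concepts

Note that IndexFlatIP scores by raw inner product; for a true cosine metric the embedding rows and the query vector would be L2-normalized before indexing and searching.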
import argparse
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as plticker
import sys
import os
import glog
from matplotlib.ticker import FormatStrFormatter
root_data_location = './figures/'
def get_allocations_and_time(ifile):
# print(ifile)
fd = open(ifile, 'r')
lines = fd.readlines()
start_read = False
execution_time = []
allocations = []
total_allocs = 0
for line in lines:
if not start_read and 'init' in line:
start_read = True
continue
if 'init' in line:
continue
if 'done' in line:
start_read = False
break
if start_read:
# stop after cumulative 3600 seconds (one hour)
cumul_time = float(line.split(' ')[2])
if cumul_time > 3600:
break
total_allocs += 1
execution_time.append(float(line.split(' ')[2]))
allocations.append(total_allocs)
fd.close()
return (allocations, execution_time)
def cactus_plot(plot_file_name, datacenterName, vdcName, datasets,
allocs_range, time_range):
print('started plotting')
# fig, ax = plt.subplots(figsize=(3.25, 3.25))
fig, ax = plt.subplots(figsize=(4, 2.25))
n_groups = len(datasets)
lines = []
axes = plt.gca()
n_of_markers = 4
#plt.title(datacenterName + " " + vdcName)
for index, (name, color, marker, msize, file) in enumerate(datasets):
if os.path.exists(file):
allocations, execution_times = get_allocations_and_time(file)
if len(allocations) == 0:
print("Warning: No allocations found for " + file)
mspace = allocs_range[-1]/n_of_markers
line = ax.plot(execution_times, allocations, color=color,
marker=marker, markevery=mspace, markersize=msize, label=name)
else:
print("Warning: Missing file: " + file)
ax.legend(loc='upper left', bbox_to_anchor=(0, 1.8), numpoints=2, ncol=2,
frameon=False)
plt.xlabel('CPU Time (s)')
plt.ylabel('Number of VDC allocations')
plt.yticks(allocs_range)
# shift the x-axis start a little to the left so that secondnet runs (very close to 0) stay visible
xshift = max(3, time_range[1]/10)
ax.set_xlim(-xshift)
plt.xticks(time_range)
fig.tight_layout()
plt.draw()
final_figure = plt.gcf()
final_figure.savefig(plot_file_name, bbox_inches='tight', dpi=200)
print('plotting done, see %s' % plot_file_name)
plt.close()
def fig6():
# included in the paper.pdf
# fig6_cactus_2000_16_T1_9VMs.pdf
# fig6_cactus_2000_16_T3_15VMs.pdf
servers_and_cores = [('200', '4'), #('200', '16'), ('400', '16'),
('2000', '16')]
allocs_ranges = [np.arange(0, 101, 25),
np.arange(0, 101, 25),
np.arange(0, 101, 25),
np.arange(0, 61, 20),
np.arange(0, 61, 20),
np.arange(0, 61, 20),
np.arange(0, 4001, 1000),
np.arange(0, 3601, 900),
np.arange(0, 3001, 750),
np.arange(0, 1201, 300),
np.arange(0, 1201, 300),
np.arange(0, 1201, 300)]
time_ranges = [
|
np.arange(0, 61, 15)
|
numpy.arange
|
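The final row completes a list of tick ranges with numpy.arange. The sketch below shows the same idea in isolation, with fixed np.arange ranges driving plt.xticks and plt.yticks on a cactus-style plot; the synthetic data and the output file name are assumptions for illustration.

import numpy as np
import matplotlib.pyplot as plt

# Synthetic data; the real script parses allocation counts and CPU times from log files.
execution_times = np.linspace(0, 55, 40)
allocations = np.arange(1, 41)

allocs_range = np.arange(0, 101, 25)   # y ticks: 0, 25, 50, 75, 100
time_range = np.arange(0, 61, 15)      # x ticks: 0, 15, 30, 45, 60

fig, ax = plt.subplots(figsize=(4, 2.25))
ax.plot(execution_times, allocations, marker='o', markevery=10)
plt.yticks(allocs_range)
plt.xticks(time_range)
plt.xlabel('CPU Time (s)')
plt.ylabel('Number of VDC allocations')
fig.tight_layout()
fig.savefig('cactus_example.pdf', bbox_inches='tight', dpi=200)
plt.close(fig)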