repo_name | path | copies | size | content | license
---|---|---|---|---|---
jmschrei/scikit-learn
|
examples/semi_supervised/plot_label_propagation_digits.py
|
268
|
2723
|
"""
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semi-supervised learning by
training a Label Spreading model to classify handwritten digits
using a very small set of labels.
The handwritten digit dataset has 1797 total points. The model will
be trained on a subset of 330 points, of which only 30 will be labeled.
The results, in the form of a confusion matrix and a series of metrics
over each class, will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# mark unlabeled points with -1 so the model treats them as unlabeled
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
|
bsd-3-clause
|
ccauet/scikit-optimize
|
skopt/learning/gaussian_process/kernels.py
|
1
|
14710
|
from math import sqrt
import numpy as np
from sklearn.gaussian_process.kernels import Kernel as sk_Kernel
from sklearn.gaussian_process.kernels import ConstantKernel as sk_ConstantKernel
from sklearn.gaussian_process.kernels import DotProduct as sk_DotProduct
from sklearn.gaussian_process.kernels import Exponentiation as sk_Exponentiation
from sklearn.gaussian_process.kernels import ExpSineSquared as sk_ExpSineSquared
from sklearn.gaussian_process.kernels import Hyperparameter
from sklearn.gaussian_process.kernels import Matern as sk_Matern
from sklearn.gaussian_process.kernels import NormalizedKernelMixin as sk_NormalizedKernelMixin
from sklearn.gaussian_process.kernels import Product as sk_Product
from sklearn.gaussian_process.kernels import RationalQuadratic as sk_RationalQuadratic
from sklearn.gaussian_process.kernels import RBF as sk_RBF
from sklearn.gaussian_process.kernels import StationaryKernelMixin as sk_StationaryKernelMixin
from sklearn.gaussian_process.kernels import Sum as sk_Sum
from sklearn.gaussian_process.kernels import WhiteKernel as sk_WhiteKernel
class Kernel(sk_Kernel):
"""
Base class for skopt.gaussian_process kernels.
Supports computation of the gradient of the kernel with respect to X
"""
def __add__(self, b):
if not isinstance(b, Kernel):
return Sum(self, ConstantKernel(b))
return Sum(self, b)
def __radd__(self, b):
if not isinstance(b, Kernel):
return Sum(ConstantKernel(b), self)
return Sum(b, self)
def __mul__(self, b):
if not isinstance(b, Kernel):
return Product(self, ConstantKernel(b))
return Product(self, b)
def __rmul__(self, b):
if not isinstance(b, Kernel):
return Product(ConstantKernel(b), self)
return Product(b, self)
def __pow__(self, b):
return Exponentiation(self, b)
def gradient_x(self, x, X_train):
"""
Computes gradient of K(x, X_train) with respect to x
Parameters
----------
x: array-like, shape=(n_features,)
A single test point.
X_train: array-like, shape=(n_samples, n_features)
Training data used to fit the Gaussian process.
Returns
-------
gradient_x: array-like, shape=(n_samples, n_features)
Gradient of K(x, X_train) with respect to x.
"""
raise NotImplementedError
class RBF(Kernel, sk_RBF):
def gradient_x(self, x, X_train):
# diff = (x - X) / length_scale
# size = (n_train_samples, n_dimensions)
x = np.asarray(x)
X_train = np.asarray(X_train)
length_scale = np.asarray(self.length_scale)
diff = x - X_train
diff /= length_scale
# e = -exp(-0.5 * \sum_{i=1}^d (diff ** 2))
# size = (n_train_samples, 1)
exp_diff_squared = np.sum(diff**2, axis=1)
exp_diff_squared *= -0.5
exp_diff_squared = np.exp(exp_diff_squared, exp_diff_squared)
exp_diff_squared = np.expand_dims(exp_diff_squared, axis=1)
exp_diff_squared *= -1
# gradient = (e * diff) / length_scale
gradient = exp_diff_squared * diff
gradient /= length_scale
return gradient
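# Illustrative only (not part of the original module): a minimal sketch of
# checking a kernel's gradient_x against a central finite-difference
# approximation. The helper name and the step size eps are assumptions, not
# part of skopt; it reuses the numpy import (np) from the top of this module
# and should work for any kernel defined here that implements gradient_x
# (e.g. RBF or Matern).
def _check_gradient_x_fd(kernel, x, X_train, eps=1e-6):
    """Return the max absolute difference between analytic and numeric gradients."""
    x = np.asarray(x, dtype=float)
    X_train = np.asarray(X_train, dtype=float)
    analytic = kernel.gradient_x(x, X_train)
    numeric = np.zeros_like(analytic)
    for j in range(x.shape[0]):
        x_plus, x_minus = x.copy(), x.copy()
        x_plus[j] += eps
        x_minus[j] -= eps
        # kernel(A, B) returns the (n_A, n_B) Gram matrix; take its single row.
        k_plus = kernel(np.expand_dims(x_plus, axis=0), X_train)[0]
        k_minus = kernel(np.expand_dims(x_minus, axis=0), X_train)[0]
        numeric[:, j] = (k_plus - k_minus) / (2.0 * eps)
    # e.g. _check_gradient_x_fd(RBF(length_scale=1.0), np.zeros(3),
    #                           np.random.rand(5, 3)) should be close to zero.
    return np.max(np.abs(analytic - numeric))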
class Matern(Kernel, sk_Matern):
def gradient_x(self, x, X_train):
x = np.asarray(x)
X_train = np.asarray(X_train)
length_scale = np.asarray(self.length_scale)
# diff = (x - X_train) / length_scale
# size = (n_train_samples, n_dimensions)
diff = x - X_train
diff /= length_scale
# dist_sq = \sum_{i=1}^d (diff ^ 2)
# dist = sqrt(dist_sq)
# size = (n_train_samples,)
dist_sq = np.sum(diff**2, axis=1)
dist = np.sqrt(dist_sq)
if self.nu == 0.5:
# e = -np.exp(-dist) / dist
# size = (n_train_samples, 1)
scaled_exp_dist = -dist
scaled_exp_dist = np.exp(scaled_exp_dist, scaled_exp_dist)
scaled_exp_dist *= -1
# grad = (e * diff) / length_scale
# When x_i equals y_i for all i in [0, D):
# 1. e -> -1
# 2. (x_i - y_i) / sqrt(\sum_{j=1}^D (x_j - y_j)**2) approaches 1.
# Hence the gradient when x_i equals y_i for all i in [0, D)
# is -1 / length_scale[i].
gradient = -np.ones((X_train.shape[0], x.shape[0]))
mask = dist != 0.0
scaled_exp_dist[mask] /= dist[mask]
scaled_exp_dist = np.expand_dims(scaled_exp_dist, axis=1)
gradient[mask] = scaled_exp_dist[mask] * diff[mask]
gradient /= length_scale
return gradient
elif self.nu == 1.5:
# grad(fg) = f'g + fg'
# where f = 1 + sqrt(3) * euclidean((X - Y) / length_scale)
# where g = exp(-sqrt(3) * euclidean((X - Y) / length_scale))
sqrt_3_dist = sqrt(3) * dist
f = np.expand_dims(1 + sqrt_3_dist, axis=1)
# When all of x_i equals y_i, f equals 1.0, (1 - f) equals
# zero, hence from below
# f * g_grad + g * f_grad (where g_grad = -g * f_grad)
# -f * g * f_grad + g * f_grad
# g * f_grad * (1 - f) equals zero.
# sqrt_3_by_dist can be set to any value since diff equals
# zero for this corner case.
sqrt_3_by_dist = np.zeros_like(dist)
nzd = dist != 0.0
sqrt_3_by_dist[nzd] = sqrt(3) / dist[nzd]
dist_expand = np.expand_dims(sqrt_3_by_dist, axis=1)
f_grad = diff / length_scale
f_grad *= dist_expand
sqrt_3_dist *= -1
exp_sqrt_3_dist = np.exp(sqrt_3_dist, sqrt_3_dist)
g = np.expand_dims(exp_sqrt_3_dist, axis=1)
g_grad = -g * f_grad
# f * g_grad + g * f_grad (where g_grad = -g * f_grad)
f *= -1
f += 1
return g * f_grad * f
elif self.nu == 2.5:
# grad(fg) = f'g + fg'
# where f = (1 + sqrt(5) * euclidean((X - Y) / length_scale) +
# 5 / 3 * sqeuclidean((X - Y) / length_scale))
# where g = exp(-sqrt(5) * euclidean((X - Y) / length_scale))
sqrt_5_dist = sqrt(5) * dist
f2 = (5.0 / 3.0) * dist_sq
f2 += sqrt_5_dist
f2 += 1
f = np.expand_dims(f2, axis=1)
# For i in [0, D) if x_i equals y_i
# f = 1 and g = 1
# Grad = f'g + fg' = f' + g'
# f' = f_1' + f_2'
# Also g' = -g * f1'
# Grad = f'g - g * f1' * f
# Grad = g * (f' - f1' * f)
# Grad = f' - f1'
# Grad = f2' which equals zero when x = y
# Since for this corner case, diff equals zero,
# dist can be set to anything.
nzd_mask = dist != 0.0
nzd = dist[nzd_mask]
dist[nzd_mask] = np.reciprocal(nzd, nzd)
dist *= sqrt(5)
dist = np.expand_dims(dist, axis=1)
diff /= length_scale
f1_grad = dist * diff
f2_grad = (10.0 / 3.0) * diff
f_grad = f1_grad + f2_grad
sqrt_5_dist *= -1
g = np.exp(sqrt_5_dist, sqrt_5_dist)
g = np.expand_dims(g, axis=1)
g_grad = -g * f1_grad
return f * g_grad + g * f_grad
class RationalQuadratic(Kernel, sk_RationalQuadratic):
def gradient_x(self, x, X_train):
x = np.asarray(x)
X_train = np.asarray(X_train)
alpha = self.alpha
length_scale = self.length_scale
# diff = (x - X_train) / length_scale
# size = (n_train_samples, n_dimensions)
diff = x - X_train
diff /= length_scale
# dist = -(1 + (\sum_{i=1}^d (diff^2) / (2 * alpha)))** (-alpha - 1)
# size = (n_train_samples,)
scaled_dist = np.sum(diff**2, axis=1)
scaled_dist /= (2 * self.alpha)
scaled_dist += 1
scaled_dist **= (-alpha - 1)
scaled_dist *= -1
scaled_dist = np.expand_dims(scaled_dist, axis=1)
diff_by_ls = diff / length_scale
return scaled_dist * diff_by_ls
class ExpSineSquared(Kernel, sk_ExpSineSquared):
def gradient_x(self, x, X_train):
x = np.asarray(x)
X_train = np.asarray(X_train)
length_scale = self.length_scale
periodicity = self.periodicity
diff = x - X_train
sq_dist = np.sum(diff**2, axis=1)
dist = np.sqrt(sq_dist)
pi_by_period = dist * (np.pi / periodicity)
sine = np.sin(pi_by_period) / length_scale
sine_squared = -2 * sine**2
exp_sine_squared = np.exp(sine_squared)
grad_wrt_exp = -2 * np.sin(2 * pi_by_period) / length_scale**2
# When x_i -> y_i for all i in [0, D), the gradient becomes
# zero. See https://github.com/MechCoder/Notebooks/blob/master/ExpSineSquared%20Kernel%20gradient%20computation.ipynb
# for a detailed math explanation
# grad_wrt_theta can be anything since diff is zero
# for this corner case, hence we set to zero.
grad_wrt_theta = np.zeros_like(dist)
nzd = dist != 0.0
grad_wrt_theta[nzd] = np.pi / (periodicity * dist[nzd])
return np.expand_dims(
grad_wrt_theta * exp_sine_squared * grad_wrt_exp, axis=1) * diff
class ConstantKernel(Kernel, sk_ConstantKernel):
def gradient_x(self, x, X_train):
return np.zeros_like(X_train)
class WhiteKernel(Kernel, sk_WhiteKernel):
def gradient_x(self, x, X_train):
return np.zeros_like(X_train)
class Exponentiation(Kernel, sk_Exponentiation):
def gradient_x(self, x, X_train):
x = np.asarray(x)
X_train = np.asarray(X_train)
expo = self.exponent
kernel = self.kernel
K = np.expand_dims(
kernel(np.expand_dims(x, axis=0), X_train)[0], axis=1)
return expo * K ** (expo - 1) * kernel.gradient_x(x, X_train)
class Sum(Kernel, sk_Sum):
def gradient_x(self, x, X_train):
return (
self.k1.gradient_x(x, X_train) +
self.k2.gradient_x(x, X_train)
)
class Product(Kernel, sk_Product):
def gradient_x(self, x, X_train):
x = np.asarray(x)
x = np.expand_dims(x, axis=0)
X_train = np.asarray(X_train)
f_ggrad = (
np.expand_dims(self.k1(x, X_train)[0], axis=1) *
self.k2.gradient_x(x, X_train)
)
fgrad_g = (
np.expand_dims(self.k2(x, X_train)[0], axis=1) *
self.k1.gradient_x(x, X_train)
)
return f_ggrad + fgrad_g
class DotProduct(Kernel, sk_DotProduct):
def gradient_x(self, x, X_train):
return np.asarray(X_train)
class HammingKernel(sk_StationaryKernelMixin, sk_NormalizedKernelMixin, Kernel):
"""
The HammingKernel is used to handle categorical inputs.
``K(x_1, x_2) = exp(\sum_{j=1}^{d} -ls_j * (I(x_1j != x_2j)))``
Parameters
-----------
* `length_scale` [float, array-like, shape=[n_features,], 1.0 (default)]
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
* `length_scale_bounds` [array-like, [1e-5, 1e5] (default)]
The lower and upper bound on length_scale
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.length_scale_bounds = length_scale_bounds
@property
def hyperparameter_length_scale(self):
length_scale = self.length_scale
anisotropic = np.iterable(length_scale) and len(length_scale) > 1
if anisotropic:
return Hyperparameter("length_scale", "numeric",
self.length_scale_bounds,
len(length_scale))
return Hyperparameter(
"length_scale", "numeric", self.length_scale_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
* `X` [array-like, shape=(n_samples_X, n_features)]
Left argument of the returned kernel k(X, Y)
* `Y` [array-like, shape=(n_samples_Y, n_features) or None(default)]
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
* `eval_gradient` [bool, False(default)]
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
* `K` [array-like, shape=(n_samples_X, n_samples_Y)]
Kernel k(X, Y)
* `K_gradient` [array-like, shape=(n_samples_X, n_samples_X, n_dims)]
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
length_scale = self.length_scale
anisotropic = np.iterable(length_scale) and len(length_scale) > 1
if np.iterable(length_scale):
if len(length_scale) > 1:
length_scale = np.asarray(length_scale, dtype=np.float)
else:
length_scale = float(length_scale[0])
else:
length_scale = float(length_scale)
X = np.atleast_2d(X)
if anisotropic and X.shape[1] != len(length_scale):
raise ValueError(
"Expected X to have %d features, got %d" %
(len(length_scale), X.shape[1]))
n_samples, n_dim = X.shape
Y_is_None = Y is None
if Y_is_None:
Y = X
elif eval_gradient:
raise ValueError("gradient can be evaluated only when Y != X")
else:
Y = np.atleast_2d(Y)
indicator = np.expand_dims(X, axis=1) != Y
kernel_prod = np.exp(-np.sum(length_scale * indicator, axis=2))
# dK / d theta = (dK / dl) * (dl / d theta)
# theta = log(l) => dl / d (theta) = e^theta = l
# dK / d theta = l * dK / dl
# dK / dL computation
if anisotropic:
grad = -np.expand_dims(kernel_prod, axis=-1) * np.array(indicator, dtype=np.float32)
else:
grad = -np.expand_dims(kernel_prod * np.sum(indicator, axis=2), axis=-1)
grad *= length_scale
if eval_gradient:
return kernel_prod, grad
return kernel_prod
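# Illustrative only (not part of the original module): a minimal sketch of
# evaluating HammingKernel on integer-encoded categorical data. The helper
# name and the toy array are assumptions; numpy is already imported as np
# at the top of this module.
def _demo_hamming_kernel():
    X_cat = np.array([[0., 1., 2.],
                      [0., 1., 3.],
                      [1., 2., 2.]])
    hk = HammingKernel(length_scale=1.0)
    K = hk(X_cat)  # (3, 3) similarity matrix; K[i, i] == 1.0
    # Rows 0 and 1 differ in one of three coordinates, so with
    # length_scale=1.0, K[0, 1] == exp(-1) ~= 0.368.
    K_same, K_grad = hk(X_cat, eval_gradient=True)  # gradient w.r.t. log(length_scale)
    return K, K_grad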
|
bsd-3-clause
|
B3AU/waveTree
|
sklearn/feature_extraction/hashing.py
|
29
|
5648
|
# Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : NumPy type, optional
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
input_type : string, optional
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
non_negative : boolean, optional
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
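# Illustrative only (not part of the original module): a minimal sketch of
# the hashing trick on two toy documents. The helper name and the example
# dicts are assumptions, not part of scikit-learn.
def _demo_feature_hasher():
    hasher = FeatureHasher(n_features=8, input_type="dict")
    docs = [{"dog": 1, "cat": 2}, {"dog": 2, "run": 5}]
    X_demo = hasher.transform(docs)
    # X_demo is a (2, 8) scipy.sparse CSR matrix of signed counts; with only
    # 8 columns, hash collisions between different feature names are likely.
    return X_demo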
|
bsd-3-clause
|
yunfeilu/scikit-learn
|
sklearn/gaussian_process/tests/test_gaussian_process.py
|
267
|
6813
|
"""
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a one-dimensional Gaussian Process model.
# Check random start optimization.
# Test the interpolating property.
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the interpolating property.
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
thetaL = [1e-4] * 2
thetaU = [1e-1] * 2
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=thetaL,
thetaU=thetaU,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
eps = np.finfo(gp.theta_.dtype).eps
assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters
assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the GP interpolation for 2D output
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
f = lambda x: np.vstack((g(x), g(x))).T
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = f(X)
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
# Repeat test_1d and test_2d for several built-in correlation
# models specified as strings.
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
# Repeat test_1d and test_2d with given regression weights (beta0) for
# different regression models (Ordinary Kriging).
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the reduced likelihood function of the optimal theta.
n_samples, n_features = 50, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
best_likelihood = -np.inf
for random_start in range(1, 5):
gp = GaussianProcess(regr="constant", corr="squared_exponential",
theta0=[1e-0] * n_features,
thetaL=[1e-4] * n_features,
thetaU=[1e+1] * n_features,
random_start=random_start, random_state=0,
verbose=False).fit(X, y)
rlf = gp.reduced_likelihood_function()[0]
assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
best_likelihood = rlf
def test_mse_solving():
# test the MSE estimate to be sane.
# non-regression test for ignoring off-diagonals of feature covariance,
# testing with nugget that renders covariance useless, only
# using the mean function, with low effective rank of data
gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
optimizer='Welch', regr="linear", random_state=0)
X, y = make_regression(n_informative=3, n_features=60, noise=50,
random_state=0, effective_rank=1)
gp.fit(X, y)
assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
|
bsd-3-clause
|
subutai/htmresearch
|
projects/sequence_prediction/discrete_sequences/plotRepeatedPerturbExperiment.py
|
6
|
14424
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Plot sequence prediction & perturbation experiment result
"""
import os
import pickle
from matplotlib import pyplot as plt
import matplotlib as mpl
import numpy as np
from plot import movingAverage
from plot import computeAccuracy
from plot import readExperiment
mpl.rcParams['pdf.fonttype'] = 42
plt.ion()
plt.close('all')
def loadExperiment(experiment):
print "Loading experiment ", experiment
data = readExperiment(experiment)
(accuracy, x) = computeAccuracy(data['predictions'],
data['truths'],
data['iterations'],
resets=data['resets'],
randoms=data['randoms'])
return (accuracy, x)
def calculateMeanStd(accuracyAll):
numRepeats = len(accuracyAll)
numLength = min([len(a) for a in accuracyAll])
accuracyMat = np.zeros(shape=(numRepeats, numLength))
for i in range(numRepeats):
accuracyMat[i, :] = accuracyAll[i][:numLength]
meanAccuracy = np.mean(accuracyMat, axis=0)
stdAccuracy = np.std(accuracyMat, axis=0)
return (meanAccuracy, stdAccuracy)
def plotWithErrBar(x, y, error, color):
plt.fill_between(x, y-error, y+error,
alpha=0.3, edgecolor=color, facecolor=color)
plt.plot(x, y, color, color=color, linewidth=4)
plt.ylabel('Prediction Accuracy')
plt.xlabel(' Number of elements seen')
def analyzeResult(x, accuracy, perturbAt=10000, movingAvg=True, smooth=True):
if movingAvg:
accuracy = movingAverage(accuracy, min(len(accuracy), 100))
x = np.array(x)
accuracy = np.array(accuracy)
if smooth:
# perform smoothing convolution
mask = np.ones(shape=(100,))
mask = mask/np.sum(mask)
# extend accuracy vector to eliminate boundary effect of convolution
accuracy = np.concatenate((accuracy, np.ones((200, ))*accuracy[-1]))
accuracy = np.convolve(accuracy, mask, 'same')
accuracy = accuracy[:len(x)]
perturbAtX = np.where(x > perturbAt)[0][0]
finalAccuracy = accuracy[perturbAtX-len(mask)/2]
learnTime = min(np.where(np.logical_and(accuracy > finalAccuracy * 0.95,
x < x[perturbAtX - len(mask)/2-1]))[0])
learnTime = x[learnTime]
finalAccuracyAfterPerturbation = accuracy[-1]
learnTimeAfterPerturbation = min(np.where(
np.logical_and(accuracy > finalAccuracyAfterPerturbation * 0.95,
x > x[perturbAtX + len(mask)]))[0])
learnTimeAfterPerturbation = x[learnTimeAfterPerturbation] - perturbAt
result = {"finalAccuracy": finalAccuracy,
"learnTime": learnTime,
"finalAccuracyAfterPerturbation": finalAccuracyAfterPerturbation,
"learnTimeAfterPerturbation": learnTimeAfterPerturbation}
return result
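# Illustrative only (not part of the original script): a minimal sketch of
# analyzeResult on a synthetic accuracy trace, to show the shape of the
# returned dict. The helper name and the toy ramp are assumptions;
# movingAvg=False avoids the dependency on movingAverage here.
def demoAnalyzeResult():
  xDemo = range(0, 20000, 10)
  accuracyDemo = [min(1.0, i / 5000.0) for i in xDemo]
  resultDemo = analyzeResult(xDemo, accuracyDemo, perturbAt=10000,
                             movingAvg=False)
  # resultDemo contains finalAccuracy, learnTime,
  # finalAccuracyAfterPerturbation and learnTimeAfterPerturbation
  return resultDemo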
if __name__ == '__main__':
try:
# Load raw experiment results
# You have to run the experiments
# In ./tm/
# python tm_suite.py --experiment="high-order-distributed-random-perturbed" -d
# In ./lstm/
# python suite.py --experiment="high-order-distributed-random-perturbed" -d
expResults = {}
expResultsAnaly = {}
# HTM
tmResults = os.path.join("tm/results",
"high-order-distributed-random-perturbed")
accuracyAll = []
exptLabel = 'HTM'
expResultsAnaly[exptLabel] = []
for seed in range(10):
experiment = os.path.join(tmResults,
"seed" + "{:.1f}".format(seed), "0.log")
(accuracy, x) = loadExperiment(experiment)
expResultsAnaly[exptLabel].append(analyzeResult(x, accuracy))
accuracy = movingAverage(accuracy, min(len(accuracy), 100))
accuracyAll.append(np.array(accuracy))
(meanAccuracy, stdAccuracy) = calculateMeanStd(accuracyAll)
x = x[:len(meanAccuracy)]
expResults[exptLabel] = {
'x': x, 'meanAccuracy': meanAccuracy, 'stdAccuracy': stdAccuracy}
# TDNN
tdnnResults = os.path.join("tdnn/results",
"high-order-distributed-random-perturbed")
accuracyAll = []
exptLabel = 'TDNN'
expResultsAnaly[exptLabel] = []
for seed in range(20):
experiment = os.path.join(tdnnResults,
"seed" + "{:.1f}".format(
seed) + "learning_window3000.0", "0.log")
(accuracy, x) = loadExperiment(experiment)
expResultsAnaly[exptLabel].append(analyzeResult(x, accuracy))
accuracy = movingAverage(accuracy, min(len(accuracy), 100))
accuracyAll.append(np.array(accuracy))
(meanAccuracy, stdAccuracy) = calculateMeanStd(accuracyAll)
x = x[:len(meanAccuracy)]
expResults[exptLabel] = {
'x': x, 'meanAccuracy': meanAccuracy, 'stdAccuracy': stdAccuracy}
tdnnResults = os.path.join("tdnn/results",
"high-order-distributed-random-perturbed-long-window")
accuracyAll = []
exptLabel = 'TDNN-long'
expResultsAnaly[exptLabel] = []
for seed in range(8):
experiment = os.path.join(tdnnResults,
"seed" + "{:.1f}".format(
seed) + "learning_window3000.0", "0.log")
(accuracy, x) = loadExperiment(experiment)
expResultsAnaly[exptLabel].append(analyzeResult(x, accuracy))
accuracy = movingAverage(accuracy, min(len(accuracy), 100))
accuracyAll.append(np.array(accuracy))
(meanAccuracy, stdAccuracy) = calculateMeanStd(accuracyAll)
x = x[:len(meanAccuracy)]
expResults[exptLabel] = {
'x': x, 'meanAccuracy': meanAccuracy, 'stdAccuracy': stdAccuracy}
tdnnResults = os.path.join("tdnn/results",
"high-order-distributed-random-perturbed-short-window")
accuracyAll = []
exptLabel = 'TDNN-short'
expResultsAnaly[exptLabel] = []
for seed in range(8):
experiment = os.path.join(tdnnResults,
"seed" + "{:.1f}".format(
seed) + "learning_window3000.0", "0.log")
(accuracy, x) = loadExperiment(experiment)
expResultsAnaly[exptLabel].append(analyzeResult(x, accuracy))
accuracy = movingAverage(accuracy, min(len(accuracy), 100))
accuracyAll.append(np.array(accuracy))
(meanAccuracy, stdAccuracy) = calculateMeanStd(accuracyAll)
x = x[:len(meanAccuracy)]
expResults[exptLabel] = {
'x': x, 'meanAccuracy': meanAccuracy, 'stdAccuracy': stdAccuracy}
# ELM
elmResults = os.path.join("elm/results",
"high-order-distributed-random-perturbed")
accuracyAll = []
exptLabel = 'ELM'
expResultsAnaly[exptLabel] = []
for seed in range(10, 20):
experiment = os.path.join(elmResults,
"seed" + "{:.1f}".format(seed), "0.log")
(accuracy, x) = loadExperiment(experiment)
expResultsAnaly[exptLabel].append(analyzeResult(x, accuracy))
accuracy = movingAverage(accuracy, min(len(accuracy), 100))
accuracyAll.append(np.array(accuracy))
(meanAccuracy, stdAccuracy) = calculateMeanStd(accuracyAll)
x = x[:len(meanAccuracy)]
expResults[exptLabel] = {
'x': x, 'meanAccuracy': meanAccuracy, 'stdAccuracy': stdAccuracy}
# LSTM
lstmResults = os.path.join("lstm/results",
"high-order-distributed-random-perturbed")
for learningWindow in [1000.0, 3000.0, 9000.0]:
accuracyAll = []
exptLabel = 'LSTM-'+"{:.0f}".format(learningWindow)
expResultsAnaly[exptLabel] = []
for seed in range(20):
experiment = os.path.join(
lstmResults, "seed{:.1f}learning_window{:.1f}".format(seed, learningWindow),
"0.log")
(accuracy, x) = loadExperiment(experiment)
expResultsAnaly[exptLabel].append(analyzeResult(x, accuracy))
accuracy = movingAverage(accuracy, min(len(accuracy), 100))
accuracyAll.append(np.array(accuracy))
(meanAccuracy, stdAccuracy) = calculateMeanStd(accuracyAll)
expResults[exptLabel] = {
'x': x, 'meanAccuracy': meanAccuracy, 'stdAccuracy': stdAccuracy}
# online- LSTM
lstmResults = os.path.join("lstm/results",
"high-order-distributed-random-perturbed-online")
for learningWindow in [100.0]:
accuracyAll = []
exptLabel = 'LSTM-online'+"{:.0f}".format(learningWindow)
expResultsAnaly[exptLabel] = []
for seed in range(10):
experiment = os.path.join(
lstmResults, "seed{:.1f}learning_window{:.1f}".format(seed, learningWindow),
"0.log")
(accuracy, x) = loadExperiment(experiment)
expResultsAnaly[exptLabel].append(analyzeResult(x, accuracy))
accuracy = movingAverage(accuracy, min(len(accuracy), 100))
accuracyAll.append(np.array(accuracy))
(meanAccuracy, stdAccuracy) = calculateMeanStd(accuracyAll)
x = x[:len(meanAccuracy)]
expResults[exptLabel] = {
'x': x, 'meanAccuracy': meanAccuracy, 'stdAccuracy': stdAccuracy}
output = open('./result/ContinuousLearnExperiment.pkl', 'wb')
pickle.dump(expResults, output, -1)
output.close()
output = open('./result/ContinuousLearnExperimentAnaly.pkl', 'wb')
pickle.dump(expResultsAnaly, output, -1)
output.close()
except:
print "Cannot find raw experiment results"
print "Plot using saved processed experiment results"
expResults = pickle.load(open('./result/ContinuousLearnExperiment.pkl', 'rb'))
expResultsAnaly = pickle.load(open('./result/ContinuousLearnExperimentAnaly.pkl', 'rb'))
plt.figure(1)
fig, axs = plt.subplots(nrows=1, ncols=2, sharey=True)
colorList = {"HTM": "r", "ELM": "b", "LSTM-1000": "y", "LSTM-9000": "g",
"TDNN": "c", "LSTM-online100": "m"}
modelList = ['HTM', 'ELM', 'TDNN', 'LSTM-1000', 'LSTM-9000', 'LSTM-online100']
for model in modelList:
expResult = expResults[model]
plt.figure(1)
plotWithErrBar(expResult['x'],
expResult['meanAccuracy'], expResult['stdAccuracy'],
colorList[model])
perturbAtX = np.where(np.array(expResult['x']) > 10000)[0][0]
result = analyzeResult(expResult['x'], expResult['meanAccuracy'], movingAvg=False)
resultub = analyzeResult(expResult['x'],
expResult['meanAccuracy']-expResult['stdAccuracy'],
movingAvg=False)
resultlb = analyzeResult(expResult['x'],
expResult['meanAccuracy']+expResult['stdAccuracy'],
movingAvg=False)
learnTimeErr = [result['learnTime']-resultlb['learnTime'],
resultub['learnTime']-result['learnTime']]
learnTimeErrAfterPerturb = [
result['learnTimeAfterPerturbation']-resultlb['learnTimeAfterPerturbation'],
resultub['learnTimeAfterPerturbation']-result['learnTimeAfterPerturbation']]
axs[0].errorbar(x=result['learnTime'], y=result['finalAccuracy'],
yerr=expResult['stdAccuracy'][perturbAtX],
xerr=np.mean(learnTimeErr), ecolor=colorList[model])
axs[1].errorbar(x=result['learnTimeAfterPerturbation'],
y=result['finalAccuracyAfterPerturbation'],
yerr=expResult['stdAccuracy'][-1],
xerr=np.mean(learnTimeErrAfterPerturb),
ecolor=colorList[model])
axs[0].set_title("Before modification")
axs[1].set_title("After modification")
plt.figure(1)
plt.legend(modelList, loc=4)
retrainLSTMAt = np.arange(start=1000, stop=20000, step=1000)
for line in retrainLSTMAt:
plt.axvline(line, color='orange')
plt.axvline(10000, color='black')
plt.ylim([-0.05, 1.05])
plt.xlim([0, 20000])
for ax in axs:
ax.legend(modelList, loc=4)
ax.set_xlabel(' Number of samples required to achieve final accuracy')
ax.set_ylabel(' Final accuracy ')
ax.set_ylim([0.5, 1.05])
# axs[1].set_xlim([0, 30000])
axs[0].set_xlim([0, 10000])
axs[1].set_xlim([0, 10000])
plt.figure(1)
plt.savefig('./result/model_performance_high_order_prediction.pdf')
plt.figure(2)
plt.savefig('./result/model_performance_summary_high_order_prediction.pdf')
#
# # plot accuracy vs
# plt.figure(2)
# plt.figure(3)
# for model in ['HTM', 'LSTM-1000', 'LSTM-3000', 'LSTM-9000']:
# finalAccuracy = []
# finalAccuracyAfterPerturbation = []
# learnTime = []
# learnTimeAfterPerturbation = []
#
# for result in expResultsAnaly[model]:
# finalAccuracy.append(result['finalAccuracy'])
# finalAccuracyAfterPerturbation.append(result['finalAccuracyAfterPerturbation'])
# learnTime.append(result['learnTime'])
# learnTimeAfterPerturbation.append(result['learnTimeAfterPerturbation'])
#
# plt.figure(2)
# plt.errorbar(x=np.mean(learnTime), y=np.mean(finalAccuracy),
# yerr=np.std(finalAccuracy), xerr=np.std(learnTime))
#
# plt.figure(3)
# plt.errorbar(x=np.mean(learnTimeAfterPerturbation),
# y=np.mean(finalAccuracyAfterPerturbation),
# yerr=np.std(finalAccuracyAfterPerturbation),
# xerr=np.std(learnTimeAfterPerturbation))
#
# for fig in [2, 3]:
# plt.figure(fig)
# plt.legend(['HTM', 'LSTM-1000', 'LSTM-3000', 'LSTM-9000'], loc=3)
# plt.xlabel(' Number of sequences required to achieve final accuracy')
# plt.ylabel(' Final accuracy ')
|
agpl-3.0
|
smartscheduling/scikit-learn-categorical-tree
|
examples/bicluster/plot_spectral_coclustering.py
|
276
|
1736
|
"""
==============================================
A demo of the Spectral Co-Clustering algorithm
==============================================
This example demonstrates how to generate a dataset and bicluster it
using the Spectral Co-Clustering algorithm.
The dataset is generated using the ``make_biclusters`` function, which
creates a matrix of small values and implants biclusters with large
values. The rows and columns are then shuffled and passed to the
Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to
make biclusters contiguous shows how accurately the algorithm found
the biclusters.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_biclusters
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.metrics import consensus_score
data, rows, columns = make_biclusters(
shape=(300, 300), n_clusters=5, noise=5,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.3f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.show()
|
bsd-3-clause
|
frank-tancf/scikit-learn
|
doc/conf.py
|
15
|
8446
|
# -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2016, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
|
bsd-3-clause
|
Akshay0724/scikit-learn
|
examples/linear_model/lasso_dense_vs_sparse_data.py
|
348
|
1862
|
"""
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
|
bsd-3-clause
|
robin-lai/scikit-learn
|
examples/svm/plot_custom_kernel.py
|
171
|
1546
|
"""
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(X, Y):
"""
We create a custom kernel:
             (2  0)
k(X, Y) = X  (    ) Y.T
             (0  1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(X, M), Y.T)
h = .02 # step size in the mesh
# we create an instance of SVM and fit our data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
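# Illustrative only (not part of the original example): a minimal sketch of
# the equivalent fit through a precomputed Gram matrix; clf_pre is a
# hypothetical name. At predict time this route needs the cross-kernel
# my_kernel(X_new, X), of shape (n_new, n_train).
clf_pre = svm.SVC(kernel='precomputed')
clf_pre.fit(my_kernel(X, X), Y)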
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
ChanderG/scikit-learn
|
examples/linear_model/plot_omp.py
|
385
|
2263
|
"""
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |w|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
|
bsd-3-clause
|
vibhorag/scikit-learn
|
examples/linear_model/plot_bayesian_ridge.py
|
248
|
2588
|
"""
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
|
bsd-3-clause
|
DuCorey/bokeh
|
bokeh/util/serialization.py
|
2
|
11282
|
'''
Functions for helping with serialization and deserialization of
Bokeh objects.
Certain NumPy array dtypes can be serialized to a binary format for
performance and efficiency. The list of supported dtypes is:
{binary_array_types}
'''
from __future__ import absolute_import
import logging
log = logging.getLogger(__name__)
import base64
import datetime as dt
import math
from six import iterkeys
import numpy as np
from .string import format_docstring
from .dependencies import import_optional
pd = import_optional('pandas')
BINARY_ARRAY_TYPES = set([
np.dtype(np.float32),
np.dtype(np.float64),
np.dtype(np.uint8),
np.dtype(np.int8),
np.dtype(np.uint16),
np.dtype(np.int16),
np.dtype(np.uint32),
np.dtype(np.int32),
])
DATETIME_TYPES = set([
dt.datetime,
dt.timedelta,
dt.date,
dt.time,
np.datetime64,
np.timedelta64
])
if pd:
try:
_pd_timestamp = pd.Timestamp
except AttributeError:
_pd_timestamp = pd.tslib.Timestamp
DATETIME_TYPES.add(_pd_timestamp)
DATETIME_TYPES.add(pd.Timedelta)
NP_EPOCH = np.datetime64(0, 'ms')
NP_MS_DELTA = np.timedelta64(1, 'ms')
DT_EPOCH = dt.datetime.utcfromtimestamp(0)
__doc__ = format_docstring(__doc__, binary_array_types="\n".join("* ``np." + str(x) + "``" for x in BINARY_ARRAY_TYPES))
_simple_id = 1000
_dt_tuple = tuple(DATETIME_TYPES)
def is_datetime_type(obj):
''' Whether an object is any date, datetime, or time delta type
recognized by Bokeh.
Arg:
obj (object) : the object to test
Returns:
bool : True if ``obj`` is a datetime type
'''
return isinstance(obj, _dt_tuple)
def convert_datetime_type(obj):
''' Convert any recognized date, datetime or time delta value to
floating point milliseconds
Date and Datetime values are converted to milliseconds since epoch.
    Timedelta values are converted to absolute milliseconds.
Arg:
obj (object) : the object to convert
Returns:
float : milliseconds
'''
# Pandas Timestamp
if pd and isinstance(obj, _pd_timestamp): return obj.value / 10**6.0
# Pandas Timedelta
elif pd and isinstance(obj, pd.Timedelta): return obj.value / 10**6.0
# Datetime (datetime is a subclass of date)
elif isinstance(obj, dt.datetime):
diff = obj.replace(tzinfo=None) - DT_EPOCH
return diff.total_seconds() * 1000. + obj.microsecond / 1000.
# Timedelta (timedelta is class in the datetime library)
elif isinstance(obj, dt.timedelta):
return obj.total_seconds() * 1000.
# Date
elif isinstance(obj, dt.date):
return (dt.datetime(*obj.timetuple()[:6]) - DT_EPOCH).total_seconds() * 1000
# NumPy datetime64
elif isinstance(obj, np.datetime64):
epoch_delta = obj - NP_EPOCH
return (epoch_delta / NP_MS_DELTA)
# Numpy timedelta64
elif isinstance(obj, np.timedelta64):
return (obj / NP_MS_DELTA)
# Time
elif isinstance(obj, dt.time):
return (obj.hour * 3600 + obj.minute * 60 + obj.second) * 1000 + obj.microsecond / 1000.
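# A few illustrative values (comment added for reference, not part of the
# original module); every branch above normalizes to float milliseconds:
#
#     convert_datetime_type(dt.datetime(1970, 1, 1, 0, 0, 1))      # 1000.0
#     convert_datetime_type(dt.timedelta(seconds=2))               # 2000.0
#     convert_datetime_type(np.datetime64('1970-01-01T00:00:01'))  # 1000.0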
def make_id():
''' Return a new unique ID for a Bokeh object.
Normally this function will return UUIDs to use for identifying Bokeh
    objects. This is especially important for Bokeh objects stored on a
Bokeh server. However, it is convenient to have more human-readable
IDs during development, so this behavior can be overridden by
setting the environment variable ``BOKEH_SIMPLE_IDS=yes``.
'''
global _simple_id
import uuid
from ..settings import settings
if settings.simple_ids(False):
_simple_id += 1
new_id = _simple_id
else:
new_id = uuid.uuid4()
return str(new_id)
def array_encoding_disabled(array):
''' Determine whether an array may be binary encoded.
The NumPy array dtypes that can be encoded are:
{binary_array_types}
Args:
array (np.ndarray) : the array to check
Returns:
bool
'''
# disable binary encoding for non-supported dtypes
return array.dtype not in BINARY_ARRAY_TYPES
array_encoding_disabled.__doc__ = format_docstring(array_encoding_disabled.__doc__,
binary_array_types="\n ".join("* ``np." + str(x) + "``"
for x in BINARY_ARRAY_TYPES))
def transform_array(array, force_list=False):
    ''' Transform a NumPy array into a serialized format
Converts un-serializable dtypes and returns JSON serializable
format
Args:
array (np.ndarray) : a NumPy array to be transformed
force_list (bool, optional) : whether to only output to standard lists
This function can encode some dtypes using a binary encoding, but
setting this argument to True will override that and cause only
standard Python lists to be emitted. (default: False)
Returns:
JSON
'''
# Check for astype failures (putative Numpy < 1.7)
try:
dt2001 = np.datetime64('2001')
legacy_datetime64 = (dt2001.astype('int64') ==
dt2001.astype('datetime64[ms]').astype('int64'))
except AttributeError as e:
if e.args == ("'module' object has no attribute 'datetime64'",):
import sys
# for compatibility with PyPy that doesn't have datetime64
if 'PyPy' in sys.version:
legacy_datetime64 = False
pass
else:
raise e
else:
raise e
# not quite correct, truncates to ms..
if array.dtype.kind == 'M':
if legacy_datetime64:
if array.dtype == np.dtype('datetime64[ns]'):
array = array.astype('int64') / 10**6.0
else:
array = array.astype('datetime64[us]').astype('int64') / 1000.
elif array.dtype.kind == 'm':
array = array.astype('timedelta64[us]').astype('int64') / 1000.
return serialize_array(array, force_list)
def transform_array_to_list(array):
''' Transforms a NumPy array into a list of values
Args:
        array (np.ndarray) : the NumPy array to transform
Returns:
list or dict
'''
if (array.dtype.kind in ('u', 'i', 'f') and (~np.isfinite(array)).any()):
transformed = array.astype('object')
transformed[np.isnan(array)] = 'NaN'
transformed[np.isposinf(array)] = 'Infinity'
transformed[np.isneginf(array)] = '-Infinity'
return transformed.tolist()
elif (array.dtype.kind == 'O' and pd and pd.isnull(array).any()):
transformed = array.astype('object')
transformed[pd.isnull(array)] = 'NaN'
return transformed.tolist()
return array.tolist()
def transform_series(series, force_list=False):
''' Transforms a Pandas series into serialized form
Args:
series (pd.Series) : the Pandas series to transform
force_list (bool, optional) : whether to only output to standard lists
This function can encode some dtypes using a binary encoding, but
setting this argument to True will override that and cause only
standard Python lists to be emitted. (default: False)
Returns:
list or dict
'''
vals = series.values
return transform_array(vals, force_list)
def serialize_array(array, force_list=False):
''' Transforms a NumPy array into serialized form.
Args:
array (np.ndarray) : the NumPy array to transform
force_list (bool, optional) : whether to only output to standard lists
This function can encode some dtypes using a binary encoding, but
setting this argument to True will override that and cause only
standard Python lists to be emitted. (default: False)
Returns:
list or dict
'''
if isinstance(array, np.ma.MaskedArray):
array = array.filled(np.nan) # Set masked values to nan
if (array_encoding_disabled(array) or force_list):
return transform_array_to_list(array)
if not array.flags['C_CONTIGUOUS']:
array = np.ascontiguousarray(array)
return encode_base64_dict(array)
def traverse_data(obj, use_numpy=True):
''' Recursively traverse an object until a flat list is found.
If NumPy is available, the flat list is converted to a numpy array
and passed to transform_array() to handle ``nan``, ``inf``, and
``-inf``.
    Otherwise, iterate through all items, converting values that are not
    JSON-serializable (``nan``, ``inf``, ``-inf``) to strings
Args:
obj (list) : a list of values or lists
        use_numpy (bool, optional) : toggle NumPy as a dependency for testing
This argument is only useful for testing (default: True)
'''
if use_numpy and all(isinstance(el, np.ndarray) for el in obj):
return [transform_array(el) for el in obj]
obj_copy = []
for item in obj:
# Check the base/common case first for performance reasons
# Also use type(x) is float because it's faster than isinstance
if type(item) is float:
if math.isnan(item):
item = 'NaN'
elif math.isinf(item):
if item > 0:
item = 'Infinity'
else:
item = '-Infinity'
obj_copy.append(item)
elif isinstance(item, (list, tuple)): # check less common type second
obj_copy.append(traverse_data(item))
else:
obj_copy.append(item)
return obj_copy
def transform_column_source_data(data):
    ''' Transform ColumnDataSource data to a serialized format
Args:
data (dict) : the mapping of names to data columns to transform
Returns:
JSON compatible dict
'''
data_copy = {}
for key in iterkeys(data):
if pd and isinstance(data[key], (pd.Series, pd.Index)):
data_copy[key] = transform_series(data[key])
elif isinstance(data[key], np.ndarray):
data_copy[key] = transform_array(data[key])
else:
data_copy[key] = traverse_data(data[key])
return data_copy
def encode_base64_dict(array):
''' Encode a NumPy array using base64:
The encoded format is a dict with the following structure:
.. code:: python
{
'__ndarray__' : << base64 encoded array data >>,
'shape' : << array shape >>,
'dtype' : << dtype name >>,
}
Args:
array (np.ndarray) : an array to encode
Returns:
dict
'''
return {
'__ndarray__' : base64.b64encode(array.data).decode('utf-8'),
'shape' : array.shape,
'dtype' : array.dtype.name
}
def decode_base64_dict(data):
''' Decode a base64 encoded array into a NumPy array.
Args:
data (dict) : encoded array data to decode
Data should have the format encoded by :func:`encode_base64_dict`.
Returns:
np.ndarray
'''
b64 = base64.b64decode(data['__ndarray__'])
array = np.fromstring(b64, dtype=data['dtype'])
if len(data['shape']) > 1:
array = array.reshape(data['shape'])
return array
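if __name__ == '__main__':
    # Minimal round-trip sketch (illustrative addition, not part of the
    # original module): for supported dtypes, decode_base64_dict should
    # recover exactly what encode_base64_dict produced.
    _arr = np.arange(6, dtype=np.float64).reshape(2, 3)
    _encoded = encode_base64_dict(_arr)
    _decoded = decode_base64_dict(_encoded)
    assert _decoded.dtype == _arr.dtype and (_decoded == _arr).all()
    print("round-trip ok:", _encoded['dtype'], _encoded['shape'])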
|
bsd-3-clause
|
jm-begon/scikit-learn
|
sklearn/utils/tests/test_fixes.py
|
281
|
1829
|
# Authors: Gael Varoquaux <[email protected]>
# Justin Vincent
# Lars Buitinck
# License: BSD 3 clause
import numpy as np
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
from numpy.testing import (assert_almost_equal,
assert_array_almost_equal)
from sklearn.utils.fixes import divide, expit
from sklearn.utils.fixes import astype
def test_expit():
# Check numerical stability of expit (logistic function).
# Simulate our previous Cython implementation, based on
#http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
assert_almost_equal(expit(1000.), 1. / (1. + np.exp(-1000.)), decimal=16)
assert_almost_equal(expit(-1000.), np.exp(-1000.) / (1. + np.exp(-1000.)),
decimal=16)
x = np.arange(10)
out = np.zeros_like(x, dtype=np.float32)
assert_array_almost_equal(expit(x), expit(x, out=out))
def test_divide():
assert_equal(divide(.6, 1), .600000000000)
def test_astype_copy_memory():
a_int32 = np.ones(3, np.int32)
# Check that dtype conversion works
b_float32 = astype(a_int32, dtype=np.float32, copy=False)
assert_equal(b_float32.dtype, np.float32)
# Changing dtype forces a copy even if copy=False
assert_false(np.may_share_memory(b_float32, a_int32))
# Check that copy can be skipped if requested dtype match
c_int32 = astype(a_int32, dtype=np.int32, copy=False)
assert_true(c_int32 is a_int32)
# Check that copy can be forced, and is the case by default:
d_int32 = astype(a_int32, dtype=np.int32, copy=True)
assert_false(np.may_share_memory(d_int32, a_int32))
e_int32 = astype(a_int32, dtype=np.int32)
assert_false(np.may_share_memory(e_int32, a_int32))
|
bsd-3-clause
|
arcyfelix/ML-DL-AI
|
Supervised Learning/Image Recognition/Deep Inception/previous versions - WojNet/WojNet2.py
|
1
|
8703
|
# coding: utf-8
# In[1]:
from __future__ import division, print_function, absolute_import
import tflearn
from tflearn.layers.merge_ops import merge
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.estimator import regression
from tflearn.data_utils import to_categorical
from tflearn.helpers.trainer import Trainer
from import_data import *
import matplotlib.pyplot as plt
# Acquiring the data
folder = 'Digit Recognizer'
file_name = 'train.csv'
specific_dataset_source = folder + '/' + file_name
output_columns = ['label']
data = import_csv(specific_dataset_source, shuffle = True)
x_data, y_data = get_xy_mutual(data, output_columns, type = 'numpy')
x_data = standalization_divide(x_data, 255)
get_info(x_data, 'input')
num_samples = x_data.shape[0]
input_features = x_data.shape[1]
number_of_labels = labels_info(y_data)
y_data_as_numbers = labels_as_numbers(y_data)
split_percentage = 80
x_train, x_val = cross_validation(x_data, split_percentage)
x_train = np.array(x_data[0:(int(x_data.shape[0]/(100/split_percentage)))])
x_val = np.array(x_data[(int(x_data.shape[0]/(100/split_percentage))):x_data.shape[0]])
y_train = np.array(y_data_as_numbers[0:(int(x_data.shape[0]/(100/split_percentage)))])
y_val = np.array(y_data_as_numbers[(int(x_data.shape[0]/(100/split_percentage))):x_data.shape[0]])
# --------------------------------------------------------------------------------------------------------
# In[2]:
get_ipython().magic('matplotlib inline')
plt.hist(y_train)
# In[3]:
get_ipython().magic('matplotlib inline')
plt.hist(y_val)
# In[4]:
print(y_train[:5])
# In[5]:
# Shaping data to the correct shape.
x_train = x_train.reshape([-1, 28, 28, 1])
x_val = x_val.reshape([-1, 28, 28, 1])
y_train = to_categorical(y_train, nb_classes = 10)
y_val = to_categorical(y_val, nb_classes = 10)
print('Size of the input ' + str(x_data.shape))
print('Size of the output '+ str(y_data.shape))
print('First five examples of one-hot encoded output:')
print(y_train[:5, :])
# In[6]:
# Building convolutional network
network = input_data(shape=[None, 28, 28, 1], name='input')
#
branch1 = conv_2d(network, 32, [2, 2], activation = 'relu', name = 'B1Conv2d_2x2')
branch1 = dropout(branch1, 0.5)
branch2 = conv_2d(network, 16, [3, 3], activation = 'relu', name = 'B2Conv2d_3x3')
branch2 = dropout(branch2, 0.5)
branch2 = conv_2d(branch2, 32, [2, 2], activation = 'relu', name = 'B2Conv2d_2x2')
branch2 = dropout(branch2, 0.5)
branch3 = conv_2d(network, 8, [5, 5], activation = 'relu', name = 'B3Conv2d_5x5')
branch3 = dropout(branch3, 0.5)
branch3 = conv_2d(branch3, 16, [3, 3], activation = 'relu', name = 'B3Conv2d_3x3')
branch3 = dropout(branch3, 0.5)
branch3 = conv_2d(branch3, 32, [2, 2], activation = 'relu', name = 'B3Conv2d_2x2')
branch3 = dropout(branch3, 0.5)
branch4 = conv_2d(network, 4, [7, 7], activation = 'relu', name = 'B4Conv2d_7x7')
branch4 = dropout(branch4, 0.5)
branch4 = conv_2d(branch4, 8, [5, 5], activation = 'relu', name = 'B4Conv2d_5x5')
branch4 = dropout(branch4, 0.5)
branch4 = conv_2d(branch4, 16, [3, 3], activation = 'relu', name = 'B4Conv2d_3x3')
branch4 = dropout(branch4, 0.5)
branch4 = conv_2d(branch4, 32, [2, 2], activation = 'relu', name = 'B4Conv2d_2x2')
branch4 = dropout(branch4, 0.5)
# Hidden layer 3
merged_layers = merge((branch1, branch2, branch3, branch4), mode = 'elemwise_sum', name = 'Merge')
# Hidden layer 4
merged_layers = fully_connected(merged_layers, 10, activation='relu')
merged_layers = dropout(merged_layers, 0.5)
merged_layers = fully_connected(merged_layers, 10, activation = 'softmax')
network = regression(merged_layers, optimizer = 'adam', learning_rate = 0.005,
loss = 'categorical_crossentropy', name ='target')
# ---------------------------------------------------------------------------------------
# Training
# model = tflearn.DNN(network, tensorboard_verbose = 3, tensorboard_dir='./logs')
model = tflearn.DNN(network, tensorboard_verbose = 0, tensorboard_dir = './logs', best_checkpoint_path = './checkpoints/best/best_val', max_checkpoints = 1)
# checkpoint_path ='./checkpoints/checkpoint',
preTrained = False
if preTrained == False:
model.fit(x_train, y_train, n_epoch = 1, validation_set = (x_val, y_val),
show_metric = True, batch_size = 200, shuffle = True, #snapshot_step = 100,
snapshot_epoch = True, run_id = 'WojNet')
else:
# Loading the best accuracy checkpoint (accuracy over the validation data)
model.load('./checkpoints/best/best_val9723')
print('*' * 70)
print('Model is successfully loaded for the best performance!')
# In[7]:
#print('Training data:', model.evaluate(x_train, y_train))
#print('Validation data:',model.evaluate(x_val, y_val))
# In[1]:
k = 0 # Try different image indices k
#print("Label Prediction: %i"%test_labels[k])
fig = plt.figure(figsize=(2,2)); plt.axis('off')
plt.imshow(x_train[k,:,:,0]); plt.show()
# In[ ]:
file_name_test = 'test.csv'
folder = 'Digit Recognizer'
source = folder + '/' + file_name_test
data = pd.read_csv(source)
test_input = data.loc[:, :]
test_input_numpy = test_input.as_matrix()
test_input_numpy = test_input_numpy.reshape([-1,28,28,1])
# Standardization
test_input_standarized = test_input_numpy / 255
test_data_predicted = model.predict_label(test_input_standarized)
# Choosing the most probable label
test_data_predicted = test_data_predicted[:, -1]
# Indexing from 1 to number_of_examples
index = np.arange(1, test_data_predicted.shape[0] + 1)
# In[ ]:
test_input = data.loc[:, :]
# In[ ]:
test_input.shape
# In[ ]:
test_input.shape[0]
# In[ ]:
test_input_numpy = test_input.as_matrix()
# test_input_numpy = test_input_numpy.reshape([-1,28,28,1])
test_input_numpy = test_input_numpy.reshape([test_input.shape[0],28,28,1])
# In[ ]:
test_input_numpy.shape
# In[ ]:
test_input_numpy.shape[0]
# In[ ]:
# Standardization
test_input_standarized = test_input_numpy / 255
test_input_standarized.shape
# In[ ]:
current_example = test_input_standarized[0]
# In[ ]:
current_example.shape
# In[ ]:
test_data_predicted = np.empty((0, 10))
test_data_predicted_label = np.empty((0, 10))
for i in range (0, test_input_numpy.shape[0]):
current_example = test_input_standarized[i].reshape([-1,28,28,1])
test_data_predicted = np.append(test_data_predicted, model.predict(current_example), axis = 0)
test_data_predicted_label = np.append(test_data_predicted_label, model.predict_label(current_example), axis = 0)
if i%2000 == 0:
print(test_data_predicted_label[i])
print('Shape', test_data_predicted.shape)
# Choosing the most probable label
#test_data_predicted = test_data_predicted[:, -1]
# Indexing from 1 to number_of_examples
# In[ ]:
index = np.arange(1, test_data_predicted.shape[0] + 1, 1)
# In[ ]:
print(index)
# In[ ]:
testing_index = 10
# In[ ]:
current_example_woj = test_input_standarized[testing_index].reshape([-1,28,28,1])
example_by_woj = model.predict_label(current_example_woj)
# In[ ]:
print(example_by_woj)
# In[ ]:
print(test_data_predicted_label[testing_index])
# In[ ]:
test_data_predicted_label.shape
# In[ ]:
test_data_predicted_label.shape
# In[ ]:
test_data_predicted = test_data_predicted_label[:, -1]
# In[ ]:
test_data_predicted.shape
# In[ ]:
test_data_predicted[testing_index]
# In[ ]:
import matplotlib.pyplot as plt
# In[ ]:
get_ipython().magic('matplotlib inline')
plt.hist(test_data_predicted)
# In[ ]:
print(index)
# In[ ]:
# In[ ]:
print(test_data_predicted[:10])
# In[ ]:
col = ['ImageId', 'Label']
output_data = np.stack((index, test_data_predicted))
output_data = output_data.T
output_data = output_data.astype(int)
test_data_prediction = pd.DataFrame(output_data, columns=col)
predict_output = 'labels.csv'
predicted_output_path= folder + '/' + predict_output
test_data_prediction.to_csv(predicted_output_path, sep = ',', index = False)
print('The test data CSV file has been successfully saved!')
'''
model.predict_label(x_data.reshape([1,28,28,1])))
predicted_as_prob = np.array(model.predict(x_data[index].reshape([1,28,28,1])))
print('*' * 70)
print(predicted_as_prob)
print(predicted_as_label)
print(predicted_as_label[0, -1])
print(predicted_as_prob.max())
'''
'''
for index in range(11,15):
predicted_as_prob = np.array(model.predict(x_data[index].reshape([1,28,28,1])))
print('*' * 70)
print(type(predicted_as_prob))
print(predicted_as_prob.shape)
#print(predicted_as_prob)
#print(predicted_as_prob.max())
print(np.amax(predicted_as_prob))
'''
#tensorboard --logdir=logs/
# In[ ]:
|
apache-2.0
|
harisbal/pandas
|
pandas/tests/sparse/frame/conftest.py
|
4
|
3224
|
import pytest
import numpy as np
from pandas import SparseDataFrame, SparseArray, DataFrame, bdate_range
data = {'A': [np.nan, np.nan, np.nan, 0, 1, 2, 3, 4, 5, 6],
'B': [0, 1, 2, np.nan, np.nan, np.nan, 3, 4, 5, 6],
'C': np.arange(10, dtype=np.float64),
'D': [0, 1, 2, 3, 4, 5, np.nan, np.nan, np.nan, np.nan]}
dates = bdate_range('1/1/2011', periods=10)
# fixture names must be compatible with the tests in
# tests/frame/test_api.SharedWithSparse
@pytest.fixture
def float_frame_dense():
"""
Fixture for dense DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D']; some entries are missing
"""
return DataFrame(data, index=dates)
@pytest.fixture
def float_frame():
"""
Fixture for sparse DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D']; some entries are missing
"""
# default_kind='block' is the default
return SparseDataFrame(data, index=dates, default_kind='block')
@pytest.fixture
def float_frame_int_kind():
"""
Fixture for sparse DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D'] and default_kind='integer'.
Some entries are missing.
"""
return SparseDataFrame(data, index=dates, default_kind='integer')
@pytest.fixture
def float_string_frame():
"""
Fixture for sparse DataFrame of floats and strings with DatetimeIndex
Columns are ['A', 'B', 'C', 'D', 'foo']; some entries are missing
"""
sdf = SparseDataFrame(data, index=dates)
sdf['foo'] = SparseArray(['bar'] * len(dates))
return sdf
@pytest.fixture
def float_frame_fill0_dense():
"""
Fixture for dense DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D']; missing entries have been filled with 0
"""
values = SparseDataFrame(data).values
values[np.isnan(values)] = 0
return DataFrame(values, columns=['A', 'B', 'C', 'D'], index=dates)
@pytest.fixture
def float_frame_fill0():
"""
Fixture for sparse DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D']; missing entries have been filled with 0
"""
values = SparseDataFrame(data).values
values[np.isnan(values)] = 0
return SparseDataFrame(values, columns=['A', 'B', 'C', 'D'],
default_fill_value=0, index=dates)
@pytest.fixture
def float_frame_fill2_dense():
"""
Fixture for dense DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D']; missing entries have been filled with 2
"""
values = SparseDataFrame(data).values
values[np.isnan(values)] = 2
return DataFrame(values, columns=['A', 'B', 'C', 'D'], index=dates)
@pytest.fixture
def float_frame_fill2():
"""
Fixture for sparse DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D']; missing entries have been filled with 2
"""
values = SparseDataFrame(data).values
values[np.isnan(values)] = 2
return SparseDataFrame(values, columns=['A', 'B', 'C', 'D'],
default_fill_value=2, index=dates)
@pytest.fixture
def empty_frame():
"""
Fixture for empty SparseDataFrame
"""
return SparseDataFrame()
|
bsd-3-clause
|
zihua/scikit-learn
|
examples/text/hashing_vs_dict_vectorizer.py
|
93
|
3243
|
"""
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
|
bsd-3-clause
|
eliasrg/SURF2017
|
reference/hikmet.py
|
1
|
6092
|
import numpy as np
from scipy.interpolate import interp1d
from scipy.signal import convolve
import matplotlib.pyplot as plt
from tqdm import trange
import sys, os
sys.path.insert(0,
os.path.abspath(os.path.join(os.path.dirname(__file__), 'code')))
from itertools import islice
import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt
from scipy.integrate import quad
from simulation import Simulation, Parameters
from measurements import Measurement
from plotting import plot_lloyd_max, plot_lloyd_max_tracker, \
plot_spiral, plot_spiral_decode
# Constants
RESOLUTION=1<<7
class Distribution:
def __init__(self, interval, pdf):
self.interval=interval
self.pdf=pdf
@classmethod
def bySamples(cls, x, fx): # Interpolate to get the pdf
# Use logarithmic interpolation to preserve log-concavity
dx=x[1]-x[0]
fx=np.array(fx, dtype = float) / sum(fx) / dx
Fx=np.cumsum(fx)*dx
v1 = sum(1 for i in Fx if i < 1e-5)
v2 = sum(1 for i in Fx if i < 1-1e-5)
x=x[v1:v2]
fx=fx[v1:v2]
fx=np.array(fx, dtype = float) / sum(fx) / dx
logfx=np.log(fx)
logpdf=interp1d(x, logfx, kind='linear',
bounds_error=False, fill_value=float('-inf'))
pdf = lambda t : np.exp(logpdf(t))
return cls((x[0],x[-1]), pdf)
def convolution(d1, d2):
a1,b1 = d1.interval
a2,b2 = d2.interval
delta = max(b1-a1,b2-a2) / float(RESOLUTION)
f1=[d1.pdf(i) for i in np.arange(a1,b1,delta)]
f2=[d2.pdf(i) for i in np.arange(a2,b2,delta)]
fx=convolve(f1, f2)
x=[a1+a2+delta*i for i in range(len(fx))]
return Distribution.bySamples(x, fx)
def LM(distribution, n):
# Some definitions
maxiter=1<<10
N=RESOLUTION
a,b = distribution.interval
x=np.linspace(a,b,N)
fx=np.array([distribution.pdf(i) for i in x])
fx[np.isnan(fx)]=0
dx=(b-a) / (N-1.)
Fx=np.cumsum(fx)*dx
index=lambda y: int(min(N-1, max(0, np.round((y-a) / float(dx)))))
# Initialization
c=np.zeros(n)
# p=np.array([x[i] for i in sorted(np.random.randint(0, N, n-1))])
p=np.array([x[int(i)] for i in np.round(np.linspace(0, N, num=n+1)[1:-1])])
# Loop
error=1
iteration=0
while error > 0 and iteration<maxiter:
iteration +=1
# centers from boundaries
pin=[0]+[index(i) for i in p]+[N-1]
for i in range(n):
c[i]=sum(x[j]*fx[j] for j in range(pin[i],pin[i+1]+1))\
/sum( fx[j] for j in range(pin[i],pin[i+1]+1))
pin_temp=pin
# boundaries from centers
p=(c[:-1]+c[1:]) / 2.
pin=[0]+[index(i) for i in p] + [N-1]
error=sum(abs(pin_temp[i]-pin[i]) for i in range(n+1))
return ([a]+list(p)+[b],c)
def plot_lloyd_max(distr, boundaries, levels, x_hit=None):
plt.figure()
plt.scatter(levels, np.zeros(len(levels)), color='red')
# plt.scatter(boundaries, np.zeros(len(boundaries)),
# color='purple', s=3)
for boundary in boundaries:
plt.plot([boundary, boundary], [-0.01, distr.pdf(boundary)], color='gray')
# plt.scatter([distr.mean()], [distr.pdf(distr.mean())], color='green')
if x_hit is not None: plt.scatter([x_hit], [0], marker='x')
a = max(distr.interval[0], -20)
b = min(distr.interval[1], 20)
x = np.linspace(a, b, num=10000)
plt.plot(x, distr.pdf(x))
# plt.xlim(-20, 20)
# plt.ylim(-0.05, 0.4)
plt.axis('tight')
def plot_lloyd_max_tracker(distr, boundaries, levels, d1, fw, x_hit=None):
plt.figure()
plt.scatter(levels, np.zeros(len(levels)), color='red')
# plt.scatter(boundaries, np.zeros(len(boundaries)),
# color='purple', s=3)
for boundary in boundaries:
plt.plot([boundary, boundary], [-0.01, distr.pdf(boundary)], color='gray')
# plt.scatter([distr.mean()], [distr.pdf(distr.mean())], color='green')
if x_hit is not None: plt.scatter([x_hit], [0], marker='x')
a = max(distr.interval[0], -20)
b = min(distr.interval[1], 20)
x = np.linspace(a, b, num=10000)
plt.plot(x, distr.pdf(x))
plt.plot(x, (d1.interval[0] <= x) * (x <= d1.interval[1]) * d1.pdf(x), color='orange')
plt.plot(x, fw.pdf(x), color='purple')
# plt.xlim(-20, 20)
# plt.ylim(-0.05, 0.4)
plt.axis('tight')
def show(delay=0):
if delay != 0:
from time import sleep
sleep(delay)
plt.show(block=False)
# m = Measurement.load('./data/separate/varying-SNR/noiseless--4.p')
# m.w[0] = m.x[0]
# w_sequence = m.get_noise_record().w_sequence
# Parameters
A = 1.5
W = 1.0 # wt ~ iid N(0,W)
T = 1<<8
# A = m.params.alpha
# W = m.params.W
# T = 1 << 7
# Definitions
fw = Distribution((-10,10),
lambda x : W * np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)
)# pdf of w_t: N(0,W) with support (-10,10)
num_iter = 1<<9
avg=np.zeros(T)
LQG_avg = np.zeros(T)
for it in trange(num_iter):
# Initialization
l = 0
x = 0
u = 0
prior_dist = fw # prior pdf of x
e=[]
x2s = []
# Loop
for t in range(T):
w = W * np.random.randn() # should be W
# w = w_sequence.pop(0)
x = A * x + u + w
(p,c) = LM(prior_dist, 2)
# if t == 0:
# plot_lloyd_max(prior_dist, p, c, x_hit=x)
# else:
# plot_lloyd_max_tracker(prior_dist, p, c, d1, fw, x_hit=x)
l = sum(1 for i in p[1:-1] if i <= x) # encode
x_hat = c[l] # decode
u = - A * x_hat
d1 = Distribution((A*p[l]+u,A*p[l+1]+u), lambda x: prior_dist.pdf((x-u) / float(A)))
prior_dist = Distribution.convolution(d1, fw)
e+=[(x-x_hat)**2]
x2s += [x**2]
    error=np.cumsum(e)/np.arange(1,T+1)
avg+=error/num_iter
    LQG=np.cumsum(x2s)/np.arange(1,T+1)
LQG_avg += LQG/num_iter
LQGcosts = A**2 * avg + W
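# Illustrative addition (not in the original script): report and plot the
# averaged LQG cost; its long-run level is the quantity of interest here.
print('Average LQG cost after %d steps: %.4f' % (T, LQGcosts[-1]))
plt.figure()
plt.plot(LQGcosts)
plt.xlabel('t')
plt.ylabel('average LQG cost')
show()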
|
mit
|
fspaolo/scikit-learn
|
sklearn/feature_extraction/tests/test_dict_vectorizer.py
|
7
|
3089
|
# Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from nose.tools import assert_equal
from nose.tools import assert_true
from nose.tools import assert_false
from numpy.testing import assert_array_equal
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
v = DictVectorizer(sparse=sparse, dtype=dtype)
X = v.fit_transform(D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(D).A)
else:
assert_array_equal(X, v.transform(D))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
|
bsd-3-clause
|
xubenben/scikit-learn
|
sklearn/neighbors/base.py
|
22
|
31143
|
"""Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array, _get_n_jobs, gen_even_slices
from ..utils.fixes import argpartition
from ..utils.validation import DataConversionWarning
from ..utils.validation import NotFittedError
from ..externals import six
from ..externals.joblib import Parallel, delayed
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning are displayed more than once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
if kwargs:
warnings.warn("Passing additional arguments to the metric "
"function as **kwargs is deprecated "
"and will no longer be supported in 0.18. "
"Use metric_params instead.",
DeprecationWarning, stacklevel=3)
if metric_params is None:
metric_params = {}
metric_params.update(kwargs)
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
self.n_jobs = n_jobs
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
if metric == 'precomputed':
alg_check = 'brute'
else:
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if ((self.n_neighbors is None or
self.n_neighbors < self._fit_X.shape[0] // 2) and
self.metric != 'precomputed'):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
if self.n_neighbors is not None:
if self.n_neighbors <= 0:
raise ValueError(
"Expected n_neighbors > 0. Got %d" %
self.n_neighbors
)
return self
@property
def _pairwise(self):
# For cross-validation routines to split data correctly
return self.metric == 'precomputed'
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
train_size = self._fit_X.shape[0]
if n_neighbors > train_size:
raise ValueError(
"Expected n_neighbors <= n_samples, "
" but n_samples = %d, n_neighbors = %d" %
(train_size, n_neighbors)
)
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
n_jobs = _get_n_jobs(self.n_jobs)
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
n_jobs=n_jobs, squared=True)
else:
dist = pairwise_distances(
X, self._fit_X, self.effective_metric_, n_jobs=n_jobs,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[
sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = Parallel(n_jobs, backend='threading')(
delayed(self._tree.query, check_pickle=False)(
X[s], n_neighbors, return_distance)
for s in gen_even_slices(X.shape[0], n_jobs)
)
if return_distance:
dist, neigh_ind = tuple(zip(*result))
result = np.vstack(dist), np.vstack(neigh_ind)
else:
result = np.vstack(result)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
# Corner case: When the number of duplicates are more
# than the number of neighbors, the first NN will not
# be the sample, but a duplicate.
# In that case mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(
neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
if return_distance:
dist = np.reshape(
dist[sample_mask], (n_samples, n_neighbors - 1))
return dist, neigh_ind
return neigh_ind
def kneighbors_graph(self, X=None, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
if n_neighbors is None:
n_neighbors = self.n_neighbors
# kneighbors does the None handling.
if X is not None:
X = check_array(X, accept_sparse='csr')
n_samples1 = X.shape[0]
else:
n_samples1 = self._fit_X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones(n_samples1 * n_neighbors)
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
A_data, A_ind = self.kneighbors(
X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
return kneighbors_graph
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([1., 1., 1.])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
[ 1.5 0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
n_samples = X.shape[0]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
neigh_ind = np.empty(n_samples, dtype='object')
neigh_ind[:] = neigh_ind_list
if return_distance:
dist_array = np.empty(n_samples, dtype='object')
if self.effective_metric_ == 'euclidean':
dist_list = [np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)]
else:
dist_list = [d[neigh_ind[i]]
for i, d in enumerate(dist)]
dist_array[:] = dist_list
results = dist_array, neigh_ind
else:
results = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
results = results[::-1]
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
dist[ind] = dist[ind][mask]
if return_distance:
return dist, neigh_ind
return neigh_ind
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if X is not None:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples2 = self._fit_X.shape[0]
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
"""
return self._fit(X)
|
bsd-3-clause
|
fengzhyuan/scikit-learn
|
benchmarks/bench_plot_parallel_pairwise.py
|
297
|
1247
|
# Author: Mathieu Blondel <[email protected]>
# License: BSD 3 clause
import time
import pylab as pl
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
pl.figure('scikit-learn parallel %s benchmark results' % func.__name__)
pl.plot(sample_sizes, one_core, label="one core")
pl.plot(sample_sizes, multi_core, label="multi core")
pl.xlabel('n_samples')
pl.ylabel('Time (s)')
pl.title('Parallel %s' % func.__name__)
pl.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
pl.show()
|
bsd-3-clause
|
evidation-health/bokeh
|
bokeh/sampledata/gapminder.py
|
41
|
2655
|
from __future__ import absolute_import
import pandas as pd
from os.path import join
import sys
from . import _data_dir
'''
This module provides pandas DataFrame instances for four
of the datasets from gapminder.org.
They are read from CSV files that have been downloaded from Bokeh's
sample data on S3. The original code that generated those CSVs from the
raw gapminder data is included at the bottom of this file.
'''
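# Illustrative usage sketch (not part of the original module); it assumes the
# sample data has already been fetched with bokeh.sampledata.download():
#
# from bokeh.sampledata.gapminder import fertility, regions
# fertility.head()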
data_dir = _data_dir()
datasets = [
'fertility',
'life_expectancy',
'population',
'regions',
]
for dataset in datasets:
filename = join(data_dir, 'gapminder_%s.csv' % dataset)
try:
setattr(
sys.modules[__name__],
dataset,
pd.read_csv(filename, index_col='Country')
)
except (IOError, OSError):
raise RuntimeError('Could not load gapminder data file "%s". Please execute bokeh.sampledata.download()' % filename)
__all__ = datasets
# ====================================================
# Original data is from Gapminder - www.gapminder.org.
# The google docs links are maintained by gapminder
# The following script was used to get the data from gapminder
# and process it into the csvs stored in bokeh's sampledata.
"""
population_url = "http://spreadsheets.google.com/pub?key=phAwcNAVuyj0XOoBL_n5tAQ&output=xls"
fertility_url = "http://spreadsheets.google.com/pub?key=phAwcNAVuyj0TAlJeCEzcGQ&output=xls"
life_expectancy_url = "http://spreadsheets.google.com/pub?key=tiAiXcrneZrUnnJ9dBU-PAw&output=xls"
regions_url = "https://docs.google.com/spreadsheets/d/1OxmGUNWeADbPJkQxVPupSOK5MbAECdqThnvyPrwG5Os/pub?gid=1&output=xls"
def _get_data(url):
# Get the data from the url and return only 1964 - 2013
df = pd.read_excel(url, index_col=0)
df = df.unstack().unstack()
df = df[(df.index >= 1964) & (df.index <= 2013)]
df = df.unstack().unstack()
return df
fertility_df = _get_data(fertility_url)
life_expectancy_df = _get_data(life_expectancy_url)
population_df = _get_data(population_url)
regions_df = pd.read_excel(regions_url, index_col=0)
# have common countries across all data
fertility_df = fertility_df.drop(fertility_df.index.difference(life_expectancy_df.index))
population_df = population_df.drop(population_df.index.difference(life_expectancy_df.index))
regions_df = regions_df.drop(regions_df.index.difference(life_expectancy_df.index))
fertility_df.to_csv('gapminder_fertility.csv')
population_df.to_csv('gapminder_population.csv')
life_expectancy_df.to_csv('gapminder_life_expectancy.csv')
regions_df.to_csv('gapminder_regions.csv')
"""
# ======================================================
|
bsd-3-clause
|
larsmans/scikit-learn
|
sklearn/neighbors/unsupervised.py
|
16
|
3198
|
"""Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or callable, default 'minkowski'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
p : integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> neigh.radius_neighbors([0, 0, 1.3], 0.4, return_distance=False)
array([[2]])
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, **kwargs):
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, **kwargs)
|
bsd-3-clause
|
rbooth200/DiscEvolution
|
scripts/plot_planet_evo.py
|
1
|
1674
|
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from matplotlib import rcParams
rcParams['image.cmap'] = 'plasma'
from snap_reader import DiscReader, PlanetReader
from chemistry import SimpleCOMolAbund, SimpleCOAtomAbund
class Formatter(object):
def __init__(self, im):
self.im = im
def __call__(self, x, y):
z = self.im.get_array()[int(y), int(x)]
return 'x={:.01f}, y={:.01f}, z={:.01f}'.format(x, y, z)
if __name__ == "__main__":
import sys
DIR = os.path.join('../planets/TimeDep/irradiated/Rc_100/Mdot_1e-08/')
try:
DIR = sys.argv[1]
except IndexError:
pass
planets = PlanetReader(DIR, 'planets').compute_planet_evo()
tf = {}
for p in planets:
if p.t_form[0] not in tf:
tf[p.t_form[0]] = []
tf[p.t_form[0]].append(p)
N = len(tf)
i = 0
keys = sorted(tf.keys())
plt.ion()
while True:
plt.clf()
for p in tf[keys[i]]:
plt.subplot(311)
plt.loglog(p.time, p.M / 317.8)
plt.subplot(312)
plt.loglog(p.time, p.M_core)
plt.subplot(313)
plt.loglog(p.time, p.R)
plt.subplot(311)
plt.title('t_0 = {:g}yr'.format(p.t_form[0]))
plt.ylabel('$M\,[M_J]$')
plt.xlim(xmin=1e4)
plt.subplot(312)
plt.ylabel('$M_c\,[M_\oplus]$')
plt.xlim(xmin=1e4)
plt.subplot(313)
plt.xlabel('$t\,[\mathrm{yr}]$')
plt.ylabel('$R\,[\mathrm{au}]$')
plt.xlim(xmin=1e4)
plt.pause(1)
i = (i+1) % N
plt.show()
|
gpl-3.0
|
Ziqi-Li/bknqgis
|
pandas/pandas/tests/reshape/test_merge_asof.py
|
7
|
37720
|
import os
import pytest
import pytz
import numpy as np
import pandas as pd
from pandas import (merge_asof, read_csv,
to_datetime, Timedelta)
from pandas.core.reshape.merge import MergeError
from pandas.util import testing as tm
from pandas.util.testing import assert_frame_equal
class TestAsOfMerge(object):
def read_data(self, name, dedupe=False):
path = os.path.join(tm.get_data_path(), name)
x = read_csv(path)
if dedupe:
x = (x.drop_duplicates(['time', 'ticker'], keep='last')
.reset_index(drop=True)
)
x.time = to_datetime(x.time)
return x
def setup_method(self, method):
self.trades = self.read_data('trades.csv')
self.quotes = self.read_data('quotes.csv', dedupe=True)
self.asof = self.read_data('asof.csv')
self.tolerance = self.read_data('tolerance.csv')
self.allow_exact_matches = self.read_data('allow_exact_matches.csv')
self.allow_exact_matches_and_tolerance = self.read_data(
'allow_exact_matches_and_tolerance.csv')
def test_examples1(self):
""" doc-string examples """
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 2, 3, 6, 7],
'right_val': [1, 2, 3, 6, 7]})
expected = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c'],
'right_val': [1, 3, 7]})
result = pd.merge_asof(left, right, on='a')
assert_frame_equal(result, expected)
def test_examples2(self):
""" doc-string examples """
trades = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.038',
'20160525 13:30:00.048',
'20160525 13:30:00.048',
'20160525 13:30:00.048']),
'ticker': ['MSFT', 'MSFT',
'GOOG', 'GOOG', 'AAPL'],
'price': [51.95, 51.95,
720.77, 720.92, 98.00],
'quantity': [75, 155,
100, 100, 100]},
columns=['time', 'ticker', 'price', 'quantity'])
quotes = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.023',
'20160525 13:30:00.030',
'20160525 13:30:00.041',
'20160525 13:30:00.048',
'20160525 13:30:00.049',
'20160525 13:30:00.072',
'20160525 13:30:00.075']),
'ticker': ['GOOG', 'MSFT', 'MSFT',
'MSFT', 'GOOG', 'AAPL', 'GOOG',
'MSFT'],
'bid': [720.50, 51.95, 51.97, 51.99,
720.50, 97.99, 720.50, 52.01],
'ask': [720.93, 51.96, 51.98, 52.00,
720.93, 98.01, 720.88, 52.03]},
columns=['time', 'ticker', 'bid', 'ask'])
pd.merge_asof(trades, quotes,
on='time',
by='ticker')
pd.merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=pd.Timedelta('2ms'))
pd.merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=pd.Timedelta('10ms'),
allow_exact_matches=False)
def test_examples3(self):
""" doc-string examples """
# GH14887
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 2, 3, 6, 7],
'right_val': [1, 2, 3, 6, 7]})
expected = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c'],
'right_val': [1, 6, np.nan]})
result = pd.merge_asof(left, right, on='a', direction='forward')
assert_frame_equal(result, expected)
def test_examples4(self):
""" doc-string examples """
# GH14887
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 2, 3, 6, 7],
'right_val': [1, 2, 3, 6, 7]})
expected = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c'],
'right_val': [1, 6, 7]})
result = pd.merge_asof(left, right, on='a', direction='nearest')
assert_frame_equal(result, expected)
def test_basic(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes,
on='time',
by='ticker')
assert_frame_equal(result, expected)
def test_basic_categorical(self):
expected = self.asof
trades = self.trades.copy()
trades.ticker = trades.ticker.astype('category')
quotes = self.quotes.copy()
quotes.ticker = quotes.ticker.astype('category')
expected.ticker = expected.ticker.astype('category')
result = merge_asof(trades, quotes,
on='time',
by='ticker')
assert_frame_equal(result, expected)
def test_basic_left_index(self):
# GH14253
expected = self.asof
trades = self.trades.set_index('time')
quotes = self.quotes
result = merge_asof(trades, quotes,
left_index=True,
right_on='time',
by='ticker')
# left-only index uses right's index, oddly
expected.index = result.index
# time column appears after left's columns
expected = expected[result.columns]
assert_frame_equal(result, expected)
def test_basic_right_index(self):
expected = self.asof
trades = self.trades
quotes = self.quotes.set_index('time')
result = merge_asof(trades, quotes,
left_on='time',
right_index=True,
by='ticker')
assert_frame_equal(result, expected)
def test_basic_left_index_right_index(self):
expected = self.asof.set_index('time')
trades = self.trades.set_index('time')
quotes = self.quotes.set_index('time')
result = merge_asof(trades, quotes,
left_index=True,
right_index=True,
by='ticker')
assert_frame_equal(result, expected)
def test_multi_index(self):
# MultiIndex is prohibited
trades = self.trades.set_index(['time', 'price'])
quotes = self.quotes.set_index('time')
with pytest.raises(MergeError):
merge_asof(trades, quotes,
left_index=True,
right_index=True)
trades = self.trades.set_index('time')
quotes = self.quotes.set_index(['time', 'bid'])
with pytest.raises(MergeError):
merge_asof(trades, quotes,
left_index=True,
right_index=True)
def test_on_and_index(self):
# 'on' parameter and index together is prohibited
trades = self.trades.set_index('time')
quotes = self.quotes.set_index('time')
with pytest.raises(MergeError):
merge_asof(trades, quotes,
left_on='price',
left_index=True,
right_index=True)
trades = self.trades.set_index('time')
quotes = self.quotes.set_index('time')
with pytest.raises(MergeError):
merge_asof(trades, quotes,
right_on='bid',
left_index=True,
right_index=True)
def test_basic_left_by_right_by(self):
# GH14253
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes,
on='time',
left_by='ticker',
right_by='ticker')
assert_frame_equal(result, expected)
def test_missing_right_by(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
q = quotes[quotes.ticker != 'MSFT']
result = merge_asof(trades, q,
on='time',
by='ticker')
expected.loc[expected.ticker == 'MSFT', ['bid', 'ask']] = np.nan
assert_frame_equal(result, expected)
def test_multiby(self):
# GH13936
trades = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.023',
'20160525 13:30:00.046',
'20160525 13:30:00.048',
'20160525 13:30:00.050']),
'ticker': ['MSFT', 'MSFT',
'GOOG', 'GOOG', 'AAPL'],
'exch': ['ARCA', 'NSDQ', 'NSDQ', 'BATS', 'NSDQ'],
'price': [51.95, 51.95,
720.77, 720.92, 98.00],
'quantity': [75, 155,
100, 100, 100]},
columns=['time', 'ticker', 'exch',
'price', 'quantity'])
quotes = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.023',
'20160525 13:30:00.030',
'20160525 13:30:00.041',
'20160525 13:30:00.045',
'20160525 13:30:00.049']),
'ticker': ['GOOG', 'MSFT', 'MSFT',
'MSFT', 'GOOG', 'AAPL'],
'exch': ['BATS', 'NSDQ', 'ARCA', 'ARCA',
'NSDQ', 'ARCA'],
'bid': [720.51, 51.95, 51.97, 51.99,
720.50, 97.99],
'ask': [720.92, 51.96, 51.98, 52.00,
720.93, 98.01]},
columns=['time', 'ticker', 'exch', 'bid', 'ask'])
expected = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.023',
'20160525 13:30:00.046',
'20160525 13:30:00.048',
'20160525 13:30:00.050']),
'ticker': ['MSFT', 'MSFT',
'GOOG', 'GOOG', 'AAPL'],
'exch': ['ARCA', 'NSDQ', 'NSDQ', 'BATS', 'NSDQ'],
'price': [51.95, 51.95,
720.77, 720.92, 98.00],
'quantity': [75, 155,
100, 100, 100],
'bid': [np.nan, 51.95, 720.50, 720.51, np.nan],
'ask': [np.nan, 51.96, 720.93, 720.92, np.nan]},
columns=['time', 'ticker', 'exch',
'price', 'quantity', 'bid', 'ask'])
result = pd.merge_asof(trades, quotes, on='time',
by=['ticker', 'exch'])
assert_frame_equal(result, expected)
def test_multiby_heterogeneous_types(self):
# GH13936
trades = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.023',
'20160525 13:30:00.046',
'20160525 13:30:00.048',
'20160525 13:30:00.050']),
'ticker': [0, 0, 1, 1, 2],
'exch': ['ARCA', 'NSDQ', 'NSDQ', 'BATS', 'NSDQ'],
'price': [51.95, 51.95,
720.77, 720.92, 98.00],
'quantity': [75, 155,
100, 100, 100]},
columns=['time', 'ticker', 'exch',
'price', 'quantity'])
quotes = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.023',
'20160525 13:30:00.030',
'20160525 13:30:00.041',
'20160525 13:30:00.045',
'20160525 13:30:00.049']),
'ticker': [1, 0, 0, 0, 1, 2],
'exch': ['BATS', 'NSDQ', 'ARCA', 'ARCA',
'NSDQ', 'ARCA'],
'bid': [720.51, 51.95, 51.97, 51.99,
720.50, 97.99],
'ask': [720.92, 51.96, 51.98, 52.00,
720.93, 98.01]},
columns=['time', 'ticker', 'exch', 'bid', 'ask'])
expected = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.023',
'20160525 13:30:00.046',
'20160525 13:30:00.048',
'20160525 13:30:00.050']),
'ticker': [0, 0, 1, 1, 2],
'exch': ['ARCA', 'NSDQ', 'NSDQ', 'BATS', 'NSDQ'],
'price': [51.95, 51.95,
720.77, 720.92, 98.00],
'quantity': [75, 155,
100, 100, 100],
'bid': [np.nan, 51.95, 720.50, 720.51, np.nan],
'ask': [np.nan, 51.96, 720.93, 720.92, np.nan]},
columns=['time', 'ticker', 'exch',
'price', 'quantity', 'bid', 'ask'])
result = pd.merge_asof(trades, quotes, on='time',
by=['ticker', 'exch'])
assert_frame_equal(result, expected)
def test_multiby_indexed(self):
# GH15676
left = pd.DataFrame([
[pd.to_datetime('20160602'), 1, 'a'],
[pd.to_datetime('20160602'), 2, 'a'],
[pd.to_datetime('20160603'), 1, 'b'],
[pd.to_datetime('20160603'), 2, 'b']],
columns=['time', 'k1', 'k2']).set_index('time')
right = pd.DataFrame([
[pd.to_datetime('20160502'), 1, 'a', 1.0],
[pd.to_datetime('20160502'), 2, 'a', 2.0],
[pd.to_datetime('20160503'), 1, 'b', 3.0],
[pd.to_datetime('20160503'), 2, 'b', 4.0]],
columns=['time', 'k1', 'k2', 'value']).set_index('time')
expected = pd.DataFrame([
[pd.to_datetime('20160602'), 1, 'a', 1.0],
[pd.to_datetime('20160602'), 2, 'a', 2.0],
[pd.to_datetime('20160603'), 1, 'b', 3.0],
[pd.to_datetime('20160603'), 2, 'b', 4.0]],
columns=['time', 'k1', 'k2', 'value']).set_index('time')
result = pd.merge_asof(left,
right,
left_index=True,
right_index=True,
by=['k1', 'k2'])
assert_frame_equal(expected, result)
with pytest.raises(MergeError):
pd.merge_asof(left, right, left_index=True, right_index=True,
left_by=['k1', 'k2'], right_by=['k1'])
def test_basic2(self):
expected = self.read_data('asof2.csv')
trades = self.read_data('trades2.csv')
quotes = self.read_data('quotes2.csv', dedupe=True)
result = merge_asof(trades, quotes,
on='time',
by='ticker')
assert_frame_equal(result, expected)
def test_basic_no_by(self):
f = lambda x: x[x.ticker == 'MSFT'].drop('ticker', axis=1) \
.reset_index(drop=True)
# just use a single ticker
expected = f(self.asof)
trades = f(self.trades)
quotes = f(self.quotes)
result = merge_asof(trades, quotes,
on='time')
assert_frame_equal(result, expected)
def test_valid_join_keys(self):
trades = self.trades
quotes = self.quotes
with pytest.raises(MergeError):
merge_asof(trades, quotes,
left_on='time',
right_on='bid',
by='ticker')
with pytest.raises(MergeError):
merge_asof(trades, quotes,
on=['time', 'ticker'],
by='ticker')
with pytest.raises(MergeError):
merge_asof(trades, quotes,
by='ticker')
def test_with_duplicates(self):
q = pd.concat([self.quotes, self.quotes]).sort_values(
['time', 'ticker']).reset_index(drop=True)
result = merge_asof(self.trades, q,
on='time',
by='ticker')
expected = self.read_data('asof.csv')
assert_frame_equal(result, expected)
def test_with_duplicates_no_on(self):
df1 = pd.DataFrame({'key': [1, 1, 3],
'left_val': [1, 2, 3]})
df2 = pd.DataFrame({'key': [1, 2, 2],
'right_val': [1, 2, 3]})
result = merge_asof(df1, df2, on='key')
expected = pd.DataFrame({'key': [1, 1, 3],
'left_val': [1, 2, 3],
'right_val': [1, 1, 3]})
assert_frame_equal(result, expected)
def test_valid_allow_exact_matches(self):
trades = self.trades
quotes = self.quotes
with pytest.raises(MergeError):
merge_asof(trades, quotes,
on='time',
by='ticker',
allow_exact_matches='foo')
def test_valid_tolerance(self):
trades = self.trades
quotes = self.quotes
# dti
merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=Timedelta('1s'))
# integer
merge_asof(trades.reset_index(), quotes.reset_index(),
on='index',
by='ticker',
tolerance=1)
# incompat
with pytest.raises(MergeError):
merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=1)
# invalid
with pytest.raises(MergeError):
merge_asof(trades.reset_index(), quotes.reset_index(),
on='index',
by='ticker',
tolerance=1.0)
# invalid negative
with pytest.raises(MergeError):
merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=-Timedelta('1s'))
with pytest.raises(MergeError):
merge_asof(trades.reset_index(), quotes.reset_index(),
on='index',
by='ticker',
tolerance=-1)
def test_non_sorted(self):
trades = self.trades.sort_values('time', ascending=False)
quotes = self.quotes.sort_values('time', ascending=False)
# we require that we are already sorted on time & quotes
assert not trades.time.is_monotonic
assert not quotes.time.is_monotonic
with pytest.raises(ValueError):
merge_asof(trades, quotes,
on='time',
by='ticker')
trades = self.trades.sort_values('time')
assert trades.time.is_monotonic
assert not quotes.time.is_monotonic
with pytest.raises(ValueError):
merge_asof(trades, quotes,
on='time',
by='ticker')
quotes = self.quotes.sort_values('time')
assert trades.time.is_monotonic
assert quotes.time.is_monotonic
# ok, though has dupes
merge_asof(trades, self.quotes,
on='time',
by='ticker')
def test_tolerance(self):
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=Timedelta('1day'))
expected = self.tolerance
assert_frame_equal(result, expected)
def test_tolerance_forward(self):
# GH14887
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 2, 3, 7, 11],
'right_val': [1, 2, 3, 7, 11]})
expected = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c'],
'right_val': [1, np.nan, 11]})
result = pd.merge_asof(left, right, on='a', direction='forward',
tolerance=1)
assert_frame_equal(result, expected)
def test_tolerance_nearest(self):
# GH14887
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 2, 3, 7, 11],
'right_val': [1, 2, 3, 7, 11]})
expected = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c'],
'right_val': [1, np.nan, 11]})
result = pd.merge_asof(left, right, on='a', direction='nearest',
tolerance=1)
assert_frame_equal(result, expected)
def test_tolerance_tz(self):
# GH 14844
left = pd.DataFrame(
{'date': pd.DatetimeIndex(start=pd.to_datetime('2016-01-02'),
freq='D', periods=5,
tz=pytz.timezone('UTC')),
'value1': np.arange(5)})
right = pd.DataFrame(
{'date': pd.DatetimeIndex(start=pd.to_datetime('2016-01-01'),
freq='D', periods=5,
tz=pytz.timezone('UTC')),
'value2': list("ABCDE")})
result = pd.merge_asof(left, right, on='date',
tolerance=pd.Timedelta('1 day'))
expected = pd.DataFrame(
{'date': pd.DatetimeIndex(start=pd.to_datetime('2016-01-02'),
freq='D', periods=5,
tz=pytz.timezone('UTC')),
'value1': np.arange(5),
'value2': list("BCDEE")})
assert_frame_equal(result, expected)
def test_index_tolerance(self):
# GH 15135
expected = self.tolerance.set_index('time')
trades = self.trades.set_index('time')
quotes = self.quotes.set_index('time')
result = pd.merge_asof(trades, quotes,
left_index=True,
right_index=True,
by='ticker',
tolerance=pd.Timedelta('1day'))
assert_frame_equal(result, expected)
def test_allow_exact_matches(self):
result = merge_asof(self.trades, self.quotes,
on='time',
by='ticker',
allow_exact_matches=False)
expected = self.allow_exact_matches
assert_frame_equal(result, expected)
def test_allow_exact_matches_forward(self):
# GH14887
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 2, 3, 7, 11],
'right_val': [1, 2, 3, 7, 11]})
expected = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c'],
'right_val': [2, 7, 11]})
result = pd.merge_asof(left, right, on='a', direction='forward',
allow_exact_matches=False)
assert_frame_equal(result, expected)
def test_allow_exact_matches_nearest(self):
# GH14887
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 2, 3, 7, 11],
'right_val': [1, 2, 3, 7, 11]})
expected = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c'],
'right_val': [2, 3, 11]})
result = pd.merge_asof(left, right, on='a', direction='nearest',
allow_exact_matches=False)
assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance(self):
result = merge_asof(self.trades, self.quotes,
on='time',
by='ticker',
tolerance=Timedelta('100ms'),
allow_exact_matches=False)
expected = self.allow_exact_matches_and_tolerance
assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance2(self):
# GH 13695
df1 = pd.DataFrame({
'time': pd.to_datetime(['2016-07-15 13:30:00.030']),
'username': ['bob']})
df2 = pd.DataFrame({
'time': pd.to_datetime(['2016-07-15 13:30:00.000',
'2016-07-15 13:30:00.030']),
'version': [1, 2]})
result = pd.merge_asof(df1, df2, on='time')
expected = pd.DataFrame({
'time': pd.to_datetime(['2016-07-15 13:30:00.030']),
'username': ['bob'],
'version': [2]})
assert_frame_equal(result, expected)
result = pd.merge_asof(df1, df2, on='time', allow_exact_matches=False)
expected = pd.DataFrame({
'time': pd.to_datetime(['2016-07-15 13:30:00.030']),
'username': ['bob'],
'version': [1]})
assert_frame_equal(result, expected)
result = pd.merge_asof(df1, df2, on='time', allow_exact_matches=False,
tolerance=pd.Timedelta('10ms'))
expected = pd.DataFrame({
'time': pd.to_datetime(['2016-07-15 13:30:00.030']),
'username': ['bob'],
'version': [np.nan]})
assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance3(self):
# GH 13709
df1 = pd.DataFrame({
'time': pd.to_datetime(['2016-07-15 13:30:00.030',
'2016-07-15 13:30:00.030']),
'username': ['bob', 'charlie']})
df2 = pd.DataFrame({
'time': pd.to_datetime(['2016-07-15 13:30:00.000',
'2016-07-15 13:30:00.030']),
'version': [1, 2]})
result = pd.merge_asof(df1, df2, on='time', allow_exact_matches=False,
tolerance=pd.Timedelta('10ms'))
expected = pd.DataFrame({
'time': pd.to_datetime(['2016-07-15 13:30:00.030',
'2016-07-15 13:30:00.030']),
'username': ['bob', 'charlie'],
'version': [np.nan, np.nan]})
assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance_forward(self):
# GH14887
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 3, 4, 6, 11],
'right_val': [1, 3, 4, 6, 11]})
expected = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c'],
'right_val': [np.nan, 6, 11]})
result = pd.merge_asof(left, right, on='a', direction='forward',
allow_exact_matches=False, tolerance=1)
assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance_nearest(self):
# GH14887
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 3, 4, 6, 11],
'right_val': [1, 3, 4, 7, 11]})
expected = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c'],
'right_val': [np.nan, 4, 11]})
result = pd.merge_asof(left, right, on='a', direction='nearest',
allow_exact_matches=False, tolerance=1)
assert_frame_equal(result, expected)
def test_forward_by(self):
# GH14887
left = pd.DataFrame({'a': [1, 5, 10, 12, 15],
'b': ['X', 'X', 'Y', 'Z', 'Y'],
'left_val': ['a', 'b', 'c', 'd', 'e']})
right = pd.DataFrame({'a': [1, 6, 11, 15, 16],
'b': ['X', 'Z', 'Y', 'Z', 'Y'],
'right_val': [1, 6, 11, 15, 16]})
expected = pd.DataFrame({'a': [1, 5, 10, 12, 15],
'b': ['X', 'X', 'Y', 'Z', 'Y'],
'left_val': ['a', 'b', 'c', 'd', 'e'],
'right_val': [1, np.nan, 11, 15, 16]})
result = pd.merge_asof(left, right, on='a', by='b',
direction='forward')
assert_frame_equal(result, expected)
def test_nearest_by(self):
# GH14887
left = pd.DataFrame({'a': [1, 5, 10, 12, 15],
'b': ['X', 'X', 'Z', 'Z', 'Y'],
'left_val': ['a', 'b', 'c', 'd', 'e']})
right = pd.DataFrame({'a': [1, 6, 11, 15, 16],
'b': ['X', 'Z', 'Z', 'Z', 'Y'],
'right_val': [1, 6, 11, 15, 16]})
expected = pd.DataFrame({'a': [1, 5, 10, 12, 15],
'b': ['X', 'X', 'Z', 'Z', 'Y'],
'left_val': ['a', 'b', 'c', 'd', 'e'],
'right_val': [1, 1, 11, 11, 16]})
result = pd.merge_asof(left, right, on='a', by='b',
direction='nearest')
assert_frame_equal(result, expected)
def test_by_int(self):
# we specialize by type, so test that this is correct
df1 = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.020',
'20160525 13:30:00.030',
'20160525 13:30:00.040',
'20160525 13:30:00.050',
'20160525 13:30:00.060']),
'key': [1, 2, 1, 3, 2],
'value1': [1.1, 1.2, 1.3, 1.4, 1.5]},
columns=['time', 'key', 'value1'])
df2 = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.015',
'20160525 13:30:00.020',
'20160525 13:30:00.025',
'20160525 13:30:00.035',
'20160525 13:30:00.040',
'20160525 13:30:00.055',
'20160525 13:30:00.060',
'20160525 13:30:00.065']),
'key': [2, 1, 1, 3, 2, 1, 2, 3],
'value2': [2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8]},
columns=['time', 'key', 'value2'])
result = pd.merge_asof(df1, df2, on='time', by='key')
expected = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.020',
'20160525 13:30:00.030',
'20160525 13:30:00.040',
'20160525 13:30:00.050',
'20160525 13:30:00.060']),
'key': [1, 2, 1, 3, 2],
'value1': [1.1, 1.2, 1.3, 1.4, 1.5],
'value2': [2.2, 2.1, 2.3, 2.4, 2.7]},
columns=['time', 'key', 'value1', 'value2'])
assert_frame_equal(result, expected)
def test_on_float(self):
# mimics how to determine the minimum-price variation
df1 = pd.DataFrame({
'price': [5.01, 0.0023, 25.13, 340.05, 30.78, 1040.90, 0.0078],
'symbol': list("ABCDEFG")},
columns=['symbol', 'price'])
df2 = pd.DataFrame({
'price': [0.0, 1.0, 100.0],
'mpv': [0.0001, 0.01, 0.05]},
columns=['price', 'mpv'])
df1 = df1.sort_values('price').reset_index(drop=True)
result = pd.merge_asof(df1, df2, on='price')
expected = pd.DataFrame({
'symbol': list("BGACEDF"),
'price': [0.0023, 0.0078, 5.01, 25.13, 30.78, 340.05, 1040.90],
'mpv': [0.0001, 0.0001, 0.01, 0.01, 0.01, 0.05, 0.05]},
columns=['symbol', 'price', 'mpv'])
assert_frame_equal(result, expected)
def test_on_specialized_type(self):
# GH13936
for dtype in [np.uint8, np.uint16, np.uint32, np.uint64,
np.int8, np.int16, np.int32, np.int64,
np.float16, np.float32, np.float64]:
df1 = pd.DataFrame({
'value': [5, 2, 25, 100, 78, 120, 79],
'symbol': list("ABCDEFG")},
columns=['symbol', 'value'])
df1.value = dtype(df1.value)
df2 = pd.DataFrame({
'value': [0, 80, 120, 125],
'result': list('xyzw')},
columns=['value', 'result'])
df2.value = dtype(df2.value)
df1 = df1.sort_values('value').reset_index(drop=True)
if dtype == np.float16:
with pytest.raises(MergeError):
pd.merge_asof(df1, df2, on='value')
continue
result = pd.merge_asof(df1, df2, on='value')
expected = pd.DataFrame(
{'symbol': list("BACEGDF"),
'value': [2, 5, 25, 78, 79, 100, 120],
'result': list('xxxxxyz')
}, columns=['symbol', 'value', 'result'])
expected.value = dtype(expected.value)
assert_frame_equal(result, expected)
def test_on_specialized_type_by_int(self):
# GH13936
for dtype in [np.uint8, np.uint16, np.uint32, np.uint64,
np.int8, np.int16, np.int32, np.int64,
np.float16, np.float32, np.float64]:
df1 = pd.DataFrame({
'value': [5, 2, 25, 100, 78, 120, 79],
'key': [1, 2, 3, 2, 3, 1, 2],
'symbol': list("ABCDEFG")},
columns=['symbol', 'key', 'value'])
df1.value = dtype(df1.value)
df2 = pd.DataFrame({
'value': [0, 80, 120, 125],
'key': [1, 2, 2, 3],
'result': list('xyzw')},
columns=['value', 'key', 'result'])
df2.value = dtype(df2.value)
df1 = df1.sort_values('value').reset_index(drop=True)
if dtype == np.float16:
with pytest.raises(MergeError):
pd.merge_asof(df1, df2, on='value', by='key')
else:
result = pd.merge_asof(df1, df2, on='value', by='key')
expected = pd.DataFrame({
'symbol': list("BACEGDF"),
'key': [2, 1, 3, 3, 2, 2, 1],
'value': [2, 5, 25, 78, 79, 100, 120],
'result': [np.nan, 'x', np.nan, np.nan, np.nan, 'y', 'x']},
columns=['symbol', 'key', 'value', 'result'])
expected.value = dtype(expected.value)
assert_frame_equal(result, expected)
def test_on_float_by_int(self):
# type specialize both "by" and "on" parameters
df1 = pd.DataFrame({
'symbol': list("AAABBBCCC"),
'exch': [1, 2, 3, 1, 2, 3, 1, 2, 3],
'price': [3.26, 3.2599, 3.2598, 12.58, 12.59,
12.5, 378.15, 378.2, 378.25]},
columns=['symbol', 'exch', 'price'])
df2 = pd.DataFrame({
'exch': [1, 1, 1, 2, 2, 2, 3, 3, 3],
'price': [0.0, 1.0, 100.0, 0.0, 5.0, 100.0, 0.0, 5.0, 1000.0],
'mpv': [0.0001, 0.01, 0.05, 0.0001, 0.01, 0.1, 0.0001, 0.25, 1.0]},
columns=['exch', 'price', 'mpv'])
df1 = df1.sort_values('price').reset_index(drop=True)
df2 = df2.sort_values('price').reset_index(drop=True)
result = pd.merge_asof(df1, df2, on='price', by='exch')
expected = pd.DataFrame({
'symbol': list("AAABBBCCC"),
'exch': [3, 2, 1, 3, 1, 2, 1, 2, 3],
'price': [3.2598, 3.2599, 3.26, 12.5, 12.58,
12.59, 378.15, 378.2, 378.25],
'mpv': [0.0001, 0.0001, 0.01, 0.25, 0.01, 0.01, 0.05, 0.1, 0.25]},
columns=['symbol', 'exch', 'price', 'mpv'])
assert_frame_equal(result, expected)
|
gpl-2.0
|
beepee14/scikit-learn
|
sklearn/gaussian_process/tests/test_gaussian_process.py
|
267
|
6813
|
"""
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a one-dimensional Gaussian Process model.
# Check random start optimization.
# Test the interpolating property.
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the interpolating property.
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
thetaL = [1e-4] * 2
thetaU = [1e-1] * 2
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=thetaL,
thetaU=thetaU,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
eps = np.finfo(gp.theta_.dtype).eps
assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters
assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the GP interpolation for 2D output
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
f = lambda x: np.vstack((g(x), g(x))).T
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = f(X)
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
# Repeat test_1d and test_2d for several built-in correlation
# models specified as strings.
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
# Repeat test_1d and test_2d with given regression weights (beta0) for
# different regression models (Ordinary Kriging).
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the reduced likelihood function of the optimal theta.
n_samples, n_features = 50, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
best_likelihood = -np.inf
for random_start in range(1, 5):
gp = GaussianProcess(regr="constant", corr="squared_exponential",
theta0=[1e-0] * n_features,
thetaL=[1e-4] * n_features,
thetaU=[1e+1] * n_features,
random_start=random_start, random_state=0,
verbose=False).fit(X, y)
rlf = gp.reduced_likelihood_function()[0]
assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
best_likelihood = rlf
def test_mse_solving():
# test the MSE estimate to be sane.
# non-regression test for ignoring off-diagonals of feature covariance,
# testing with nugget that renders covariance useless, only
# using the mean function, with low effective rank of data
gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
optimizer='Welch', regr="linear", random_state=0)
X, y = make_regression(n_informative=3, n_features=60, noise=50,
random_state=0, effective_rank=1)
gp.fit(X, y)
assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
|
bsd-3-clause
|
nmartensen/pandas
|
asv_bench/benchmarks/timedelta.py
|
6
|
1054
|
from .pandas_vb_common import *
from pandas import to_timedelta, Timestamp
class ToTimedelta(object):
goal_time = 0.2
def setup(self):
self.arr = np.random.randint(0, 1000, size=10000)
self.arr2 = ['{0} days'.format(i) for i in self.arr]
self.arr3 = np.random.randint(0, 60, size=10000)
self.arr3 = ['00:00:{0:02d}'.format(i) for i in self.arr3]
self.arr4 = list(self.arr2)
self.arr4[-1] = 'apple'
def time_convert_int(self):
to_timedelta(self.arr, unit='s')
def time_convert_string(self):
to_timedelta(self.arr2)
def time_convert_string_seconds(self):
to_timedelta(self.arr3)
def time_convert_coerce(self):
to_timedelta(self.arr4, errors='coerce')
def time_convert_ignore(self):
to_timedelta(self.arr4, errors='ignore')
class Ops(object):
goal_time = 0.2
def setup(self):
self.td = to_timedelta(np.arange(1000000))
self.ts = Timestamp('2000')
def test_add_td_ts(self):
self.td + self.ts
|
bsd-3-clause
|
dawsonjon/FPGA-TX
|
psk31_encode.py
|
1
|
4482
|
varicodes = [
"1010101011", #00 NUL Null character
"1011011011", #01 SOH Start of Header
"1011101101", #02 STX Start of Text
"1101110111", #03 ETX End of Text
"1011101011", #04 EOT End of Transmission
"1101011111", #05 ENQ Enquiry
"1011101111", #06 ACK Acknowledgment
"1011111101", #07 BEL Bell
"1011111111", #08 BS Backspace
"11101111", #09 HT Horizontal Tab
"11101", #0A LF Line feed
"1101101111", #0B VT Vertical Tab
"1011011101", #0C FF Form feed
"11111", #0D CR Carriage return
"1101110101", #0E SO Shift Out
"1110101011", #0F SI Shift In
"1011110111", #10 DLE Data Link Escape
"1011110101", #11 DC1 Device Control 1 (XON)
"1110101101", #12 DC2 Device Control 2
"1110101111", #13 DC3 Device Control 3 (XOFF)
"1101011011", #14 DC4 Device Control 4
"1101101011", #15 NAK Negative Acknowledgement
"1101101101", #16 SYN Synchronous Idle
"1101010111", #17 ETB End of Trans. Block
"1101111011", #18 CAN Cancel
"1101111101", #19 EM End of Medium
"1110110111", #1A SUB Substitute
"1101010101", #1B ESC Escape
"1101011101", #1C FS File Separator
"1110111011", #1D GS Group Separator
"1011111011", #1E RS Record Separator
"1101111111", #1F US Unit Separator
"1", #20 SP
"111111111", #21 !
"101011111", #22 "
"111110101", #23 #
"111011011", #24 $
"1011010101", #25 %
"1010111011", #26 &
"101111111", #27 '
"11111011", #28 (
"11110111", #29 )
"101101111", #2A *
"111011111", #2B +
"1110101", #2C ,
"110101", #2D -
"1010111", #2E .
"110101111", #2F /
"10110111", #30 0
"10111101", #31 1
"11101101", #32 2
"11111111", #33 3
"101110111", #34 4
"101011011", #35 5
"101101011", #36 6
"110101101", #37 7
"110101011", #38 8
"110110111", #39 9
"11110101", #3A :
"110111101", #3B ;
"111101101", #3C <
"1010101", #3D =
"111010111", #3E >
"1010101111", #3F ?
"1010111101", #40 @
"1111101", #41 A
"11101011", #42 B
"10101101", #43 C
"10110101", #44 D
"1110111", #45 E
"11011011", #46 F
"11111101", #47 G
"101010101", #48 H
"1111111", #49 I
"111111101", #4A J
"101111101", #4B K
"11010111", #4C L
"10111011", #4D M
"11011101", #4E N
"10101011", #4F O
"11010101", #50 P
"111011101", #51 Q
"10101111", #52 R
"1101111", #53 S
"1101101", #54 T
"101010111", #55 U
"110110101", #56 V
"101011101", #57 W
"101110101", #58 X
"101111011", #59 Y
"1010101101", #5A Z
"111110111", #5B [
"111101111", #5C \
"111111011", #5D ]
"1010111111", #5E ^
"101101101", #5F _
"1011011111", #60 `
"1011", #61 a
"1011111", #62 b
"101111", #63 c
"101101", #64 d
"11", #65 e
"111101", #66 f
"1011011", #67 g
"101011", #68 h
"1101", #69 i
"111101011", #6A j
"10111111", #6B k
"11011", #6C l
"111011", #6D m
"1111", #6E n
"111", #6F o
"111111", #70 p
"110111111", #71 q
"10101", #72 r
"10111", #73 s
"101", #74 t
"110111", #75 u
"1111011", #76 v
"1101011", #77 w
"11011111", #78 x
"1011101", #79 y
"111010101", #7A z
"1010110111", #7B {
"110111011", #7C |
"1010110101", #7D }
"1011010111", #7E ~
"1110110101", #7F DEL
]
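# For example, ord('e') is 0x65, so varicodes[ord('e')] == "11"; encode() below
# appends "0000" after each character's code as the inter-character gap.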
import struct
import sys
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as signal
import scipy.io.wavfile as wav
def encode(message, sample_rate, baud_rate, subcarrier_frequency):
n = int(sample_rate // baud_rate)  # samples per symbol
#varicode message
varicoded = "".join([varicodes[ord(i)]+"0000" for i in message])
varicoded = "1111110000" + varicoded + "000001111111"
#phase code
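# BPSK: a '1' bit keeps the current carrier phase for one symbol period, a '0'
# bit flips it, using the cosine-shaped 'zero' envelope below to smooth the transition.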
one = np.ones(n)
zero = np.concatenate([
np.cos(np.linspace(0, np.pi, n // 2)) + 1,
np.cos(np.linspace(0, np.pi, n // 2)) - 1
])*0.5
phase = 1
phase_codes = []
for i in varicoded:
if i=='1':
phase_codes.append(phase*one)
else:
phase_codes.append(phase*zero)
phase *= -1
phase_coded = np.concatenate(phase_codes)
#add subcarrier
t = np.arange(0, len(phase_coded))
subcarrier = np.sin(2*np.pi*t*subcarrier_frequency/sample_rate)
plt.plot(phase_coded*subcarrier)
plt.show()
return phase_coded * subcarrier
pcm_fp = encode("hello world", 8000, 31.25, 1000)*0.5
wav.write("psk.wav", 8000, pcm_fp)
|
mit
|
rudhir-upretee/Sumo_With_Netsim
|
tools/projects/TaxiFCD_Krieg/src/taxiQuantity/QuantityOverDay.py
|
3
|
2316
|
# -*- coding: Latin-1 -*-
"""
@file QuantityOverDay.py
@author Sascha Krieg
@author Daniel Krajzewicz
@author Michael Behrisch
@date 2008-04-01
Counts, for a given interval, all unique taxis in an FCD file and draws the result as a bar chart.
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2008-2012 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
from pylab import *
import datetime
from matplotlib.dates import MinuteLocator, HourLocator, DateFormatter
import util.Path as path
#global vars
intervalDelta=datetime.timedelta(minutes=60)
intervalDate=datetime.datetime( 2007, 7, 18,0,0 )
format="%Y-%m-%d %H:%M:%S"
barList={}
def main():
print "start program"
countTaxis()
#a figure (chart) where we add the bar's and change the axis properties
fig = figure()
ax = fig.add_subplot(111)
#set the width of the bar to interval-size
barWidth=date2num(intervalDate+intervalDelta)-date2num(intervalDate)
#add a bar with specified values and width
ax.bar(date2num(barList.keys()),barList.values(),width=barWidth)
#set the x-Axis to show the hours
ax.xaxis.set_major_locator(HourLocator())
ax.xaxis.set_major_formatter(DateFormatter("%H:%M"))
ax.xaxis.set_minor_locator(MinuteLocator())
ax.grid(True)
xlabel('Zeit (s)')
ylabel('Quantit'+u'\u00E4'+'t')
title('Menge der Taxis im VLS-Gebiet')
ax.autoscale_view()
#shows the text of the x-axis in a way that it looks nice
fig.autofmt_xdate()
#display the chart
show()
def countTaxis():
"""Analyzes the FCD and generates a list which is used to draw the bar chart."""
global barList
global intervalDate
taxis=set()
#intervalDate+=intervalDelta
inputFile=open(path.vls,'r')
for line in inputFile:
words=line.split("\t")
# if the entry's timestamp still lies within the current interval (strptime parses the string into a datetime)
if intervalDate+intervalDelta>intervalDate.strptime(words[0],format):
taxis.add(words[4])
#print words
else:
barList[intervalDate]=len(taxis)
intervalDate+=intervalDelta
taxis.clear()
#start the program
main()
|
gpl-3.0
|
tequa/ammisoft
|
ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/matplotlib/backends/backend_macosx.py
|
10
|
7837
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import FigureManagerBase, FigureCanvasBase, \
NavigationToolbar2, TimerBase
from matplotlib.backend_bases import ShowBase
from matplotlib.figure import Figure
from matplotlib import rcParams
from matplotlib.widgets import SubplotTool
import matplotlib
from matplotlib.backends import _macosx
from .backend_agg import RendererAgg, FigureCanvasAgg
class Show(ShowBase):
def mainloop(self):
_macosx.show()
show = Show()
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def draw_if_interactive():
"""
For performance reasons, we don't want to redraw the figure after
each draw command. Instead, we mark the figure as invalid, so that
it will be redrawn as soon as the event loop resumes via PyOS_InputHook.
This function should be called after each draw event, even if
matplotlib is not running interactively.
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.invalidate()
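# Illustrative flow (not part of the original file): in an interactive pyplot
# session a call such as plt.plot([1, 2, 3]) ends with draw_if_interactive(),
# which only marks the canvas invalid; the actual redraw happens once control
# returns to the Cocoa event loop.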
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, figure)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasMac(figure)
manager = FigureManagerMac(canvas, num)
return manager
class TimerMac(_macosx.Timer, TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses CoreFoundation
run loops for timer events.
Attributes:
* interval: The time between timer events in milliseconds. Default
is 1000 ms.
* single_shot: Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
* callbacks: Stores list of (func, args) tuples that will be called
upon timer events. This list can be manipulated directly, or the
functions add_callback and remove_callback can be used.
'''
# completely implemented at the C-level (in _macosx.Timer)
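#
# Illustrative sketch (not part of the original file): timers are normally
# obtained from the canvas rather than constructed directly, e.g.
# timer = canvas.new_timer(interval=500)
# timer.add_callback(on_tick)  # on_tick is a hypothetical callback
# timer.start()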
class FigureCanvasMac(_macosx.FigureCanvas, FigureCanvasAgg):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
Events such as button presses, mouse movements, and key presses
are handled in the C code and the base class methods
button_press_event, button_release_event, motion_notify_event,
key_press_event, and key_release_event are called from there.
"""
def __init__(self, figure):
FigureCanvasBase.__init__(self, figure)
width, height = self.get_width_height()
_macosx.FigureCanvas.__init__(self, width, height)
self._device_scale = 1.0
def _set_device_scale(self, value):
if self._device_scale != value:
self.figure.dpi = self.figure.dpi / self._device_scale * value
self._device_scale = value
def get_renderer(self, cleared=False):
l, b, w, h = self.figure.bbox.bounds
key = w, h, self.figure.dpi
try:
self._lastKey, self._renderer
except AttributeError:
need_new_renderer = True
else:
need_new_renderer = (self._lastKey != key)
if need_new_renderer:
self._renderer = RendererAgg(w, h, self.figure.dpi)
self._lastKey = key
elif cleared:
self._renderer.clear()
return self._renderer
def _draw(self):
renderer = self.get_renderer()
if not self.figure.stale:
return renderer
self.figure.draw(renderer)
return renderer
def draw(self):
self.invalidate()
def draw_idle(self, *args, **kwargs):
self.invalidate()
def blit(self, bbox):
self.invalidate()
def resize(self, width, height):
dpi = self.figure.dpi
width /= dpi
height /= dpi
self.figure.set_size_inches(width * self._device_scale,
height * self._device_scale,
forward=False)
FigureCanvasBase.resize_event(self)
self.draw_idle()
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of :class:`backend_bases.Timer`.
This is useful for getting periodic events through the backend's native
event loop. Implemented only for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerMac(*args, **kwargs)
class FigureManagerMac(_macosx.FigureManager, FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
"""
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
title = "Figure %d" % num
_macosx.FigureManager.__init__(self, canvas, title)
if rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2Mac(canvas)
else:
self.toolbar = None
if self.toolbar is not None:
self.toolbar.update()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
if matplotlib.is_interactive():
self.show()
self.canvas.draw_idle()
def close(self):
Gcf.destroy(self.num)
class NavigationToolbar2Mac(_macosx.NavigationToolbar2, NavigationToolbar2):
def __init__(self, canvas):
NavigationToolbar2.__init__(self, canvas)
def _init_toolbar(self):
basedir = os.path.join(rcParams['datapath'], "images")
_macosx.NavigationToolbar2.__init__(self, basedir)
def draw_rubberband(self, event, x0, y0, x1, y1):
self.canvas.set_rubberband(int(x0), int(y0), int(x1), int(y1))
def release(self, event):
self.canvas.remove_rubberband()
def set_cursor(self, cursor):
_macosx.set_cursor(cursor)
def save_figure(self, *args):
filename = _macosx.choose_save_file('Save the figure',
self.canvas.get_default_filename())
if filename is None: # Cancel
return
self.canvas.print_figure(filename)
def prepare_configure_subplots(self):
toolfig = Figure(figsize=(6,3))
canvas = FigureCanvasMac(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
return canvas
def set_message(self, message):
_macosx.NavigationToolbar2.set_message(self, message.encode('utf-8'))
def dynamic_update(self):
self.canvas.draw_idle()
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureCanvas = FigureCanvasMac
FigureManager = FigureManagerMac
|
bsd-3-clause
|
TomAugspurger/pandas
|
pandas/tests/dtypes/cast/test_downcast.py
|
2
|
2790
|
import decimal
import numpy as np
import pytest
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas import DatetimeIndex, Series, Timestamp
import pandas._testing as tm
@pytest.mark.parametrize(
"arr,dtype,expected",
[
(
np.array([8.5, 8.6, 8.7, 8.8, 8.9999999999995]),
"infer",
np.array([8.5, 8.6, 8.7, 8.8, 8.9999999999995]),
),
(
np.array([8.0, 8.0, 8.0, 8.0, 8.9999999999995]),
"infer",
np.array([8, 8, 8, 8, 9], dtype=np.int64),
),
(
np.array([8.0, 8.0, 8.0, 8.0, 9.0000000000005]),
"infer",
np.array([8, 8, 8, 8, 9], dtype=np.int64),
),
(
# This is a judgement call, but we do _not_ downcast Decimal
# objects
np.array([decimal.Decimal(0.0)]),
"int64",
np.array([decimal.Decimal(0.0)]),
),
],
)
def test_downcast(arr, expected, dtype):
result = maybe_downcast_to_dtype(arr, dtype)
tm.assert_numpy_array_equal(result, expected)
def test_downcast_booleans():
# see gh-16875: coercing of booleans.
ser = Series([True, True, False])
result = maybe_downcast_to_dtype(ser, np.dtype(np.float64))
expected = ser
tm.assert_series_equal(result, expected)
def test_downcast_conversion_no_nan(any_real_dtype):
dtype = any_real_dtype
expected = np.array([1, 2])
arr = np.array([1.0, 2.0], dtype=dtype)
result = maybe_downcast_to_dtype(arr, "infer")
tm.assert_almost_equal(result, expected, check_dtype=False)
def test_downcast_conversion_nan(float_dtype):
dtype = float_dtype
data = [1.0, 2.0, np.nan]
expected = np.array(data, dtype=dtype)
arr = np.array(data, dtype=dtype)
result = maybe_downcast_to_dtype(arr, "infer")
tm.assert_almost_equal(result, expected)
def test_downcast_conversion_empty(any_real_dtype):
dtype = any_real_dtype
arr = np.array([], dtype=dtype)
result = maybe_downcast_to_dtype(arr, "int64")
tm.assert_numpy_array_equal(result, np.array([], dtype=np.int64))
@pytest.mark.parametrize("klass", [np.datetime64, np.timedelta64])
def test_datetime_likes_nan(klass):
dtype = klass.__name__ + "[ns]"
arr = np.array([1, 2, np.nan])
exp = np.array([1, 2, klass("NaT")], dtype)
res = maybe_downcast_to_dtype(arr, dtype)
tm.assert_numpy_array_equal(res, exp)
@pytest.mark.parametrize("as_asi", [True, False])
def test_datetime_with_timezone(as_asi):
# see gh-15426
ts = Timestamp("2016-01-01 12:00:00", tz="US/Pacific")
exp = DatetimeIndex([ts, ts])
obj = exp.asi8 if as_asi else exp
res = maybe_downcast_to_dtype(obj, exp.dtype)
tm.assert_index_equal(res, exp)
|
bsd-3-clause
|
linebp/pandas
|
pandas/io/formats/format.py
|
1
|
86559
|
# -*- coding: utf-8 -*-
"""
Internal module for formatting output data in csv, html,
and latex files. This module also applies to display formatting.
"""
from __future__ import print_function
from distutils.version import LooseVersion
# pylint: disable=W0141
from textwrap import dedent
from pandas.core.dtypes.missing import isnull, notnull
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_float_dtype,
is_period_arraylike,
is_integer_dtype,
is_interval_dtype,
is_datetimetz,
is_integer,
is_float,
is_numeric_dtype,
is_datetime64_dtype,
is_timedelta64_dtype,
is_list_like)
from pandas.core.dtypes.generic import ABCSparseArray
from pandas.core.base import PandasObject
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas import compat
from pandas.compat import (StringIO, lzip, range, map, zip, u,
OrderedDict, unichr)
from pandas.io.formats.terminal import get_terminal_size
from pandas.core.config import get_option, set_option
from pandas.io.common import (_get_handle, UnicodeWriter, _expand_user,
_stringify_path)
from pandas.io.formats.printing import adjoin, justify, pprint_thing
from pandas.io.formats.common import get_level_lengths
import pandas.core.common as com
import pandas._libs.lib as lib
from pandas._libs.tslib import (iNaT, Timestamp, Timedelta,
format_array_from_datetime)
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import PeriodIndex
import pandas as pd
import numpy as np
import itertools
import csv
common_docstring = """
Parameters
----------
buf : StringIO-like, optional
buffer to write to
columns : sequence, optional
the subset of columns to write; default None writes all columns
col_space : int, optional
the minimum width of each column
header : bool, optional
%(header)s
index : bool, optional
whether to print index (row) labels, default True
na_rep : string, optional
string representation of NAN to use, default 'NaN'
formatters : list or dict of one-parameter functions, optional
formatter functions to apply to columns' elements by position or name,
default None. The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional
formatter function to apply to columns' elements if they are floats,
default None. The result of this function must be a unicode string.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print every
multiindex key at each row, default True
index_names : bool, optional
Prints the names of the indexes, default True
line_width : int, optional
Width to wrap a line in characters, default no wrap"""
justify_docstring = """
justify : {'left', 'right'}, default None
Left or right-justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box."""
return_docstring = """
Returns
-------
formatted : string (or unicode, depending on data and options)"""
docstring_to_string = common_docstring + justify_docstring + return_docstring
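# --- Illustrative sketch (added; not part of the original module) ---------
# The parameters documented in ``common_docstring`` above are the ones
# exposed by the public DataFrame.to_string / to_html / to_latex methods.
# A minimal, hypothetical usage example using only public pandas APIs:
def _example_common_parameters():
    df = pd.DataFrame({"price": [1.5, np.nan], "qty": [10, 20]})
    return df.to_string(na_rep="-",                       # text shown for NaN
                        float_format=lambda v: "%.2f" % v,
                        col_space=8,                       # minimum column width
                        index=False)                       # omit row labels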
class CategoricalFormatter(object):
def __init__(self, categorical, buf=None, length=True, na_rep='NaN',
footer=True):
self.categorical = categorical
self.buf = buf if buf is not None else StringIO(u(""))
self.na_rep = na_rep
self.length = length
self.footer = footer
def _get_footer(self):
footer = ''
if self.length:
if footer:
footer += ', '
footer += "Length: %d" % len(self.categorical)
level_info = self.categorical._repr_categories_info()
        # Levels are added on a new line
if footer:
footer += '\n'
footer += level_info
return compat.text_type(footer)
def _get_formatted_values(self):
return format_array(self.categorical.get_values(), None,
float_format=None, na_rep=self.na_rep)
def to_string(self):
categorical = self.categorical
if len(categorical) == 0:
if self.footer:
return self._get_footer()
else:
return u('')
fmt_values = self._get_formatted_values()
result = ['%s' % i for i in fmt_values]
result = [i.strip() for i in result]
result = u(', ').join(result)
result = [u('[') + result + u(']')]
if self.footer:
footer = self._get_footer()
if footer:
result.append(footer)
return compat.text_type(u('\n').join(result))
class SeriesFormatter(object):
def __init__(self, series, buf=None, length=True, header=True, index=True,
na_rep='NaN', name=False, float_format=None, dtype=True,
max_rows=None):
self.series = series
self.buf = buf if buf is not None else StringIO()
self.name = name
self.na_rep = na_rep
self.header = header
self.length = length
self.index = index
self.max_rows = max_rows
if float_format is None:
float_format = get_option("display.float_format")
self.float_format = float_format
self.dtype = dtype
self.adj = _get_adjustment()
self._chk_truncate()
def _chk_truncate(self):
from pandas.core.reshape.concat import concat
max_rows = self.max_rows
truncate_v = max_rows and (len(self.series) > max_rows)
series = self.series
if truncate_v:
if max_rows == 1:
row_num = max_rows
series = series.iloc[:max_rows]
else:
row_num = max_rows // 2
series = concat((series.iloc[:row_num],
series.iloc[-row_num:]))
self.tr_row_num = row_num
self.tr_series = series
self.truncate_v = truncate_v
def _get_footer(self):
name = self.series.name
footer = u('')
if getattr(self.series.index, 'freq', None) is not None:
footer += 'Freq: %s' % self.series.index.freqstr
if self.name is not False and name is not None:
if footer:
footer += ', '
series_name = pprint_thing(name,
escape_chars=('\t', '\r', '\n'))
footer += ("Name: %s" % series_name) if name is not None else ""
if (self.length is True or
(self.length == 'truncate' and self.truncate_v)):
if footer:
footer += ', '
footer += 'Length: %d' % len(self.series)
if self.dtype is not False and self.dtype is not None:
name = getattr(self.tr_series.dtype, 'name', None)
if name:
if footer:
footer += ', '
footer += 'dtype: %s' % pprint_thing(name)
# level infos are added to the end and in a new line, like it is done
# for Categoricals
if is_categorical_dtype(self.tr_series.dtype):
level_info = self.tr_series._values._repr_categories_info()
if footer:
footer += "\n"
footer += level_info
return compat.text_type(footer)
def _get_formatted_index(self):
index = self.tr_series.index
is_multi = isinstance(index, MultiIndex)
if is_multi:
have_header = any(name for name in index.names)
fmt_index = index.format(names=True)
else:
have_header = index.name is not None
fmt_index = index.format(name=True)
return fmt_index, have_header
def _get_formatted_values(self):
return format_array(self.tr_series._values, None,
float_format=self.float_format, na_rep=self.na_rep)
def to_string(self):
series = self.tr_series
footer = self._get_footer()
if len(series) == 0:
return 'Series([], ' + footer + ')'
fmt_index, have_header = self._get_formatted_index()
fmt_values = self._get_formatted_values()
if self.truncate_v:
n_header_rows = 0
row_num = self.tr_row_num
width = self.adj.len(fmt_values[row_num - 1])
if width > 3:
dot_str = '...'
else:
dot_str = '..'
# Series uses mode=center because it has single value columns
# DataFrame uses mode=left
dot_str = self.adj.justify([dot_str], width, mode='center')[0]
fmt_values.insert(row_num + n_header_rows, dot_str)
fmt_index.insert(row_num + 1, '')
if self.index:
result = self.adj.adjoin(3, *[fmt_index[1:], fmt_values])
else:
result = self.adj.adjoin(3, fmt_values).replace('\n ',
'\n').strip()
if self.header and have_header:
result = fmt_index[0] + '\n' + result
if footer:
result += '\n' + footer
return compat.text_type(u('').join(result))
class TextAdjustment(object):
def __init__(self):
self.encoding = get_option("display.encoding")
def len(self, text):
return compat.strlen(text, encoding=self.encoding)
def justify(self, texts, max_len, mode='right'):
return justify(texts, max_len, mode=mode)
def adjoin(self, space, *lists, **kwargs):
return adjoin(space, *lists, strlen=self.len,
justfunc=self.justify, **kwargs)
class EastAsianTextAdjustment(TextAdjustment):
def __init__(self):
super(EastAsianTextAdjustment, self).__init__()
if get_option("display.unicode.ambiguous_as_wide"):
self.ambiguous_width = 2
else:
self.ambiguous_width = 1
def len(self, text):
return compat.east_asian_len(text, encoding=self.encoding,
ambiguous_width=self.ambiguous_width)
def justify(self, texts, max_len, mode='right'):
# re-calculate padding space per str considering East Asian Width
def _get_pad(t):
return max_len - self.len(t) + len(t)
if mode == 'left':
return [x.ljust(_get_pad(x)) for x in texts]
elif mode == 'center':
return [x.center(_get_pad(x)) for x in texts]
else:
return [x.rjust(_get_pad(x)) for x in texts]
def _get_adjustment():
use_east_asian_width = get_option("display.unicode.east_asian_width")
if use_east_asian_width:
return EastAsianTextAdjustment()
else:
return TextAdjustment()
class TableFormatter(object):
is_truncated = False
show_dimensions = None
@property
def should_show_dimensions(self):
return (self.show_dimensions is True or
(self.show_dimensions == 'truncate' and self.is_truncated))
def _get_formatter(self, i):
if isinstance(self.formatters, (list, tuple)):
if is_integer(i):
return self.formatters[i]
else:
return None
else:
if is_integer(i) and i not in self.columns:
i = self.columns[i]
return self.formatters.get(i, None)
class DataFrameFormatter(TableFormatter):
"""
Render a DataFrame
self.to_string() : console-friendly tabular output
self.to_html() : html table
self.to_latex() : LaTeX tabular environment table
"""
__doc__ = __doc__ if __doc__ else ''
__doc__ += common_docstring + justify_docstring + return_docstring
def __init__(self, frame, buf=None, columns=None, col_space=None,
header=True, index=True, na_rep='NaN', formatters=None,
justify=None, float_format=None, sparsify=None,
index_names=True, line_width=None, max_rows=None,
max_cols=None, show_dimensions=False, decimal='.', **kwds):
self.frame = frame
if buf is not None:
self.buf = _expand_user(_stringify_path(buf))
else:
self.buf = StringIO()
self.show_index_names = index_names
if sparsify is None:
sparsify = get_option("display.multi_sparse")
self.sparsify = sparsify
self.float_format = float_format
self.formatters = formatters if formatters is not None else {}
self.na_rep = na_rep
self.decimal = decimal
self.col_space = col_space
self.header = header
self.index = index
self.line_width = line_width
self.max_rows = max_rows
self.max_cols = max_cols
self.max_rows_displayed = min(max_rows or len(self.frame),
len(self.frame))
self.show_dimensions = show_dimensions
if justify is None:
self.justify = get_option("display.colheader_justify")
else:
self.justify = justify
self.kwds = kwds
if columns is not None:
self.columns = _ensure_index(columns)
self.frame = self.frame[self.columns]
else:
self.columns = frame.columns
self._chk_truncate()
self.adj = _get_adjustment()
def _chk_truncate(self):
"""
Checks whether the frame should be truncated. If so, slices
the frame up.
"""
from pandas.core.reshape.concat import concat
# Column of which first element is used to determine width of a dot col
self.tr_size_col = -1
# Cut the data to the information actually printed
max_cols = self.max_cols
max_rows = self.max_rows
if max_cols == 0 or max_rows == 0: # assume we are in the terminal
# (why else = 0)
(w, h) = get_terminal_size()
self.w = w
self.h = h
if self.max_rows == 0:
dot_row = 1
prompt_row = 1
if self.show_dimensions:
show_dimension_rows = 3
n_add_rows = (self.header + dot_row + show_dimension_rows +
prompt_row)
# rows available to fill with actual data
max_rows_adj = self.h - n_add_rows
self.max_rows_adj = max_rows_adj
# Format only rows and columns that could potentially fit the
# screen
if max_cols == 0 and len(self.frame.columns) > w:
max_cols = w
if max_rows == 0 and len(self.frame) > h:
max_rows = h
if not hasattr(self, 'max_rows_adj'):
self.max_rows_adj = max_rows
if not hasattr(self, 'max_cols_adj'):
self.max_cols_adj = max_cols
max_cols_adj = self.max_cols_adj
max_rows_adj = self.max_rows_adj
truncate_h = max_cols_adj and (len(self.columns) > max_cols_adj)
truncate_v = max_rows_adj and (len(self.frame) > max_rows_adj)
frame = self.frame
if truncate_h:
if max_cols_adj == 0:
col_num = len(frame.columns)
elif max_cols_adj == 1:
frame = frame.iloc[:, :max_cols]
col_num = max_cols
else:
col_num = (max_cols_adj // 2)
frame = concat((frame.iloc[:, :col_num],
frame.iloc[:, -col_num:]), axis=1)
self.tr_col_num = col_num
if truncate_v:
if max_rows_adj == 0:
row_num = len(frame)
if max_rows_adj == 1:
row_num = max_rows
frame = frame.iloc[:max_rows, :]
else:
row_num = max_rows_adj // 2
frame = concat((frame.iloc[:row_num, :],
frame.iloc[-row_num:, :]))
self.tr_row_num = row_num
self.tr_frame = frame
self.truncate_h = truncate_h
self.truncate_v = truncate_v
self.is_truncated = self.truncate_h or self.truncate_v
def _to_str_columns(self):
"""
Render a DataFrame to a list of columns (as lists of strings).
"""
frame = self.tr_frame
# may include levels names also
str_index = self._get_formatted_index(frame)
if not is_list_like(self.header) and not self.header:
stringified = []
for i, c in enumerate(frame):
fmt_values = self._format_col(i)
fmt_values = _make_fixed_width(fmt_values, self.justify,
minimum=(self.col_space or 0),
adj=self.adj)
stringified.append(fmt_values)
else:
if is_list_like(self.header):
if len(self.header) != len(self.columns):
raise ValueError(('Writing %d cols but got %d aliases'
% (len(self.columns), len(self.header))))
str_columns = [[label] for label in self.header]
else:
str_columns = self._get_formatted_column_labels(frame)
stringified = []
for i, c in enumerate(frame):
cheader = str_columns[i]
header_colwidth = max(self.col_space or 0,
*(self.adj.len(x) for x in cheader))
fmt_values = self._format_col(i)
fmt_values = _make_fixed_width(fmt_values, self.justify,
minimum=header_colwidth,
adj=self.adj)
max_len = max(np.max([self.adj.len(x) for x in fmt_values]),
header_colwidth)
cheader = self.adj.justify(cheader, max_len, mode=self.justify)
stringified.append(cheader + fmt_values)
strcols = stringified
if self.index:
strcols.insert(0, str_index)
# Add ... to signal truncated
truncate_h = self.truncate_h
truncate_v = self.truncate_v
if truncate_h:
col_num = self.tr_col_num
# infer from column header
col_width = self.adj.len(strcols[self.tr_size_col][0])
strcols.insert(self.tr_col_num + 1, ['...'.center(col_width)] *
(len(str_index)))
if truncate_v:
n_header_rows = len(str_index) - len(frame)
row_num = self.tr_row_num
for ix, col in enumerate(strcols):
# infer from above row
cwidth = self.adj.len(strcols[ix][row_num])
is_dot_col = False
if truncate_h:
is_dot_col = ix == col_num + 1
if cwidth > 3 or is_dot_col:
my_str = '...'
else:
my_str = '..'
if ix == 0:
dot_mode = 'left'
elif is_dot_col:
cwidth = self.adj.len(strcols[self.tr_size_col][0])
dot_mode = 'center'
else:
dot_mode = 'right'
dot_str = self.adj.justify([my_str], cwidth, mode=dot_mode)[0]
strcols[ix].insert(row_num + n_header_rows, dot_str)
return strcols
def to_string(self):
"""
Render a DataFrame to a console-friendly tabular output.
"""
from pandas import Series
frame = self.frame
if len(frame.columns) == 0 or len(frame.index) == 0:
info_line = (u('Empty %s\nColumns: %s\nIndex: %s') %
(type(self.frame).__name__,
pprint_thing(frame.columns),
pprint_thing(frame.index)))
text = info_line
else:
strcols = self._to_str_columns()
if self.line_width is None: # no need to wrap around just print
# the whole frame
text = self.adj.adjoin(1, *strcols)
elif (not isinstance(self.max_cols, int) or
self.max_cols > 0): # need to wrap around
text = self._join_multiline(*strcols)
else: # max_cols == 0. Try to fit frame to terminal
text = self.adj.adjoin(1, *strcols).split('\n')
row_lens = Series(text).apply(len)
max_len_col_ix = np.argmax(row_lens)
max_len = row_lens[max_len_col_ix]
headers = [ele[0] for ele in strcols]
# Size of last col determines dot col size. See
                # `self._to_str_columns`
size_tr_col = len(headers[self.tr_size_col])
max_len += size_tr_col # Need to make space for largest row
# plus truncate dot col
dif = max_len - self.w
adj_dif = dif
col_lens = Series([Series(ele).apply(len).max()
for ele in strcols])
n_cols = len(col_lens)
counter = 0
while adj_dif > 0 and n_cols > 1:
counter += 1
mid = int(round(n_cols / 2.))
mid_ix = col_lens.index[mid]
col_len = col_lens[mid_ix]
adj_dif -= (col_len + 1) # adjoin adds one
col_lens = col_lens.drop(mid_ix)
n_cols = len(col_lens)
max_cols_adj = n_cols - self.index # subtract index column
self.max_cols_adj = max_cols_adj
# Call again _chk_truncate to cut frame appropriately
# and then generate string representation
self._chk_truncate()
strcols = self._to_str_columns()
text = self.adj.adjoin(1, *strcols)
if not self.index:
text = text.replace('\n ', '\n').strip()
self.buf.writelines(text)
if self.should_show_dimensions:
self.buf.write("\n\n[%d rows x %d columns]" %
(len(frame), len(frame.columns)))
def _join_multiline(self, *strcols):
lwidth = self.line_width
adjoin_width = 1
strcols = list(strcols)
if self.index:
idx = strcols.pop(0)
lwidth -= np.array([self.adj.len(x)
for x in idx]).max() + adjoin_width
col_widths = [np.array([self.adj.len(x) for x in col]).max() if
len(col) > 0 else 0 for col in strcols]
col_bins = _binify(col_widths, lwidth)
nbins = len(col_bins)
if self.truncate_v:
nrows = self.max_rows_adj + 1
else:
nrows = len(self.frame)
str_lst = []
st = 0
for i, ed in enumerate(col_bins):
row = strcols[st:ed]
if self.index:
row.insert(0, idx)
if nbins > 1:
if ed <= len(strcols) and i < nbins - 1:
row.append([' \\'] + [' '] * (nrows - 1))
else:
row.append([' '] * nrows)
str_lst.append(self.adj.adjoin(adjoin_width, *row))
st = ed
return '\n\n'.join(str_lst)
def to_latex(self, column_format=None, longtable=False, encoding=None,
multicolumn=False, multicolumn_format=None, multirow=False):
"""
Render a DataFrame to a LaTeX tabular/longtable environment output.
"""
latex_renderer = LatexFormatter(self, column_format=column_format,
longtable=longtable,
multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow)
if encoding is None:
encoding = 'ascii' if compat.PY2 else 'utf-8'
if hasattr(self.buf, 'write'):
latex_renderer.write_result(self.buf)
elif isinstance(self.buf, compat.string_types):
import codecs
with codecs.open(self.buf, 'w', encoding=encoding) as f:
latex_renderer.write_result(f)
else:
raise TypeError('buf is not a file name and it has no write '
'method')
def _format_col(self, i):
frame = self.tr_frame
formatter = self._get_formatter(i)
return format_array(frame.iloc[:, i]._values, formatter,
float_format=self.float_format, na_rep=self.na_rep,
space=self.col_space, decimal=self.decimal)
def to_html(self, classes=None, notebook=False, border=None):
"""
Render a DataFrame to a html table.
Parameters
----------
classes : str or list-like
classes to include in the `class` attribute of the opening
``<table>`` tag, in addition to the default "dataframe".
notebook : {True, False}, optional, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
``<table>`` tag. Default ``pd.options.html.border``.
.. versionadded:: 0.19.0
"""
html_renderer = HTMLFormatter(self, classes=classes,
max_rows=self.max_rows,
max_cols=self.max_cols,
notebook=notebook,
border=border)
if hasattr(self.buf, 'write'):
html_renderer.write_result(self.buf)
elif isinstance(self.buf, compat.string_types):
with open(self.buf, 'w') as f:
html_renderer.write_result(f)
else:
raise TypeError('buf is not a file name and it has no write '
' method')
def _get_formatted_column_labels(self, frame):
from pandas.core.index import _sparsify
columns = frame.columns
if isinstance(columns, MultiIndex):
fmt_columns = columns.format(sparsify=False, adjoin=False)
fmt_columns = lzip(*fmt_columns)
dtypes = self.frame.dtypes._values
# if we have a Float level, they don't use leading space at all
restrict_formatting = any([l.is_floating for l in columns.levels])
need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
def space_format(x, y):
if (y not in self.formatters and
need_leadsp[x] and not restrict_formatting):
return ' ' + y
return y
str_columns = list(zip(*[[space_format(x, y) for y in x]
for x in fmt_columns]))
if self.sparsify:
str_columns = _sparsify(str_columns)
str_columns = [list(x) for x in zip(*str_columns)]
else:
fmt_columns = columns.format()
dtypes = self.frame.dtypes
need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
str_columns = [[' ' + x if not self._get_formatter(i) and
need_leadsp[x] else x]
for i, (col, x) in enumerate(zip(columns,
fmt_columns))]
if self.show_index_names and self.has_index_names:
for x in str_columns:
x.append('')
# self.str_columns = str_columns
return str_columns
@property
def has_index_names(self):
return _has_names(self.frame.index)
@property
def has_column_names(self):
return _has_names(self.frame.columns)
def _get_formatted_index(self, frame):
# Note: this is only used by to_string() and to_latex(), not by
# to_html().
index = frame.index
columns = frame.columns
show_index_names = self.show_index_names and self.has_index_names
show_col_names = (self.show_index_names and self.has_column_names)
fmt = self._get_formatter('__index__')
if isinstance(index, MultiIndex):
fmt_index = index.format(sparsify=self.sparsify, adjoin=False,
names=show_index_names, formatter=fmt)
else:
fmt_index = [index.format(name=show_index_names, formatter=fmt)]
fmt_index = [tuple(_make_fixed_width(list(x), justify='left',
minimum=(self.col_space or 0),
adj=self.adj)) for x in fmt_index]
adjoined = self.adj.adjoin(1, *fmt_index).split('\n')
# empty space for columns
if show_col_names:
col_header = ['%s' % x for x in self._get_column_name_list()]
else:
col_header = [''] * columns.nlevels
if self.header:
return col_header + adjoined
else:
return adjoined
def _get_column_name_list(self):
names = []
columns = self.frame.columns
if isinstance(columns, MultiIndex):
names.extend('' if name is None else name
for name in columns.names)
else:
names.append('' if columns.name is None else columns.name)
return names
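# --- Illustrative sketch (added; not part of the original module) ---------
# How the DataFrameFormatter defined above is typically driven: the public
# DataFrame.to_string builds a formatter, calls to_string(), and reads the
# rendered text back from ``fmt.buf``. Hypothetical helper for illustration.
def _example_dataframe_formatter():
    df = pd.DataFrame({"a": [1, 2, 3], "b": [1.5, np.nan, 2.25]})
    fmt = DataFrameFormatter(df, na_rep="-", justify="right")
    fmt.to_string()                  # renders into fmt.buf (a StringIO)
    return fmt.buf.getvalue()        # console-friendly tabular text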
class LatexFormatter(TableFormatter):
""" Used to render a DataFrame to a LaTeX tabular/longtable environment
output.
Parameters
----------
formatter : `DataFrameFormatter`
column_format : str, default None
The columns format as specified in `LaTeX table format
        <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3 columns
longtable : boolean, default False
Use a longtable environment instead of tabular.
See also
--------
HTMLFormatter
"""
def __init__(self, formatter, column_format=None, longtable=False,
multicolumn=False, multicolumn_format=None, multirow=False):
self.fmt = formatter
self.frame = self.fmt.frame
self.column_format = column_format
self.longtable = longtable
self.multicolumn = multicolumn
self.multicolumn_format = multicolumn_format
self.multirow = multirow
def write_result(self, buf):
"""
Render a DataFrame to a LaTeX tabular/longtable environment output.
"""
# string representation of the columns
if len(self.frame.columns) == 0 or len(self.frame.index) == 0:
info_line = (u('Empty %s\nColumns: %s\nIndex: %s') %
(type(self.frame).__name__, self.frame.columns,
self.frame.index))
strcols = [[info_line]]
else:
strcols = self.fmt._to_str_columns()
def get_col_type(dtype):
if issubclass(dtype.type, np.number):
return 'r'
else:
return 'l'
# reestablish the MultiIndex that has been joined by _to_str_column
if self.fmt.index and isinstance(self.frame.index, MultiIndex):
clevels = self.frame.columns.nlevels
strcols.pop(0)
name = any(self.frame.index.names)
cname = any(self.frame.columns.names)
lastcol = self.frame.index.nlevels - 1
for i, lev in enumerate(self.frame.index.levels):
lev2 = lev.format()
blank = ' ' * len(lev2[0])
# display column names in last index-column
if cname and i == lastcol:
lev3 = [x if x else '{}' for x in self.frame.columns.names]
else:
lev3 = [blank] * clevels
if name:
lev3.append(lev.name)
for level_idx, group in itertools.groupby(
self.frame.index.labels[i]):
count = len(list(group))
lev3.extend([lev2[level_idx]] + [blank] * (count - 1))
strcols.insert(i, lev3)
column_format = self.column_format
if column_format is None:
dtypes = self.frame.dtypes._values
column_format = ''.join(map(get_col_type, dtypes))
if self.fmt.index:
index_format = 'l' * self.frame.index.nlevels
column_format = index_format + column_format
elif not isinstance(column_format,
compat.string_types): # pragma: no cover
raise AssertionError('column_format must be str or unicode, not %s'
% type(column_format))
if not self.longtable:
buf.write('\\begin{tabular}{%s}\n' % column_format)
buf.write('\\toprule\n')
else:
buf.write('\\begin{longtable}{%s}\n' % column_format)
buf.write('\\toprule\n')
ilevels = self.frame.index.nlevels
clevels = self.frame.columns.nlevels
nlevels = clevels
if any(self.frame.index.names):
nlevels += 1
strrows = list(zip(*strcols))
self.clinebuf = []
for i, row in enumerate(strrows):
if i == nlevels and self.fmt.header:
buf.write('\\midrule\n') # End of header
if self.longtable:
buf.write('\\endhead\n')
buf.write('\\midrule\n')
buf.write('\\multicolumn{3}{r}{{Continued on next '
'page}} \\\\\n')
buf.write('\\midrule\n')
buf.write('\\endfoot\n\n')
buf.write('\\bottomrule\n')
buf.write('\\endlastfoot\n')
if self.fmt.kwds.get('escape', True):
# escape backslashes first
crow = [(x.replace('\\', '\\textbackslash').replace('_', '\\_')
.replace('%', '\\%').replace('$', '\\$')
.replace('#', '\\#').replace('{', '\\{')
.replace('}', '\\}').replace('~', '\\textasciitilde')
.replace('^', '\\textasciicircum').replace('&', '\\&')
if x else '{}') for x in row]
else:
crow = [x if x else '{}' for x in row]
if i < clevels and self.fmt.header and self.multicolumn:
# sum up columns to multicolumns
crow = self._format_multicolumn(crow, ilevels)
if (i >= nlevels and self.fmt.index and self.multirow and
ilevels > 1):
# sum up rows to multirows
crow = self._format_multirow(crow, ilevels, i, strrows)
buf.write(' & '.join(crow))
buf.write(' \\\\\n')
if self.multirow and i < len(strrows) - 1:
self._print_cline(buf, i, len(strcols))
if not self.longtable:
buf.write('\\bottomrule\n')
buf.write('\\end{tabular}\n')
else:
buf.write('\\end{longtable}\n')
def _format_multicolumn(self, row, ilevels):
"""
Combine columns belonging to a group to a single multicolumn entry
according to self.multicolumn_format
e.g.:
a & & & b & c &
will become
\multicolumn{3}{l}{a} & b & \multicolumn{2}{l}{c}
"""
row2 = list(row[:ilevels])
ncol = 1
coltext = ''
def append_col():
# write multicolumn if needed
if ncol > 1:
row2.append('\\multicolumn{{{0:d}}}{{{1:s}}}{{{2:s}}}'
.format(ncol, self.multicolumn_format,
coltext.strip()))
# don't modify where not needed
else:
row2.append(coltext)
for c in row[ilevels:]:
# if next col has text, write the previous
if c.strip():
if coltext:
append_col()
coltext = c
ncol = 1
# if not, add it to the previous multicolumn
else:
ncol += 1
# write last column name
if coltext:
append_col()
return row2
def _format_multirow(self, row, ilevels, i, rows):
"""
        Check following rows to determine whether row should be a multirow

        e.g.:     becomes:
        a & 0 &   \multirow{2}{*}{a} & 0 &
          & 1 &   & 1 &
        b & 0 &   \cline{1-2}
                  b & 0 &
"""
for j in range(ilevels):
if row[j].strip():
nrow = 1
for r in rows[i + 1:]:
if not r[j].strip():
nrow += 1
else:
break
if nrow > 1:
# overwrite non-multirow entry
row[j] = '\\multirow{{{0:d}}}{{*}}{{{1:s}}}'.format(
nrow, row[j].strip())
# save when to end the current block with \cline
self.clinebuf.append([i + nrow - 1, j + 1])
return row
def _print_cline(self, buf, i, icol):
"""
Print clines after multirow-blocks are finished
"""
for cl in self.clinebuf:
if cl[0] == i:
buf.write('\cline{{{0:d}-{1:d}}}\n'.format(cl[1], icol))
# remove entries that have been written to buffer
self.clinebuf = [x for x in self.clinebuf if x[0] != i]
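# --- Illustrative sketch (added; not part of the original module) ---------
# LatexFormatter is normally reached through the public DataFrame.to_latex,
# which forwards the options documented above (column_format, longtable,
# multicolumn, multirow). A minimal, hypothetical example:
def _example_to_latex():
    df = pd.DataFrame({"x": [1, 2], "y": [0.5, 0.25]})
    # 'lrr': left-align the index column, right-align the two data columns
    return df.to_latex(column_format="lrr", longtable=False)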
class HTMLFormatter(TableFormatter):
indent_delta = 2
def __init__(self, formatter, classes=None, max_rows=None, max_cols=None,
notebook=False, border=None):
self.fmt = formatter
self.classes = classes
self.frame = self.fmt.frame
self.columns = self.fmt.tr_frame.columns
self.elements = []
self.bold_rows = self.fmt.kwds.get('bold_rows', False)
self.escape = self.fmt.kwds.get('escape', True)
self.max_rows = max_rows or len(self.fmt.frame)
self.max_cols = max_cols or len(self.fmt.columns)
self.show_dimensions = self.fmt.show_dimensions
self.is_truncated = (self.max_rows < len(self.fmt.frame) or
self.max_cols < len(self.fmt.columns))
self.notebook = notebook
if border is None:
border = get_option('html.border')
self.border = border
def write(self, s, indent=0):
rs = pprint_thing(s)
self.elements.append(' ' * indent + rs)
def write_th(self, s, indent=0, tags=None):
if self.fmt.col_space is not None and self.fmt.col_space > 0:
tags = (tags or "")
tags += 'style="min-width: %s;"' % self.fmt.col_space
return self._write_cell(s, kind='th', indent=indent, tags=tags)
def write_td(self, s, indent=0, tags=None):
return self._write_cell(s, kind='td', indent=indent, tags=tags)
def _write_cell(self, s, kind='td', indent=0, tags=None):
if tags is not None:
start_tag = '<%s %s>' % (kind, tags)
else:
start_tag = '<%s>' % kind
if self.escape:
# escape & first to prevent double escaping of &
            esc = OrderedDict([('&', r'&amp;'), ('<', r'&lt;'),
                               ('>', r'&gt;')])
else:
esc = {}
rs = pprint_thing(s, escape_chars=esc).strip()
self.write('%s%s</%s>' % (start_tag, rs, kind), indent)
def write_tr(self, line, indent=0, indent_delta=4, header=False,
align=None, tags=None, nindex_levels=0):
if tags is None:
tags = {}
if align is None:
self.write('<tr>', indent)
else:
self.write('<tr style="text-align: %s;">' % align, indent)
indent += indent_delta
for i, s in enumerate(line):
val_tag = tags.get(i, None)
if header or (self.bold_rows and i < nindex_levels):
self.write_th(s, indent, tags=val_tag)
else:
self.write_td(s, indent, tags=val_tag)
indent -= indent_delta
self.write('</tr>', indent)
def write_style(self):
template = dedent("""\
<style>
.dataframe thead tr:only-child th {
text-align: right;
}
.dataframe thead th {
text-align: left;
}
.dataframe tbody tr th {
vertical-align: top;
}
</style>""")
if self.notebook:
self.write(template)
def write_result(self, buf):
indent = 0
frame = self.frame
_classes = ['dataframe'] # Default class.
if self.classes is not None:
if isinstance(self.classes, str):
self.classes = self.classes.split()
if not isinstance(self.classes, (list, tuple)):
raise AssertionError('classes must be list or tuple, '
'not %s' % type(self.classes))
_classes.extend(self.classes)
if self.notebook:
div_style = ''
try:
import IPython
if IPython.__version__ < LooseVersion('3.0.0'):
div_style = ' style="max-width:1500px;overflow:auto;"'
except (ImportError, AttributeError):
pass
self.write('<div{0}>'.format(div_style))
self.write_style()
self.write('<table border="%s" class="%s">' % (self.border,
' '.join(_classes)),
indent)
indent += self.indent_delta
indent = self._write_header(indent)
indent = self._write_body(indent)
self.write('</table>', indent)
if self.should_show_dimensions:
by = chr(215) if compat.PY3 else unichr(215) # ×
self.write(u('<p>%d rows %s %d columns</p>') %
(len(frame), by, len(frame.columns)))
if self.notebook:
self.write('</div>')
_put_lines(buf, self.elements)
def _write_header(self, indent):
truncate_h = self.fmt.truncate_h
row_levels = self.frame.index.nlevels
if not self.fmt.header:
# write nothing
return indent
def _column_header():
if self.fmt.index:
row = [''] * (self.frame.index.nlevels - 1)
else:
row = []
if isinstance(self.columns, MultiIndex):
if self.fmt.has_column_names and self.fmt.index:
row.append(single_column_table(self.columns.names))
else:
row.append('')
style = "text-align: %s;" % self.fmt.justify
row.extend([single_column_table(c, self.fmt.justify, style)
for c in self.columns])
else:
if self.fmt.index:
row.append(self.columns.name or '')
row.extend(self.columns)
return row
self.write('<thead>', indent)
row = []
indent += self.indent_delta
if isinstance(self.columns, MultiIndex):
template = 'colspan="%d" halign="left"'
if self.fmt.sparsify:
# GH3547
sentinel = com.sentinel_factory()
else:
sentinel = None
levels = self.columns.format(sparsify=sentinel, adjoin=False,
names=False)
level_lengths = get_level_lengths(levels, sentinel)
inner_lvl = len(level_lengths) - 1
for lnum, (records, values) in enumerate(zip(level_lengths,
levels)):
if truncate_h:
# modify the header lines
ins_col = self.fmt.tr_col_num
if self.fmt.sparsify:
recs_new = {}
# Increment tags after ... col.
for tag, span in list(records.items()):
if tag >= ins_col:
recs_new[tag + 1] = span
elif tag + span > ins_col:
recs_new[tag] = span + 1
if lnum == inner_lvl:
values = (values[:ins_col] + (u('...'),) +
values[ins_col:])
else:
# sparse col headers do not receive a ...
values = (values[:ins_col] +
(values[ins_col - 1], ) +
values[ins_col:])
else:
recs_new[tag] = span
# if ins_col lies between tags, all col headers
# get ...
if tag + span == ins_col:
recs_new[ins_col] = 1
values = (values[:ins_col] + (u('...'),) +
values[ins_col:])
records = recs_new
inner_lvl = len(level_lengths) - 1
if lnum == inner_lvl:
records[ins_col] = 1
else:
recs_new = {}
for tag, span in list(records.items()):
if tag >= ins_col:
recs_new[tag + 1] = span
else:
recs_new[tag] = span
recs_new[ins_col] = 1
records = recs_new
values = (values[:ins_col] + [u('...')] +
values[ins_col:])
name = self.columns.names[lnum]
row = [''] * (row_levels - 1) + ['' if name is None else
pprint_thing(name)]
if row == [""] and self.fmt.index is False:
row = []
tags = {}
j = len(row)
for i, v in enumerate(values):
if i in records:
if records[i] > 1:
tags[j] = template % records[i]
else:
continue
j += 1
row.append(v)
self.write_tr(row, indent, self.indent_delta, tags=tags,
header=True)
else:
col_row = _column_header()
align = self.fmt.justify
if truncate_h:
ins_col = row_levels + self.fmt.tr_col_num
col_row.insert(ins_col, '...')
self.write_tr(col_row, indent, self.indent_delta, header=True,
align=align)
if all((self.fmt.has_index_names,
self.fmt.index,
self.fmt.show_index_names)):
row = ([x if x is not None else ''
for x in self.frame.index.names] +
[''] * min(len(self.columns), self.max_cols))
if truncate_h:
ins_col = row_levels + self.fmt.tr_col_num
row.insert(ins_col, '')
self.write_tr(row, indent, self.indent_delta, header=True)
indent -= self.indent_delta
self.write('</thead>', indent)
return indent
def _write_body(self, indent):
self.write('<tbody>', indent)
indent += self.indent_delta
fmt_values = {}
for i in range(min(len(self.columns), self.max_cols)):
fmt_values[i] = self.fmt._format_col(i)
# write values
if self.fmt.index:
if isinstance(self.frame.index, MultiIndex):
self._write_hierarchical_rows(fmt_values, indent)
else:
self._write_regular_rows(fmt_values, indent)
else:
for i in range(min(len(self.frame), self.max_rows)):
row = [fmt_values[j][i] for j in range(len(self.columns))]
self.write_tr(row, indent, self.indent_delta, tags=None)
indent -= self.indent_delta
self.write('</tbody>', indent)
indent -= self.indent_delta
return indent
def _write_regular_rows(self, fmt_values, indent):
truncate_h = self.fmt.truncate_h
truncate_v = self.fmt.truncate_v
ncols = len(self.fmt.tr_frame.columns)
nrows = len(self.fmt.tr_frame)
fmt = self.fmt._get_formatter('__index__')
if fmt is not None:
index_values = self.fmt.tr_frame.index.map(fmt)
else:
index_values = self.fmt.tr_frame.index.format()
row = []
for i in range(nrows):
if truncate_v and i == (self.fmt.tr_row_num):
str_sep_row = ['...' for ele in row]
self.write_tr(str_sep_row, indent, self.indent_delta,
tags=None, nindex_levels=1)
row = []
row.append(index_values[i])
row.extend(fmt_values[j][i] for j in range(ncols))
if truncate_h:
dot_col_ix = self.fmt.tr_col_num + 1
row.insert(dot_col_ix, '...')
self.write_tr(row, indent, self.indent_delta, tags=None,
nindex_levels=1)
def _write_hierarchical_rows(self, fmt_values, indent):
template = 'rowspan="%d" valign="top"'
truncate_h = self.fmt.truncate_h
truncate_v = self.fmt.truncate_v
frame = self.fmt.tr_frame
ncols = len(frame.columns)
nrows = len(frame)
row_levels = self.frame.index.nlevels
idx_values = frame.index.format(sparsify=False, adjoin=False,
names=False)
idx_values = lzip(*idx_values)
if self.fmt.sparsify:
# GH3547
sentinel = com.sentinel_factory()
levels = frame.index.format(sparsify=sentinel, adjoin=False,
names=False)
level_lengths = get_level_lengths(levels, sentinel)
inner_lvl = len(level_lengths) - 1
if truncate_v:
# Insert ... row and adjust idx_values and
# level_lengths to take this into account.
ins_row = self.fmt.tr_row_num
inserted = False
for lnum, records in enumerate(level_lengths):
rec_new = {}
for tag, span in list(records.items()):
if tag >= ins_row:
rec_new[tag + 1] = span
elif tag + span > ins_row:
rec_new[tag] = span + 1
# GH 14882 - Make sure insertion done once
if not inserted:
dot_row = list(idx_values[ins_row - 1])
dot_row[-1] = u('...')
idx_values.insert(ins_row, tuple(dot_row))
inserted = True
else:
dot_row = list(idx_values[ins_row])
dot_row[inner_lvl - lnum] = u('...')
idx_values[ins_row] = tuple(dot_row)
else:
rec_new[tag] = span
# If ins_row lies between tags, all cols idx cols
# receive ...
if tag + span == ins_row:
rec_new[ins_row] = 1
if lnum == 0:
idx_values.insert(ins_row, tuple(
[u('...')] * len(level_lengths)))
# GH 14882 - Place ... in correct level
elif inserted:
dot_row = list(idx_values[ins_row])
dot_row[inner_lvl - lnum] = u('...')
idx_values[ins_row] = tuple(dot_row)
level_lengths[lnum] = rec_new
level_lengths[inner_lvl][ins_row] = 1
for ix_col in range(len(fmt_values)):
fmt_values[ix_col].insert(ins_row, '...')
nrows += 1
for i in range(nrows):
row = []
tags = {}
sparse_offset = 0
j = 0
for records, v in zip(level_lengths, idx_values[i]):
if i in records:
if records[i] > 1:
tags[j] = template % records[i]
else:
sparse_offset += 1
continue
j += 1
row.append(v)
row.extend(fmt_values[j][i] for j in range(ncols))
if truncate_h:
row.insert(row_levels - sparse_offset +
self.fmt.tr_col_num, '...')
self.write_tr(row, indent, self.indent_delta, tags=tags,
nindex_levels=len(levels) - sparse_offset)
else:
for i in range(len(frame)):
idx_values = list(zip(*frame.index.format(
sparsify=False, adjoin=False, names=False)))
row = []
row.extend(idx_values[i])
row.extend(fmt_values[j][i] for j in range(ncols))
if truncate_h:
row.insert(row_levels + self.fmt.tr_col_num, '...')
self.write_tr(row, indent, self.indent_delta, tags=None,
nindex_levels=frame.index.nlevels)
class CSVFormatter(object):
def __init__(self, obj, path_or_buf=None, sep=",", na_rep='',
float_format=None, cols=None, header=True, index=True,
index_label=None, mode='w', nanRep=None, encoding=None,
compression=None, quoting=None, line_terminator='\n',
chunksize=None, tupleize_cols=False, quotechar='"',
date_format=None, doublequote=True, escapechar=None,
decimal='.'):
self.obj = obj
if path_or_buf is None:
path_or_buf = StringIO()
self.path_or_buf = _expand_user(_stringify_path(path_or_buf))
self.sep = sep
self.na_rep = na_rep
self.float_format = float_format
self.decimal = decimal
self.header = header
self.index = index
self.index_label = index_label
self.mode = mode
self.encoding = encoding
self.compression = compression
if quoting is None:
quoting = csv.QUOTE_MINIMAL
self.quoting = quoting
if quoting == csv.QUOTE_NONE:
# prevents crash in _csv
quotechar = None
self.quotechar = quotechar
self.doublequote = doublequote
self.escapechar = escapechar
self.line_terminator = line_terminator
self.date_format = date_format
self.tupleize_cols = tupleize_cols
self.has_mi_columns = (isinstance(obj.columns, MultiIndex) and
not self.tupleize_cols)
# validate mi options
if self.has_mi_columns:
if cols is not None:
raise TypeError("cannot specify cols with a MultiIndex on the "
"columns")
if cols is not None:
if isinstance(cols, Index):
cols = cols.to_native_types(na_rep=na_rep,
float_format=float_format,
date_format=date_format,
quoting=self.quoting)
else:
cols = list(cols)
self.obj = self.obj.loc[:, cols]
# update columns to include possible multiplicity of dupes
        # and make sure cols is just a list of labels
cols = self.obj.columns
if isinstance(cols, Index):
cols = cols.to_native_types(na_rep=na_rep,
float_format=float_format,
date_format=date_format,
quoting=self.quoting)
else:
cols = list(cols)
# save it
self.cols = cols
# preallocate data 2d list
self.blocks = self.obj._data.blocks
ncols = sum(b.shape[0] for b in self.blocks)
self.data = [None] * ncols
if chunksize is None:
chunksize = (100000 // (len(self.cols) or 1)) or 1
self.chunksize = int(chunksize)
self.data_index = obj.index
if (isinstance(self.data_index, (DatetimeIndex, PeriodIndex)) and
date_format is not None):
self.data_index = Index([x.strftime(date_format) if notnull(x) else
'' for x in self.data_index])
self.nlevels = getattr(self.data_index, 'nlevels', 1)
if not index:
self.nlevels = 0
def save(self):
# create the writer & save
if hasattr(self.path_or_buf, 'write'):
f = self.path_or_buf
close = False
else:
f, handles = _get_handle(self.path_or_buf, self.mode,
encoding=self.encoding,
compression=self.compression)
close = True
try:
writer_kwargs = dict(lineterminator=self.line_terminator,
delimiter=self.sep, quoting=self.quoting,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar)
if self.encoding is not None:
writer_kwargs['encoding'] = self.encoding
self.writer = UnicodeWriter(f, **writer_kwargs)
else:
self.writer = csv.writer(f, **writer_kwargs)
self._save()
finally:
if close:
f.close()
def _save_header(self):
writer = self.writer
obj = self.obj
index_label = self.index_label
cols = self.cols
has_mi_columns = self.has_mi_columns
header = self.header
encoded_labels = []
has_aliases = isinstance(header, (tuple, list, np.ndarray, Index))
if not (has_aliases or self.header):
return
if has_aliases:
if len(header) != len(cols):
raise ValueError(('Writing %d cols but got %d aliases'
% (len(cols), len(header))))
else:
write_cols = header
else:
write_cols = cols
if self.index:
# should write something for index label
if index_label is not False:
if index_label is None:
if isinstance(obj.index, MultiIndex):
index_label = []
for i, name in enumerate(obj.index.names):
if name is None:
name = ''
index_label.append(name)
else:
index_label = obj.index.name
if index_label is None:
index_label = ['']
else:
index_label = [index_label]
elif not isinstance(index_label,
(list, tuple, np.ndarray, Index)):
# given a string for a DF with Index
index_label = [index_label]
encoded_labels = list(index_label)
else:
encoded_labels = []
if not has_mi_columns:
encoded_labels += list(write_cols)
writer.writerow(encoded_labels)
else:
# write out the mi
columns = obj.columns
# write out the names for each level, then ALL of the values for
# each level
for i in range(columns.nlevels):
# we need at least 1 index column to write our col names
col_line = []
if self.index:
# name is the first column
col_line.append(columns.names[i])
if isinstance(index_label, list) and len(index_label) > 1:
col_line.extend([''] * (len(index_label) - 1))
col_line.extend(columns._get_level_values(i))
writer.writerow(col_line)
# Write out the index line if it's not empty.
# Otherwise, we will print out an extraneous
# blank line between the mi and the data rows.
if encoded_labels and set(encoded_labels) != set(['']):
encoded_labels.extend([''] * len(columns))
writer.writerow(encoded_labels)
def _save(self):
self._save_header()
nrows = len(self.data_index)
# write in chunksize bites
chunksize = self.chunksize
chunks = int(nrows / chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
self._save_chunk(start_i, end_i)
def _save_chunk(self, start_i, end_i):
data_index = self.data_index
# create the data for a chunk
slicer = slice(start_i, end_i)
for i in range(len(self.blocks)):
b = self.blocks[i]
d = b.to_native_types(slicer=slicer, na_rep=self.na_rep,
float_format=self.float_format,
decimal=self.decimal,
date_format=self.date_format,
quoting=self.quoting)
for col_loc, col in zip(b.mgr_locs, d):
# self.data is a preallocated list
self.data[col_loc] = col
ix = data_index.to_native_types(slicer=slicer, na_rep=self.na_rep,
float_format=self.float_format,
decimal=self.decimal,
date_format=self.date_format,
quoting=self.quoting)
lib.write_csv_rows(self.data, ix, self.nlevels, self.cols, self.writer)
# ----------------------------------------------------------------------
# Array formatters
def format_array(values, formatter, float_format=None, na_rep='NaN',
digits=None, space=None, justify='right', decimal='.'):
if is_categorical_dtype(values):
fmt_klass = CategoricalArrayFormatter
elif is_interval_dtype(values):
fmt_klass = IntervalArrayFormatter
elif is_float_dtype(values.dtype):
fmt_klass = FloatArrayFormatter
elif is_period_arraylike(values):
fmt_klass = PeriodArrayFormatter
elif is_integer_dtype(values.dtype):
fmt_klass = IntArrayFormatter
elif is_datetimetz(values):
fmt_klass = Datetime64TZFormatter
elif is_datetime64_dtype(values.dtype):
fmt_klass = Datetime64Formatter
elif is_timedelta64_dtype(values.dtype):
fmt_klass = Timedelta64Formatter
else:
fmt_klass = GenericArrayFormatter
if space is None:
space = get_option("display.column_space")
if float_format is None:
float_format = get_option("display.float_format")
if digits is None:
digits = get_option("display.precision")
fmt_obj = fmt_klass(values, digits=digits, na_rep=na_rep,
float_format=float_format, formatter=formatter,
space=space, justify=justify, decimal=decimal)
return fmt_obj.get_result()
class GenericArrayFormatter(object):
def __init__(self, values, digits=7, formatter=None, na_rep='NaN',
space=12, float_format=None, justify='right', decimal='.',
quoting=None, fixed_width=True):
self.values = values
self.digits = digits
self.na_rep = na_rep
self.space = space
self.formatter = formatter
self.float_format = float_format
self.justify = justify
self.decimal = decimal
self.quoting = quoting
self.fixed_width = fixed_width
def get_result(self):
fmt_values = self._format_strings()
return _make_fixed_width(fmt_values, self.justify)
def _format_strings(self):
if self.float_format is None:
float_format = get_option("display.float_format")
if float_format is None:
fmt_str = '%% .%dg' % get_option("display.precision")
float_format = lambda x: fmt_str % x
else:
float_format = self.float_format
formatter = (
self.formatter if self.formatter is not None else
(lambda x: pprint_thing(x, escape_chars=('\t', '\r', '\n'))))
def _format(x):
if self.na_rep is not None and lib.checknull(x):
if x is None:
return 'None'
elif x is pd.NaT:
return 'NaT'
return self.na_rep
elif isinstance(x, PandasObject):
return '%s' % x
else:
# object dtype
return '%s' % formatter(x)
vals = self.values
if isinstance(vals, Index):
vals = vals._values
elif isinstance(vals, ABCSparseArray):
vals = vals.values
is_float_type = lib.map_infer(vals, is_float) & notnull(vals)
leading_space = is_float_type.any()
fmt_values = []
for i, v in enumerate(vals):
if not is_float_type[i] and leading_space:
fmt_values.append(' %s' % _format(v))
elif is_float_type[i]:
fmt_values.append(float_format(v))
else:
fmt_values.append(' %s' % _format(v))
return fmt_values
class FloatArrayFormatter(GenericArrayFormatter):
"""
"""
def __init__(self, *args, **kwargs):
GenericArrayFormatter.__init__(self, *args, **kwargs)
# float_format is expected to be a string
# formatter should be used to pass a function
if self.float_format is not None and self.formatter is None:
if callable(self.float_format):
self.formatter = self.float_format
self.float_format = None
def _value_formatter(self, float_format=None, threshold=None):
"""Returns a function to be applied on each value to format it
"""
# the float_format parameter supersedes self.float_format
if float_format is None:
float_format = self.float_format
# we are going to compose different functions, to first convert to
# a string, then replace the decimal symbol, and finally chop according
# to the threshold
# when there is no float_format, we use str instead of '%g'
# because str(0.0) = '0.0' while '%g' % 0.0 = '0'
if float_format:
def base_formatter(v):
return (float_format % v) if notnull(v) else self.na_rep
else:
def base_formatter(v):
return str(v) if notnull(v) else self.na_rep
if self.decimal != '.':
def decimal_formatter(v):
return base_formatter(v).replace('.', self.decimal, 1)
else:
decimal_formatter = base_formatter
if threshold is None:
return decimal_formatter
def formatter(value):
if notnull(value):
if abs(value) > threshold:
return decimal_formatter(value)
else:
return decimal_formatter(0.0)
else:
return self.na_rep
return formatter
def get_result_as_array(self):
"""
Returns the float values converted into strings using
        the parameters given at initialisation, as a numpy array
"""
if self.formatter is not None:
return np.array([self.formatter(x) for x in self.values])
if self.fixed_width:
threshold = get_option("display.chop_threshold")
else:
threshold = None
# if we have a fixed_width, we'll need to try different float_format
def format_values_with(float_format):
formatter = self._value_formatter(float_format, threshold)
# separate the wheat from the chaff
values = self.values
mask = isnull(values)
if hasattr(values, 'to_dense'): # sparse numpy ndarray
values = values.to_dense()
values = np.array(values, dtype='object')
values[mask] = self.na_rep
imask = (~mask).ravel()
values.flat[imask] = np.array([formatter(val)
for val in values.ravel()[imask]])
if self.fixed_width:
return _trim_zeros(values, self.na_rep)
return values
# There is a special default string when we are fixed-width
# The default is otherwise to use str instead of a formatting string
if self.float_format is None and self.fixed_width:
float_format = '%% .%df' % self.digits
else:
float_format = self.float_format
formatted_values = format_values_with(float_format)
if not self.fixed_width:
return formatted_values
        # we need to convert to engineering format if some values are too small
# and would appear as 0, or if some values are too big and take too
# much space
if len(formatted_values) > 0:
maxlen = max(len(x) for x in formatted_values)
too_long = maxlen > self.digits + 6
else:
too_long = False
with np.errstate(invalid='ignore'):
abs_vals = np.abs(self.values)
# this is pretty arbitrary for now
        # large values: more than 8 characters including decimal symbol
# and first digit, hence > 1e6
has_large_values = (abs_vals > 1e6).any()
has_small_values = ((abs_vals < 10**(-self.digits)) &
(abs_vals > 0)).any()
if has_small_values or (too_long and has_large_values):
float_format = '%% .%de' % self.digits
formatted_values = format_values_with(float_format)
return formatted_values
def _format_strings(self):
# shortcut
if self.formatter is not None:
return [self.formatter(x) for x in self.values]
return list(self.get_result_as_array())
class IntArrayFormatter(GenericArrayFormatter):
def _format_strings(self):
formatter = self.formatter or (lambda x: '% d' % x)
fmt_values = [formatter(x) for x in self.values]
return fmt_values
class Datetime64Formatter(GenericArrayFormatter):
def __init__(self, values, nat_rep='NaT', date_format=None, **kwargs):
super(Datetime64Formatter, self).__init__(values, **kwargs)
self.nat_rep = nat_rep
self.date_format = date_format
def _format_strings(self):
""" we by definition have DO NOT have a TZ """
values = self.values
if not isinstance(values, DatetimeIndex):
values = DatetimeIndex(values)
if self.formatter is not None and callable(self.formatter):
return [self.formatter(x) for x in values]
fmt_values = format_array_from_datetime(
values.asi8.ravel(),
format=_get_format_datetime64_from_values(values,
self.date_format),
na_rep=self.nat_rep).reshape(values.shape)
return fmt_values.tolist()
class IntervalArrayFormatter(GenericArrayFormatter):
def __init__(self, values, *args, **kwargs):
GenericArrayFormatter.__init__(self, values, *args, **kwargs)
def _format_strings(self):
formatter = self.formatter or str
fmt_values = np.array([formatter(x) for x in self.values])
return fmt_values
class PeriodArrayFormatter(IntArrayFormatter):
def _format_strings(self):
from pandas.core.indexes.period import IncompatibleFrequency
try:
values = PeriodIndex(self.values).to_native_types()
except IncompatibleFrequency:
            # periods may contain different freqs
values = Index(self.values, dtype='object').to_native_types()
formatter = self.formatter or (lambda x: '%s' % x)
fmt_values = [formatter(x) for x in values]
return fmt_values
class CategoricalArrayFormatter(GenericArrayFormatter):
def __init__(self, values, *args, **kwargs):
GenericArrayFormatter.__init__(self, values, *args, **kwargs)
def _format_strings(self):
fmt_values = format_array(self.values.get_values(), self.formatter,
float_format=self.float_format,
na_rep=self.na_rep, digits=self.digits,
space=self.space, justify=self.justify)
return fmt_values
def format_percentiles(percentiles):
"""
Outputs rounded and formatted percentiles.
Parameters
----------
percentiles : list-like, containing floats from interval [0,1]
Returns
-------
formatted : list of strings
Notes
-----
Rounding precision is chosen so that: (1) if any two elements of
``percentiles`` differ, they remain different after rounding
(2) no entry is *rounded* to 0% or 100%.
Any non-integer is always rounded to at least 1 decimal place.
Examples
--------
Keeps all entries different after rounding:
>>> format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999])
['1.999%', '2.001%', '50%', '66.667%', '99.99%']
No element is rounded to 0% or 100% (unless already equal to it).
Duplicates are allowed:
>>> format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999])
['0%', '50%', '2.0%', '50%', '66.67%', '99.99%']
"""
percentiles = np.asarray(percentiles)
# It checks for np.NaN as well
with np.errstate(invalid='ignore'):
if not is_numeric_dtype(percentiles) or not np.all(percentiles >= 0) \
or not np.all(percentiles <= 1):
raise ValueError("percentiles should all be in the interval [0,1]")
percentiles = 100 * percentiles
int_idx = (percentiles.astype(int) == percentiles)
if np.all(int_idx):
out = percentiles.astype(int).astype(str)
return [i + '%' for i in out]
unique_pcts = np.unique(percentiles)
to_begin = unique_pcts[0] if unique_pcts[0] > 0 else None
to_end = 100 - unique_pcts[-1] if unique_pcts[-1] < 100 else None
# Least precision that keeps percentiles unique after rounding
prec = -np.floor(np.log10(np.min(
np.ediff1d(unique_pcts, to_begin=to_begin, to_end=to_end)
))).astype(int)
prec = max(1, prec)
out = np.empty_like(percentiles, dtype=object)
out[int_idx] = percentiles[int_idx].astype(int).astype(str)
out[~int_idx] = percentiles[~int_idx].round(prec).astype(str)
return [i + '%' for i in out]
def _is_dates_only(values):
# return a boolean if we are only dates (and don't have a timezone)
values = DatetimeIndex(values)
if values.tz is not None:
return False
values_int = values.asi8
consider_values = values_int != iNaT
one_day_nanos = (86400 * 1e9)
even_days = np.logical_and(consider_values,
values_int % one_day_nanos != 0).sum() == 0
if even_days:
return True
return False
def _format_datetime64(x, tz=None, nat_rep='NaT'):
if x is None or lib.checknull(x):
return nat_rep
if tz is not None or not isinstance(x, Timestamp):
x = Timestamp(x, tz=tz)
return str(x)
def _format_datetime64_dateonly(x, nat_rep='NaT', date_format=None):
if x is None or lib.checknull(x):
return nat_rep
if not isinstance(x, Timestamp):
x = Timestamp(x)
if date_format:
return x.strftime(date_format)
else:
return x._date_repr
def _get_format_datetime64(is_dates_only, nat_rep='NaT', date_format=None):
if is_dates_only:
return lambda x, tz=None: _format_datetime64_dateonly(
x, nat_rep=nat_rep, date_format=date_format)
else:
return lambda x, tz=None: _format_datetime64(x, tz=tz, nat_rep=nat_rep)
def _get_format_datetime64_from_values(values, date_format):
""" given values and a date_format, return a string format """
is_dates_only = _is_dates_only(values)
if is_dates_only:
return date_format or "%Y-%m-%d"
return date_format
class Datetime64TZFormatter(Datetime64Formatter):
def _format_strings(self):
""" we by definition have a TZ """
values = self.values.asobject
is_dates_only = _is_dates_only(values)
formatter = (self.formatter or
_get_format_datetime64(is_dates_only,
date_format=self.date_format))
fmt_values = [formatter(x) for x in values]
return fmt_values
class Timedelta64Formatter(GenericArrayFormatter):
def __init__(self, values, nat_rep='NaT', box=False, **kwargs):
super(Timedelta64Formatter, self).__init__(values, **kwargs)
self.nat_rep = nat_rep
self.box = box
def _format_strings(self):
formatter = (self.formatter or
_get_format_timedelta64(self.values, nat_rep=self.nat_rep,
box=self.box))
fmt_values = np.array([formatter(x) for x in self.values])
return fmt_values
def _get_format_timedelta64(values, nat_rep='NaT', box=False):
"""
Return a formatter function for a range of timedeltas.
These will all have the same format argument
If box, then show the return in quotes
"""
values_int = values.astype(np.int64)
consider_values = values_int != iNaT
one_day_nanos = (86400 * 1e9)
even_days = np.logical_and(consider_values,
values_int % one_day_nanos != 0).sum() == 0
all_sub_day = np.logical_and(
consider_values, np.abs(values_int) >= one_day_nanos).sum() == 0
if even_days:
format = 'even_day'
elif all_sub_day:
format = 'sub_day'
else:
format = 'long'
def _formatter(x):
if x is None or lib.checknull(x):
return nat_rep
if not isinstance(x, Timedelta):
x = Timedelta(x)
result = x._repr_base(format=format)
if box:
result = "'{0}'".format(result)
return result
return _formatter
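# --- Illustrative sketch (added; not part of the original module) ---------
# The factory above chooses one layout for the whole array: 'even_day' when
# every value is a whole number of days, 'sub_day' when every value is
# shorter than a day, and 'long' otherwise; box=True wraps each rendered
# value in quotes. Hypothetical example:
def _example_timedelta_format():
    tds = pd.to_timedelta(["1 days", "2 days"]).values   # timedelta64[ns]
    fmt = _get_format_timedelta64(tds, box=True)
    return [fmt(x) for x in tds]      # e.g. ["'1 days'", "'2 days'"]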
def _make_fixed_width(strings, justify='right', minimum=None, adj=None):
if len(strings) == 0 or justify == 'all':
return strings
if adj is None:
adj = _get_adjustment()
max_len = np.max([adj.len(x) for x in strings])
if minimum is not None:
max_len = max(minimum, max_len)
conf_max = get_option("display.max_colwidth")
if conf_max is not None and max_len > conf_max:
max_len = conf_max
def just(x):
if conf_max is not None:
if (conf_max > 3) & (adj.len(x) > max_len):
x = x[:max_len - 3] + '...'
return x
strings = [just(x) for x in strings]
result = adj.justify(strings, max_len, mode=justify)
return result
def _trim_zeros(str_floats, na_rep='NaN'):
"""
Trims zeros, leaving just one before the decimal points if need be.
"""
trimmed = str_floats
def _cond(values):
non_na = [x for x in values if x != na_rep]
return (len(non_na) > 0 and all([x.endswith('0') for x in non_na]) and
not (any([('e' in x) or ('E' in x) for x in non_na])))
while _cond(trimmed):
trimmed = [x[:-1] if x != na_rep else x for x in trimmed]
# leave one 0 after the decimal points if need be.
return [x + "0" if x.endswith('.') and x != na_rep else x for x in trimmed]
def single_column_table(column, align=None, style=None):
table = '<table'
if align is not None:
table += (' align="%s"' % align)
if style is not None:
table += (' style="%s"' % style)
table += '><tbody>'
for i in column:
table += ('<tr><td>%s</td></tr>' % str(i))
table += '</tbody></table>'
return table
def single_row_table(row): # pragma: no cover
table = '<table><tbody><tr>'
for i in row:
table += ('<td>%s</td>' % str(i))
table += '</tr></tbody></table>'
return table
def _has_names(index):
if isinstance(index, MultiIndex):
return any([x is not None for x in index.names])
else:
return index.name is not None
class EngFormatter(object):
"""
Formats float values according to engineering format.
Based on matplotlib.ticker.EngFormatter
"""
# The SI engineering prefixes
ENG_PREFIXES = {
-24: "y",
-21: "z",
-18: "a",
-15: "f",
-12: "p",
-9: "n",
-6: "u",
-3: "m",
0: "",
3: "k",
6: "M",
9: "G",
12: "T",
15: "P",
18: "E",
21: "Z",
24: "Y"
}
def __init__(self, accuracy=None, use_eng_prefix=False):
self.accuracy = accuracy
self.use_eng_prefix = use_eng_prefix
def __call__(self, num):
""" Formats a number in engineering notation, appending a letter
representing the power of 1000 of the original number. Some examples:
>>> format_eng(0) # for self.accuracy = 0
' 0'
>>> format_eng(1000000) # for self.accuracy = 1,
# self.use_eng_prefix = True
' 1.0M'
>>> format_eng("-1e-6") # for self.accuracy = 2
# self.use_eng_prefix = False
'-1.00E-06'
@param num: the value to represent
@type num: either a numeric value or a string that can be converted to
a numeric value (as per decimal.Decimal constructor)
@return: engineering formatted string
"""
import decimal
import math
dnum = decimal.Decimal(str(num))
if decimal.Decimal.is_nan(dnum):
return 'NaN'
if decimal.Decimal.is_infinite(dnum):
return 'inf'
sign = 1
if dnum < 0: # pragma: no cover
sign = -1
dnum = -dnum
if dnum != 0:
pow10 = decimal.Decimal(int(math.floor(dnum.log10() / 3) * 3))
else:
pow10 = decimal.Decimal(0)
pow10 = pow10.min(max(self.ENG_PREFIXES.keys()))
pow10 = pow10.max(min(self.ENG_PREFIXES.keys()))
int_pow10 = int(pow10)
if self.use_eng_prefix:
prefix = self.ENG_PREFIXES[int_pow10]
else:
if int_pow10 < 0:
prefix = 'E-%02d' % (-int_pow10)
else:
prefix = 'E+%02d' % int_pow10
mant = sign * dnum / (10**pow10)
if self.accuracy is None: # pragma: no cover
format_str = u("% g%s")
else:
format_str = (u("%% .%if%%s") % self.accuracy)
formatted = format_str % (mant, prefix)
return formatted # .strip()
def set_eng_float_format(accuracy=3, use_eng_prefix=False):
"""
Alter default behavior on how float is formatted in DataFrame.
Format float in engineering format. By accuracy, we mean the number of
decimal digits after the decimal point.
See also EngFormatter.
"""
set_option("display.float_format", EngFormatter(accuracy, use_eng_prefix))
set_option("display.column_space", max(12, accuracy + 9))
def _put_lines(buf, lines):
if any(isinstance(x, compat.text_type) for x in lines):
lines = [compat.text_type(x) for x in lines]
buf.write('\n'.join(lines))
def _binify(cols, line_width):
adjoin_width = 1
bins = []
curr_width = 0
i_last_column = len(cols) - 1
for i, w in enumerate(cols):
w_adjoined = w + adjoin_width
curr_width += w_adjoined
if i_last_column == i:
wrap = curr_width + 1 > line_width and i > 0
else:
wrap = curr_width + 2 > line_width and i > 0
if wrap:
bins.append(i)
curr_width = w_adjoined
bins.append(len(cols))
return bins
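# A worked illustration of _binify (hypothetical input, not part of the original
# module): _binify([5, 5, 5], line_width=12) returns [1, 3], i.e. the first column
# fits in one chunk and the remaining two columns are wrapped into the next chunk.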
|
bsd-3-clause
|
vkscool/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/path.py
|
69
|
20263
|
"""
Contains a class for managing paths (polylines).
"""
import math
from weakref import WeakValueDictionary
import numpy as np
from numpy import ma
from matplotlib._path import point_in_path, get_path_extents, \
point_in_path_collection, get_path_collection_extents, \
path_in_path, path_intersects_path, convert_path_to_polygons
from matplotlib.cbook import simple_linear_interpolation
class Path(object):
"""
:class:`Path` represents a series of possibly disconnected,
possibly closed, line and curve segments.
The underlying storage is made up of two parallel numpy arrays:
- *vertices*: an Nx2 float array of vertices
- *codes*: an N-length uint8 array of vertex types
These two arrays always have the same length in the first
dimension. For example, to represent a cubic curve, you must
provide three vertices as well as three codes ``CURVE4``.
The code types are:
- ``STOP`` : 1 vertex (ignored)
A marker for the end of the entire path (currently not
required and ignored)
- ``MOVETO`` : 1 vertex
Pick up the pen and move to the given vertex.
- ``LINETO`` : 1 vertex
Draw a line from the current position to the given vertex.
- ``CURVE3`` : 1 control point, 1 endpoint
Draw a quadratic Bezier curve from the current position,
with the given control point, to the given end point.
- ``CURVE4`` : 2 control points, 1 endpoint
Draw a cubic Bezier curve from the current position, with
the given control points, to the given end point.
- ``CLOSEPOLY`` : 1 vertex (ignored)
Draw a line segment to the start point of the current
polyline.
Users of Path objects should not access the vertices and codes
arrays directly. Instead, they should use :meth:`iter_segments`
to get the vertex/code pairs. This is important, since many
:class:`Path` objects, as an optimization, do not store a *codes*
at all, but have a default one provided for them by
:meth:`iter_segments`.
Note also that the vertices and codes arrays should be treated as
immutable -- there are a number of optimizations and assumptions
made up front in the constructor that will not change when the
data changes.
"""
# Path codes
STOP = 0 # 1 vertex
MOVETO = 1 # 1 vertex
LINETO = 2 # 1 vertex
CURVE3 = 3 # 2 vertices
CURVE4 = 4 # 3 vertices
CLOSEPOLY = 5 # 1 vertex
NUM_VERTICES = [1, 1, 1, 2, 3, 1]
code_type = np.uint8
def __init__(self, vertices, codes=None):
"""
Create a new path with the given vertices and codes.
*vertices* is an Nx2 numpy float array, masked array or Python
sequence.
*codes* is an N-length numpy array or Python sequence of type
:attr:`matplotlib.path.Path.code_type`.
These two arrays must have the same length in the first
dimension.
If *codes* is None, *vertices* will be treated as a series of
line segments.
If *vertices* contains masked values, they will be converted
to NaNs which are then handled correctly by the Agg
PathIterator and other consumers of path data, such as
:meth:`iter_segments`.
"""
if ma.isMaskedArray(vertices):
vertices = vertices.astype(np.float_).filled(np.nan)
else:
vertices = np.asarray(vertices, np.float_)
if codes is not None:
codes = np.asarray(codes, self.code_type)
assert codes.ndim == 1
assert len(codes) == len(vertices)
assert vertices.ndim == 2
assert vertices.shape[1] == 2
self.should_simplify = (len(vertices) >= 128 and
(codes is None or np.all(codes <= Path.LINETO)))
self.has_nonfinite = not np.isfinite(vertices).all()
self.codes = codes
self.vertices = vertices
#@staticmethod
def make_compound_path(*args):
"""
(staticmethod) Make a compound path from a list of Path
objects. Only polygons (not curves) are supported.
"""
for p in args:
assert p.codes is None
lengths = [len(x) for x in args]
total_length = sum(lengths)
vertices = np.vstack([x.vertices for x in args])
vertices.reshape((total_length, 2))
codes = Path.LINETO * np.ones(total_length)
i = 0
for length in lengths:
codes[i] = Path.MOVETO
i += length
return Path(vertices, codes)
make_compound_path = staticmethod(make_compound_path)
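# A hypothetical usage sketch for make_compound_path (assumes two code-less paths,
# since only polygons are supported): the compound path starts each sub-path with
# a MOVETO code and uses LINETO for every other vertex.
# p1 = Path([[0., 0.], [1., 0.], [1., 1.]])
# p2 = Path([[2., 2.], [3., 2.], [3., 3.]])
# compound = Path.make_compound_path(p1, p2)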
def __repr__(self):
return "Path(%s, %s)" % (self.vertices, self.codes)
def __len__(self):
return len(self.vertices)
def iter_segments(self, simplify=None):
"""
Iterates over all of the curve segments in the path. Each
iteration returns a 2-tuple (*vertices*, *code*), where
*vertices* is a sequence of 1 - 3 coordinate pairs, and *code* is
one of the :class:`Path` codes.
If *simplify* is provided, it must be a tuple (*width*,
*height*) defining the size of the figure, in native units
(e.g. pixels or points). Simplification implies both removing
adjacent line segments that are very close to parallel, and
removing line segments outside of the figure. The path will
be simplified *only* if :attr:`should_simplify` is True, which
is determined in the constructor by these criteria:
- No curves
- More than 128 vertices
"""
vertices = self.vertices
if not len(vertices):
return
codes = self.codes
len_vertices = len(vertices)
isfinite = np.isfinite
NUM_VERTICES = self.NUM_VERTICES
MOVETO = self.MOVETO
LINETO = self.LINETO
CLOSEPOLY = self.CLOSEPOLY
STOP = self.STOP
if simplify is not None and self.should_simplify:
polygons = self.to_polygons(None, *simplify)
for vertices in polygons:
yield vertices[0], MOVETO
for v in vertices[1:]:
yield v, LINETO
elif codes is None:
if self.has_nonfinite:
next_code = MOVETO
for v in vertices:
if np.isfinite(v).all():
yield v, next_code
next_code = LINETO
else:
next_code = MOVETO
else:
yield vertices[0], MOVETO
for v in vertices[1:]:
yield v, LINETO
else:
i = 0
was_nan = False
while i < len_vertices:
code = codes[i]
if code == CLOSEPOLY:
yield [], code
i += 1
elif code == STOP:
return
else:
num_vertices = NUM_VERTICES[int(code)]
curr_vertices = vertices[i:i+num_vertices].flatten()
if not isfinite(curr_vertices).all():
was_nan = True
elif was_nan:
yield curr_vertices[-2:], MOVETO
was_nan = False
else:
yield curr_vertices, code
i += num_vertices
def transformed(self, transform):
"""
Return a transformed copy of the path.
.. seealso::
:class:`matplotlib.transforms.TransformedPath`:
A specialized path class that will cache the
transformed result and automatically update when the
transform changes.
"""
return Path(transform.transform(self.vertices), self.codes)
def contains_point(self, point, transform=None):
"""
Returns *True* if the path contains the given point.
If *transform* is not *None*, the path will be transformed
before performing the test.
"""
if transform is not None:
transform = transform.frozen()
return point_in_path(point[0], point[1], self, transform)
def contains_path(self, path, transform=None):
"""
Returns *True* if this path completely contains the given path.
If *transform* is not *None*, the path will be transformed
before performing the test.
"""
if transform is not None:
transform = transform.frozen()
return path_in_path(self, None, path, transform)
def get_extents(self, transform=None):
"""
Returns the extents (*xmin*, *ymin*, *xmax*, *ymax*) of the
path.
Unlike computing the extents on the *vertices* alone, this
algorithm will take into account the curves and deal with
control points appropriately.
"""
from transforms import Bbox
if transform is not None:
transform = transform.frozen()
return Bbox(get_path_extents(self, transform))
def intersects_path(self, other, filled=True):
"""
Returns *True* if this path intersects another given path.
*filled*, when True, treats the paths as if they were filled.
That is, if one path completely encloses the other,
:meth:`intersects_path` will return True.
"""
return path_intersects_path(self, other, filled)
def intersects_bbox(self, bbox, filled=True):
"""
Returns *True* if this path intersects a given
:class:`~matplotlib.transforms.Bbox`.
*filled*, when True, treats the path as if it was filled.
That is, if one path completely encloses the other,
:meth:`intersects_path` will return True.
"""
from transforms import BboxTransformTo
rectangle = self.unit_rectangle().transformed(
BboxTransformTo(bbox))
result = self.intersects_path(rectangle, filled)
return result
def interpolated(self, steps):
"""
Returns a new path resampled to length N x steps. Does not
currently handle interpolating curves.
"""
vertices = simple_linear_interpolation(self.vertices, steps)
codes = self.codes
if codes is not None:
new_codes = Path.LINETO * np.ones(((len(codes) - 1) * steps + 1, ))
new_codes[0::steps] = codes
else:
new_codes = None
return Path(vertices, new_codes)
def to_polygons(self, transform=None, width=0, height=0):
"""
Convert this path to a list of polygons. Each polygon is an
Nx2 array of vertices. In other words, each polygon has no
``MOVETO`` instructions or curves. This is useful for
displaying in backends that do not support compound paths or
Bezier curves, such as GDK.
If *width* and *height* are both non-zero then the lines will
be simplified so that vertices outside of (0, 0), (width,
height) will be clipped.
"""
if len(self.vertices) == 0:
return []
if transform is not None:
transform = transform.frozen()
if self.codes is None and (width == 0 or height == 0):
if transform is None:
return [self.vertices]
else:
return [transform.transform(self.vertices)]
# Deal with the case where there are curves and/or multiple
# subpaths (using extension code)
return convert_path_to_polygons(self, transform, width, height)
_unit_rectangle = None
#@classmethod
def unit_rectangle(cls):
"""
(staticmethod) Returns a :class:`Path` of the unit rectangle
from (0, 0) to (1, 1).
"""
if cls._unit_rectangle is None:
cls._unit_rectangle = \
Path([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.0, 0.0]])
return cls._unit_rectangle
unit_rectangle = classmethod(unit_rectangle)
_unit_regular_polygons = WeakValueDictionary()
#@classmethod
def unit_regular_polygon(cls, numVertices):
"""
(staticmethod) Returns a :class:`Path` for a unit regular
polygon with the given *numVertices* and radius of 1.0,
centered at (0, 0).
"""
if numVertices <= 16:
path = cls._unit_regular_polygons.get(numVertices)
else:
path = None
if path is None:
theta = (2*np.pi/numVertices *
np.arange(numVertices + 1).reshape((numVertices + 1, 1)))
# This initial rotation is to make sure the polygon always
# "points-up"
theta += np.pi / 2.0
verts = np.concatenate((np.cos(theta), np.sin(theta)), 1)
path = Path(verts)
cls._unit_regular_polygons[numVertices] = path
return path
unit_regular_polygon = classmethod(unit_regular_polygon)
_unit_regular_stars = WeakValueDictionary()
#@classmethod
def unit_regular_star(cls, numVertices, innerCircle=0.5):
"""
(staticmethod) Returns a :class:`Path` for a unit regular star
with the given numVertices and radius of 1.0, centered at (0,
0).
"""
if numVertices <= 16:
path = cls._unit_regular_stars.get((numVertices, innerCircle))
else:
path = None
if path is None:
ns2 = numVertices * 2
theta = (2*np.pi/ns2 * np.arange(ns2 + 1))
# This initial rotation is to make sure the polygon always
# "points-up"
theta += np.pi / 2.0
r = np.ones(ns2 + 1)
r[1::2] = innerCircle
verts = np.vstack((r*np.cos(theta), r*np.sin(theta))).transpose()
path = Path(verts)
cls._unit_regular_stars[(numVertices, innerCircle)] = path
return path
unit_regular_star = classmethod(unit_regular_star)
#@classmethod
def unit_regular_asterisk(cls, numVertices):
"""
(staticmethod) Returns a :class:`Path` for a unit regular
asterisk with the given numVertices and radius of 1.0,
centered at (0, 0).
"""
return cls.unit_regular_star(numVertices, 0.0)
unit_regular_asterisk = classmethod(unit_regular_asterisk)
_unit_circle = None
#@classmethod
def unit_circle(cls):
"""
(staticmethod) Returns a :class:`Path` of the unit circle.
The circle is approximated using cubic Bezier curves. This
uses 8 splines around the circle using the approach presented
here:
Lancaster, Don. `Approximating a Circle or an Ellipse Using Four
Bezier Cubic Splines <http://www.tinaja.com/glib/ellipse4.pdf>`_.
"""
if cls._unit_circle is None:
MAGIC = 0.2652031
SQRTHALF = np.sqrt(0.5)
MAGIC45 = np.sqrt((MAGIC*MAGIC) / 2.0)
vertices = np.array(
[[0.0, -1.0],
[MAGIC, -1.0],
[SQRTHALF-MAGIC45, -SQRTHALF-MAGIC45],
[SQRTHALF, -SQRTHALF],
[SQRTHALF+MAGIC45, -SQRTHALF+MAGIC45],
[1.0, -MAGIC],
[1.0, 0.0],
[1.0, MAGIC],
[SQRTHALF+MAGIC45, SQRTHALF-MAGIC45],
[SQRTHALF, SQRTHALF],
[SQRTHALF-MAGIC45, SQRTHALF+MAGIC45],
[MAGIC, 1.0],
[0.0, 1.0],
[-MAGIC, 1.0],
[-SQRTHALF+MAGIC45, SQRTHALF+MAGIC45],
[-SQRTHALF, SQRTHALF],
[-SQRTHALF-MAGIC45, SQRTHALF-MAGIC45],
[-1.0, MAGIC],
[-1.0, 0.0],
[-1.0, -MAGIC],
[-SQRTHALF-MAGIC45, -SQRTHALF+MAGIC45],
[-SQRTHALF, -SQRTHALF],
[-SQRTHALF+MAGIC45, -SQRTHALF-MAGIC45],
[-MAGIC, -1.0],
[0.0, -1.0],
[0.0, -1.0]],
np.float_)
codes = cls.CURVE4 * np.ones(26)
codes[0] = cls.MOVETO
codes[-1] = cls.CLOSEPOLY
cls._unit_circle = Path(vertices, codes)
return cls._unit_circle
unit_circle = classmethod(unit_circle)
#@classmethod
def arc(cls, theta1, theta2, n=None, is_wedge=False):
"""
(staticmethod) Returns an arc on the unit circle from angle
*theta1* to angle *theta2* (in degrees).
If *n* is provided, it is the number of spline segments to make.
If *n* is not provided, the number of spline segments is
determined based on the delta between *theta1* and *theta2*.
Maisonobe, L. 2003. `Drawing an elliptical arc using
polylines, quadratic or cubic Bezier curves
<http://www.spaceroots.org/documents/ellipse/index.html>`_.
"""
# degrees to radians
theta1 *= np.pi / 180.0
theta2 *= np.pi / 180.0
twopi = np.pi * 2.0
halfpi = np.pi * 0.5
eta1 = np.arctan2(np.sin(theta1), np.cos(theta1))
eta2 = np.arctan2(np.sin(theta2), np.cos(theta2))
eta2 -= twopi * np.floor((eta2 - eta1) / twopi)
if (theta2 - theta1 > np.pi) and (eta2 - eta1 < np.pi):
eta2 += twopi
# number of curve segments to make
if n is None:
n = int(2 ** np.ceil((eta2 - eta1) / halfpi))
if n < 1:
raise ValueError("n must be >= 1 or None")
deta = (eta2 - eta1) / n
t = np.tan(0.5 * deta)
alpha = np.sin(deta) * (np.sqrt(4.0 + 3.0 * t * t) - 1) / 3.0
steps = np.linspace(eta1, eta2, n + 1, True)
cos_eta = np.cos(steps)
sin_eta = np.sin(steps)
xA = cos_eta[:-1]
yA = sin_eta[:-1]
xA_dot = -yA
yA_dot = xA
xB = cos_eta[1:]
yB = sin_eta[1:]
xB_dot = -yB
yB_dot = xB
if is_wedge:
length = n * 3 + 4
vertices = np.zeros((length, 2), np.float_)
codes = Path.CURVE4 * np.ones((length, ), Path.code_type)
vertices[1] = [xA[0], yA[0]]
codes[0:2] = [Path.MOVETO, Path.LINETO]
codes[-2:] = [Path.LINETO, Path.CLOSEPOLY]
vertex_offset = 2
end = length - 2
else:
length = n * 3 + 1
vertices = np.zeros((length, 2), np.float_)
codes = Path.CURVE4 * np.ones((length, ), Path.code_type)
vertices[0] = [xA[0], yA[0]]
codes[0] = Path.MOVETO
vertex_offset = 1
end = length
vertices[vertex_offset :end:3, 0] = xA + alpha * xA_dot
vertices[vertex_offset :end:3, 1] = yA + alpha * yA_dot
vertices[vertex_offset+1:end:3, 0] = xB - alpha * xB_dot
vertices[vertex_offset+1:end:3, 1] = yB - alpha * yB_dot
vertices[vertex_offset+2:end:3, 0] = xB
vertices[vertex_offset+2:end:3, 1] = yB
return Path(vertices, codes)
arc = classmethod(arc)
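# A hypothetical usage sketch for the arc helper above (angles are in degrees):
# quarter = Path.arc(0, 90)               # cubic Bezier approximation of a quarter circle
# wedge = Path.arc(0, 90, is_wedge=True)  # same arc closed back through (0, 0)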
#@classmethod
def wedge(cls, theta1, theta2, n=None):
"""
(staticmethod) Returns a wedge of the unit circle from angle
*theta1* to angle *theta2* (in degrees).
If *n* is provided, it is the number of spline segments to make.
If *n* is not provided, the number of spline segments is
determined based on the delta between *theta1* and *theta2*.
"""
return cls.arc(theta1, theta2, n, True)
wedge = classmethod(wedge)
_get_path_collection_extents = get_path_collection_extents
def get_path_collection_extents(*args):
"""
Given a sequence of :class:`Path` objects, returns the bounding
box that encapsulates all of them.
"""
from transforms import Bbox
if len(args[1]) == 0:
raise ValueError("No paths provided")
return Bbox.from_extents(*_get_path_collection_extents(*args))
|
gpl-3.0
|
jakobworldpeace/scikit-learn
|
sklearn/neighbors/tests/test_nearest_centroid.py
|
305
|
4121
|
"""
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
|
bsd-3-clause
|
Moriadry/tensorflow
|
tensorflow/python/estimator/canned/dnn_linear_combined_test.py
|
46
|
26964
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dnn_linear_combined.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.estimator.canned import dnn_linear_combined
from tensorflow.python.estimator.canned import dnn_testing_utils
from tensorflow.python.estimator.canned import linear_testing_utils
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import optimizer as optimizer_lib
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
class DNNOnlyModelFnTest(dnn_testing_utils.BaseDNNModelFnTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNModelFnTest.__init__(self, self._dnn_only_model_fn)
def _dnn_only_model_fn(self,
features,
labels,
mode,
head,
hidden_units,
feature_columns,
optimizer='Adagrad',
activation_fn=nn.relu,
dropout=None,
input_layer_partitioner=None,
config=None):
return dnn_linear_combined._dnn_linear_combined_model_fn(
features=features,
labels=labels,
mode=mode,
head=head,
linear_feature_columns=[],
dnn_hidden_units=hidden_units,
dnn_feature_columns=feature_columns,
dnn_optimizer=optimizer,
dnn_activation_fn=activation_fn,
dnn_dropout=dropout,
input_layer_partitioner=input_layer_partitioner,
config=config)
# A function to mimic linear-regressor init so the same tests can be reused.
def _linear_regressor_fn(feature_columns,
model_dir=None,
label_dimension=1,
weight_column=None,
optimizer='Ftrl',
config=None,
partitioner=None):
return dnn_linear_combined.DNNLinearCombinedRegressor(
model_dir=model_dir,
linear_feature_columns=feature_columns,
linear_optimizer=optimizer,
label_dimension=label_dimension,
weight_column=weight_column,
input_layer_partitioner=partitioner,
config=config)
class LinearOnlyRegressorPartitionerTest(
linear_testing_utils.BaseLinearRegressorPartitionerTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorPartitionerTest.__init__(
self, _linear_regressor_fn)
class LinearOnlyRegressorEvaluationTest(
linear_testing_utils.BaseLinearRegressorEvaluationTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorEvaluationTest.__init__(
self, _linear_regressor_fn)
class LinearOnlyRegressorPredictTest(
linear_testing_utils.BaseLinearRegressorPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorPredictTest.__init__(
self, _linear_regressor_fn)
class LinearOnlyRegressorIntegrationTest(
linear_testing_utils.BaseLinearRegressorIntegrationTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorIntegrationTest.__init__(
self, _linear_regressor_fn)
class LinearOnlyRegressorTrainingTest(
linear_testing_utils.BaseLinearRegressorTrainingTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorTrainingTest.__init__(
self, _linear_regressor_fn)
def _linear_classifier_fn(feature_columns,
model_dir=None,
n_classes=2,
weight_column=None,
label_vocabulary=None,
optimizer='Ftrl',
config=None,
partitioner=None):
return dnn_linear_combined.DNNLinearCombinedClassifier(
model_dir=model_dir,
linear_feature_columns=feature_columns,
linear_optimizer=optimizer,
n_classes=n_classes,
weight_column=weight_column,
label_vocabulary=label_vocabulary,
input_layer_partitioner=partitioner,
config=config)
class LinearOnlyClassifierTrainingTest(
linear_testing_utils.BaseLinearClassifierTrainingTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearClassifierTrainingTest.__init__(
self, linear_classifier_fn=_linear_classifier_fn)
class LinearOnlyClassifierClassesEvaluationTest(
linear_testing_utils.BaseLinearClassifierEvaluationTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearClassifierEvaluationTest.__init__(
self, linear_classifier_fn=_linear_classifier_fn)
class LinearOnlyClassifierPredictTest(
linear_testing_utils.BaseLinearClassifierPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearClassifierPredictTest.__init__(
self, linear_classifier_fn=_linear_classifier_fn)
class LinearOnlyClassifierIntegrationTest(
linear_testing_utils.BaseLinearClassifierIntegrationTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearClassifierIntegrationTest.__init__(
self, linear_classifier_fn=_linear_classifier_fn)
class DNNLinearCombinedRegressorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
label_dimension, batch_size):
linear_feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
dnn_feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
feature_columns = linear_feature_columns + dnn_feature_columns
est = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=linear_feature_columns,
dnn_hidden_units=(2, 2),
dnn_feature_columns=dnn_feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predictions = np.array([
x[prediction_keys.PredictionKeys.PREDICTIONS]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
label_dimension = 1
batch_size = 10
data = np.linspace(0., 2., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
# A function to mimic dnn-classifier init so the same tests can be reused.
def _dnn_classifier_fn(hidden_units,
feature_columns,
model_dir=None,
n_classes=2,
weight_column=None,
label_vocabulary=None,
optimizer='Adagrad',
config=None,
input_layer_partitioner=None):
return dnn_linear_combined.DNNLinearCombinedClassifier(
model_dir=model_dir,
dnn_hidden_units=hidden_units,
dnn_feature_columns=feature_columns,
dnn_optimizer=optimizer,
n_classes=n_classes,
weight_column=weight_column,
label_vocabulary=label_vocabulary,
input_layer_partitioner=input_layer_partitioner,
config=config)
class DNNOnlyClassifierEvaluateTest(
dnn_testing_utils.BaseDNNClassifierEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__(
self, _dnn_classifier_fn)
class DNNOnlyClassifierPredictTest(
dnn_testing_utils.BaseDNNClassifierPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierPredictTest.__init__(
self, _dnn_classifier_fn)
class DNNOnlyClassifierTrainTest(
dnn_testing_utils.BaseDNNClassifierTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierTrainTest.__init__(
self, _dnn_classifier_fn)
# A function to mimic dnn-regressor init so the same tests can be reused.
def _dnn_regressor_fn(hidden_units,
feature_columns,
model_dir=None,
label_dimension=1,
weight_column=None,
optimizer='Adagrad',
config=None,
input_layer_partitioner=None):
return dnn_linear_combined.DNNLinearCombinedRegressor(
model_dir=model_dir,
dnn_hidden_units=hidden_units,
dnn_feature_columns=feature_columns,
dnn_optimizer=optimizer,
label_dimension=label_dimension,
weight_column=weight_column,
input_layer_partitioner=input_layer_partitioner,
config=config)
class DNNOnlyRegressorEvaluateTest(
dnn_testing_utils.BaseDNNRegressorEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(
self, _dnn_regressor_fn)
class DNNOnlyRegressorPredictTest(
dnn_testing_utils.BaseDNNRegressorPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(
self, _dnn_regressor_fn)
class DNNOnlyRegressorTrainTest(
dnn_testing_utils.BaseDNNRegressorTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(
self, _dnn_regressor_fn)
class DNNLinearCombinedClassifierIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _as_label(self, data_in_float):
return np.rint(data_in_float).astype(np.int64)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
n_classes, batch_size):
linear_feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
dnn_feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
feature_columns = linear_feature_columns + dnn_feature_columns
est = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_feature_columns,
dnn_hidden_units=(2, 2),
dnn_feature_columns=dnn_feature_columns,
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predicted_proba = np.array([
x[prediction_keys.PredictionKeys.PROBABILITIES]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
n_classes = 3
input_dimension = 2
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
x_data = data.reshape(batch_size, input_dimension)
y_data = self._as_label(np.reshape(data[:batch_size], (batch_size, 1)))
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
input_dimension = 1
n_classes = 2
batch_size = 10
data = np.linspace(0., n_classes - 1., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(self._as_label(data))
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
n_classes = 3
batch_size = 10
data = np.linspace(0., n_classes-1., batch_size * input_dimension,
dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum)),
'y':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=self._as_label(datum[:1]))),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([1], dtypes.int64),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
class DNNLinearCombinedTests(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _mock_optimizer(self, real_optimizer, var_name_prefix):
"""Verifies global_step is None and var_names start with given prefix."""
def _minimize(loss, global_step=None, var_list=None):
self.assertIsNone(global_step)
trainable_vars = var_list or ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
var_names = [var.name for var in trainable_vars]
self.assertTrue(
all([name.startswith(var_name_prefix) for name in var_names]))
# var is used to check that this op is called during training.
with ops.name_scope(''):
var = variables_lib.Variable(0., name=(var_name_prefix + '_called'))
with ops.control_dependencies([var.assign(100.)]):
return real_optimizer.minimize(loss, global_step, var_list)
optimizer_mock = test.mock.NonCallableMagicMock(
spec=optimizer_lib.Optimizer, wraps=real_optimizer)
optimizer_mock.minimize = test.mock.MagicMock(wraps=_minimize)
return optimizer_mock
def test_train_op_calls_both_dnn_and_linear(self):
opt = gradient_descent.GradientDescentOptimizer(1.)
x_column = feature_column.numeric_column('x')
input_fn = numpy_io.numpy_input_fn(
x={'x': np.array([[0.], [1.]])},
y=np.array([[0.], [1.]]),
batch_size=1,
shuffle=False)
est = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[x_column],
# verifies linear_optimizer is used only for linear part.
linear_optimizer=self._mock_optimizer(opt, 'linear'),
dnn_hidden_units=(2, 2),
dnn_feature_columns=[x_column],
# verifies dnn_optimizer is used only for dnn part.
dnn_optimizer=self._mock_optimizer(opt, 'dnn'),
model_dir=self._model_dir)
est.train(input_fn, steps=1)
# verifies train_op fires linear minimize op
self.assertEqual(100.,
checkpoint_utils.load_variable(
self._model_dir, 'linear_called'))
# verifies train_op fires dnn minimize op
self.assertEqual(100.,
checkpoint_utils.load_variable(
self._model_dir, 'dnn_called'))
def test_dnn_and_linear_logits_are_added(self):
with ops.Graph().as_default():
variables_lib.Variable([[1.0]], name='linear/linear_model/x/weights')
variables_lib.Variable([2.0], name='linear/linear_model/bias_weights')
variables_lib.Variable([[3.0]], name='dnn/hiddenlayer_0/kernel')
variables_lib.Variable([4.0], name='dnn/hiddenlayer_0/bias')
variables_lib.Variable([[5.0]], name='dnn/logits/kernel')
variables_lib.Variable([6.0], name='dnn/logits/bias')
variables_lib.Variable(1, name='global_step', dtype=dtypes.int64)
linear_testing_utils.save_variables_to_ckpt(self._model_dir)
x_column = feature_column.numeric_column('x')
est = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[x_column],
dnn_hidden_units=[1],
dnn_feature_columns=[x_column],
model_dir=self._model_dir)
input_fn = numpy_io.numpy_input_fn(
x={'x': np.array([[10.]])}, batch_size=1, shuffle=False)
# linear logits = 10*1 + 2 = 12
# dnn logits = (10*3 + 4)*5 + 6 = 176
# logits = dnn + linear = 176 + 12 = 188
self.assertAllClose(
{
prediction_keys.PredictionKeys.PREDICTIONS: [188.],
},
next(est.predict(input_fn=input_fn)))
if __name__ == '__main__':
test.main()
|
apache-2.0
|
dvro/imbalanced-learn
|
imblearn/under_sampling/one_sided_selection.py
|
2
|
8213
|
"""Class to perform under-sampling based on one-sided selection method."""
from __future__ import print_function
from __future__ import division
import numpy as np
from collections import Counter
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestNeighbors
from sklearn.utils import check_random_state
from ..base import BaseBinarySampler
from .tomek_links import TomekLinks
class OneSidedSelection(BaseBinarySampler):
"""Class to perform under-sampling based on one-sided selection method.
Parameters
----------
return_indices : bool, optional (default=False)
Whether or not to return the indices of the samples randomly
selected from the majority class.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by np.random.
size_ngh : int, optional (default=1)
Size of the neighbourhood to consider to compute the average
distance to the minority point samples.
n_seeds_S : int, optional (default=1)
Number of samples to extract in order to build the set S.
n_jobs : int, optional (default=-1)
The number of threads to open if possible.
**kwargs : keywords
Parameters to pass to the Nearest Neighbours object.
Attributes
----------
min_c_ : str or int
The identifier of the minority class.
max_c_ : str or int
The identifier of the majority class.
stats_c_ : dict of str/int : int
A dictionary in which the number of occurrences of each class is
reported.
X_shape_ : tuple of int
Shape of the data `X` during fitting.
Notes
-----
The method is based on [1]_.
This method supports multiclass.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from imblearn.under_sampling import OneSidedSelection
>>> X, y = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9],
... n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1,
... n_samples=1000, random_state=10)
>>> print('Original dataset shape {}'.format(Counter(y)))
Original dataset shape Counter({1: 900, 0: 100})
>>> oss = OneSidedSelection(random_state=42)
>>> X_res, y_res = oss.fit_sample(X, y)
>>> print('Resampled dataset shape {}'.format(Counter(y_res)))
Resampled dataset shape Counter({1: 595, 0: 100})
References
----------
.. [1] M. Kubat, S. Matwin, "Addressing the curse of imbalanced training
sets: one-sided selection," In ICML, vol. 97, pp. 179-186, 1997.
"""
def __init__(self, return_indices=False, random_state=None,
size_ngh=1, n_seeds_S=1, n_jobs=-1, **kwargs):
super(OneSidedSelection, self).__init__()
self.return_indices = return_indices
self.random_state = random_state
self.size_ngh = size_ngh
self.n_seeds_S = n_seeds_S
self.n_jobs = n_jobs
self.kwargs = kwargs
def _sample(self, X, y):
"""Resample the dataset.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Matrix containing the data which have to be sampled.
y : ndarray, shape (n_samples, )
Corresponding label for each sample in X.
Returns
-------
X_resampled : ndarray, shape (n_samples_new, n_features)
The array containing the resampled data.
y_resampled : ndarray, shape (n_samples_new)
The corresponding label of `X_resampled`
idx_under : ndarray, shape (n_samples, )
If `return_indices` is `True`, an array will be returned
containing the indices of the samples which have been selected.
"""
random_state = check_random_state(self.random_state)
# Start with the minority class
X_min = X[y == self.min_c_]
y_min = y[y == self.min_c_]
# All the minority class samples will be preserved
X_resampled = X_min.copy()
y_resampled = y_min.copy()
# If we need to offer support for the indices
if self.return_indices:
idx_under = np.flatnonzero(y == self.min_c_)
# Loop over the other classes under picking at random
for key in self.stats_c_.keys():
# Skip the minority class itself
if key == self.min_c_:
continue
# Randomly get one sample from the majority class
# Generate the index to select
idx_maj_sample = random_state.randint(low=0,
high=self.stats_c_[key],
size=self.n_seeds_S)
maj_sample = X[y == key][idx_maj_sample]
# Create the set C
C_x = np.append(X_min,
maj_sample,
axis=0)
C_y = np.append(y_min,
[key] * self.n_seeds_S)
# Create the set S
S_x = X[y == key]
S_y = y[y == key]
# Remove the seed from S since it will be added anyway
S_x = np.delete(S_x, idx_maj_sample, axis=0)
S_y = np.delete(S_y, idx_maj_sample, axis=0)
# Create a k-NN classifier
knn = KNeighborsClassifier(n_neighbors=self.size_ngh,
n_jobs=self.n_jobs,
**self.kwargs)
# Fit C into the knn
knn.fit(C_x, C_y)
# Classify on S
pred_S_y = knn.predict(S_x)
# Find the misclassified S_y
sel_x = S_x[np.flatnonzero(pred_S_y != S_y), :]
sel_y = S_y[np.flatnonzero(pred_S_y != S_y)]
# If we need to offer support for the indices selected
# We concatenate the misclassified samples with the seed and the
# minority samples
if self.return_indices:
idx_tmp = np.flatnonzero(y == key)[
np.flatnonzero(pred_S_y != S_y)]
idx_under = np.concatenate((idx_under,
idx_maj_sample,
idx_tmp),
axis=0)
X_resampled = np.concatenate((X_resampled,
maj_sample,
sel_x),
axis=0)
y_resampled = np.concatenate((y_resampled,
[key] * self.n_seeds_S,
sel_y),
axis=0)
# Find the nearest neighbour of every point
nn = NearestNeighbors(n_neighbors=2, n_jobs=self.n_jobs)
nn.fit(X_resampled)
nns = nn.kneighbors(X_resampled, return_distance=False)[:, 1]
# Send the information to is_tomek function to get boolean vector back
self.logger.debug('Looking for majority Tomek links ...')
links = TomekLinks.is_tomek(y_resampled, nns, self.min_c_)
self.logger.info('Under-sampling performed: %s', Counter(
y_resampled[np.logical_not(links)]))
# Check if the indices of the samples selected should be returned too
if self.return_indices:
# Return the indices of interest
return (X_resampled[np.logical_not(links)],
y_resampled[np.logical_not(links)],
idx_under[np.logical_not(links)])
else:
# Return data set without majority Tomek links.
return (X_resampled[np.logical_not(links)],
y_resampled[np.logical_not(links)])
|
mit
|
mcallaghan/tmv
|
BasicBrowser/tmv_app/management/commands/rerun_dtm.py
|
1
|
4873
|
from django.core.management.base import BaseCommand, CommandError
from tmv_app.models import *
import numpy as np
from sklearn.decomposition import NMF
from scipy.sparse import csr_matrix, find
from functools import partial
from multiprocess import Pool
from utils.db import *
from utils.utils import *
from scoping.models import *
from time import time
import gc, sys
from django.core import management
# explicit imports for django.db.connections.close_all() and timezone.now() used below
import django.db
from django.utils import timezone
class Command(BaseCommand):
help = 'rerun a dynamic topic model with a different number \
of dynamic topics'
def add_arguments(self, parser):
parser.add_argument('run_id',type=int)
parser.add_argument('K',type=int)
#parser.add_argument('nWords',type=int,default=50)
#parser.add_argument('fileDest',type=str,default='')
def handle(self, *args, **options):
parent_run_id = options['run_id']
K = options['K']
nWords = 50 #options['nWords']
fileDest = ""#options['fileDest']
parent_stat = RunStats.objects.get(pk=parent_run_id)
n_features = parent_stat.max_features
if fileDest=='':
run_id = init(n_features)
stat = RunStats.objects.get(run_id=run_id)
stat.query = Query.objects.get(pk=parent_stat.query.id)
stat.method = "DT"
stat.parent_run_id = parent_run_id
stat.save()
for tp in parent_stat.periods.all():
stat.periods.add(tp)
tops = Topic.objects.filter(
run_id=parent_run_id,
topicterm__isnull=False
).distinct()
terms = Term.objects.all()
B = np.zeros((tops.count(),terms.count()))
wt = 0
for topic in tops:
tts = TopicTerm.objects.filter(
topic=topic
).order_by('-score')[:nWords]
if len(tts) == 0:
if fileDest != '':
print(wt)
continue
print(topic)
for tt in tts:
B[wt,tt.term.id] = tt.score*np.log1p(topic.score)
wt+=1
col_sum = np.sum(B,axis=0)
vocab_ids = np.flatnonzero(col_sum)
row_sum = np.sum(B,axis=1)
top_ids = np.flatnonzero(row_sum)
print(np.where(~B.any(axis=1)))
# we only want the columns where there are at least some
# topic-term values
B = B[:,vocab_ids]
print(B.shape)
print(np.where(~B.any(axis=1)))
if fileDest != '':
np.save(fileDest,B)
sys.exit()
nmf = NMF(
n_components=K, random_state=1,
alpha=.1, l1_ratio=.5
).fit(B)
## Add dynamic topics
dtopics = []
for k in range(K):
dtopic = DynamicTopic(
run_id=RunStats.objects.get(pk=run_id)
)
dtopic.save()
dtopics.append(dtopic)
dtopic_ids = list(
DynamicTopic.objects.filter(
run_id=run_id
).values_list('id',flat=True)
)
print(dtopic_ids)
##################
## Add the dtopic*term matrix to the db
print("Adding topicterms to db")
t0 = time()
ldalambda = find(csr_matrix(nmf.components_))
topics = range(len(ldalambda[0]))
tts = []
pool = Pool(processes=8)
tts.append(pool.map(partial(f_dlambda, m=ldalambda,
v_ids=vocab_ids,t_ids=dtopic_ids,run_id=run_id),topics))
pool.terminate()
tts = flatten(tts)
gc.collect()
sys.stdout.flush()
django.db.connections.close_all()
DynamicTopicTerm.objects.bulk_create(tts)
print("done in %0.3fs." % (time() - t0))
## Add the wtopic*dtopic matrix to the database
gamma = nmf.transform(B)
for topic in range(len(gamma)):
for dtopic in range(len(gamma[topic])):
if gamma[topic][dtopic] > 0:
tdt = TopicDTopic(
topic = tops[topic],
dynamictopic_id = dtopic_ids[dtopic],
score = gamma[topic][dtopic]
)
tdt.save()
## Calculate the primary dtopic for each topic
for t in tops:
try:
t.primary_dtopic = TopicDTopic.objects.filter(
topic=t
).order_by('-score').first().dynamictopic
t.save()
except:
pass
stat.error = parent_stat.error + nmf.reconstruction_err_
stat.errortype = "Frobenius"
stat.last_update=timezone.now()
stat.save()
print("updating and summarising run, {}".format(run_id))
management.call_command('update_run',run_id)
management.call_command('update_run',run_id)
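# A hypothetical invocation sketch (assumes a standard Django project with a
# manage.py entry point): <run_id> is the parent run to copy settings from and
# <K> is the new number of dynamic topics, matching add_arguments above.
#   python manage.py rerun_dtm <run_id> <K>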
|
gpl-3.0
|
wzbozon/scikit-learn
|
sklearn/metrics/cluster/supervised.py
|
207
|
27395
|
"""Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
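# For example, with labels_true = [0, 0, 1, 1] and labels_pred = [0, 0, 1, 2]
# the classes are {0, 1} and the clusters {0, 1, 2}, so contingency_matrix
# returns
#     [[2, 0, 0],
#      [0, 1, 1]]
# i.e. both members of true class 0 fall in cluster 0, while true class 1 is
# split between clusters 1 and 2.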
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
Perfectly matching labelings have a score of 1 even when the label values are permuted::
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
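# Worked example of the arithmetic above: for labels_true = [0, 0, 1, 2] and
# labels_pred = [0, 0, 1, 1] the contingency matrix is [[2, 0], [0, 1], [0, 1]],
# so sum_comb_c = 1, sum_comb_k = 1 + 1 = 2, sum_comb = 1, and with
# comb(4, 2) = 6 we get prod_comb = 1/3, mean_comb = 1.5 and
# ARI = (1 - 1/3) / (1.5 - 1/3) = 0.571..., matching the doctest above.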
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all classes members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If classes members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
classes are homogeneous but unnecessary splits harm completeness
and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
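# For instance, two identical labelings [0, 0, 1, 1] give the diagonal
# contingency matrix [[2, 0], [0, 2]]; working through the sums above yields
# MI = log(2), roughly 0.693 nats, the entropy of a balanced two-class
# labeling and the maximum attainable for this pair.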
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
ami : float (upper-bounded by 1.0)
The AMI returns a value of 1 when the two partitions are identical
(i.e. perfectly matched). Random partitions (independent labellings) have
an expected AMI of around 0 on average and hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
:func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
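# As a quick sanity check, entropy([0, 0, 1, 1]) = -(0.5*log(0.5) + 0.5*log(0.5))
# = log(2), roughly 0.693 nats, while entropy([0, 0, 0, 0]) = 0.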
|
bsd-3-clause
|
3manuek/scikit-learn
|
examples/semi_supervised/plot_label_propagation_digits.py
|
268
|
2723
|
"""
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using all points, but only 30 will be labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# mask the labels of the points we treat as unlabeled
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
|
bsd-3-clause
|
ClimbsRocks/scikit-learn
|
examples/linear_model/plot_sgd_iris.py
|
58
|
2202
|
"""
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
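# Each one-vs-all boundary is the line coef[c, 0]*x0 + coef[c, 1]*x1 + intercept[c] = 0,
# i.e. x1 = -(coef[c, 0]*x0 + intercept[c]) / coef[c, 1]; line() below evaluates
# this at xmin and xmax to draw the dashed hyperplanes.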
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
|
bsd-3-clause
|
daniaki/pyPPI
|
pyppi/tests/test_predict.py
|
1
|
18250
|
import os
import numpy as np
from itertools import product
from Bio import SwissProt
from unittest import TestCase
from ..database import create_session, delete_database, cleanup_database
from ..database.models import Interaction, Protein
from ..database.utilities import create_interaction
from ..database.exceptions import ObjectAlreadyExists
from ..data_mining.uniprot import parse_record_into_protein
from ..data_mining.features import compute_interaction_features
from ..data_mining.ontology import get_active_instance
from ..models.binary_relevance import MixedBinaryRelevanceClassifier
from ..predict import _check_classifier_and_selection
from ..predict import _update_missing_protein_map
from ..predict import _create_missing_interactions
from ..predict import classify_interactions
from ..predict.utilities import load_dataset, DEFAULT_SELECTION
from sklearn.base import clone
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
base_path = os.path.dirname(__file__)
db_path = os.path.normpath("{}/databases/test.db".format(base_path))
test_obo_file = '{}/{}'.format(base_path, "test_data/test_go.obo.gz")
dag = get_active_instance(filename=test_obo_file)
class TestCheckClassifierAndSelection(TestCase):
def test_valueerror_custom_classifier_no_selection(self):
with self.assertRaises(ValueError):
_check_classifier_and_selection(classifier=1, selection=None)
def test_valueerror_invalid_selection(self):
with self.assertRaises(ValueError):
_check_classifier_and_selection(classifier=1, selection=['1'])
class TestUpdateMissingProteinMap(TestCase):
def setUp(self):
self.session, self.engine = create_session(db_path)
self.p1 = Protein(uniprot_id='A', taxon_id=9606, reviewed=True)
self.p2 = Protein(uniprot_id='B', taxon_id=1, reviewed=True)
self.p1.save(self.session, commit=True)
self.p2.save(self.session, commit=True)
def tearDown(self):
delete_database(self.session)
cleanup_database(self.session, self.engine)
def test_adds_new_proteins_to_map(self):
ppis = [('A', 'A'), ('A', 'P31946')]
pm = _update_missing_protein_map(ppis, self.session)
expected = {
'A': self.p1,
'P31946': Protein.query.get(3)
}
self.assertEqual(expected, pm)
def test_adds_invalid_proteins_as_none(self):
ppis = [('A', 'A'), ('A', 'P3194')]
pm = _update_missing_protein_map(ppis, self.session, verbose=False)
expected = {
'A': self.p1,
'P3194': None
}
self.assertEqual(expected, pm)
def test_proteins_different_taxonid_added_as_none(self):
ppis = [('A', 'A'), ('B', 'A')]
pm = _update_missing_protein_map(ppis, self.session, verbose=False)
expected = {
'A': self.p1,
'B': None
}
self.assertEqual(expected, pm)
ppis = [('A', 'A'), ('Q3TYD4', 'A')]
pm = _update_missing_protein_map(ppis, self.session, verbose=False)
expected = {
'A': self.p1,
'Q3TYD4': None
}
self.assertEqual(expected, pm)
def test_ignores_taxonid_if_none(self):
ppis = [('A', 'A'), ('B', 'A')]
pm = _update_missing_protein_map(
ppis, self.session, taxon_id=None, verbose=False)
expected = {
'A': self.p1,
'B': self.p2
}
self.assertEqual(expected, pm)
ppis = [('A', 'A'), ('Q3TYD4', 'A')]
pm = _update_missing_protein_map(
ppis, self.session, taxon_id=None, verbose=False)
expected = {
'A': self.p1,
'Q3TYD4': Protein.query.get(3)
}
self.assertEqual(expected, pm)
def test_ignores_none_input(self):
ppis = [(None, 'A')]
pm = _update_missing_protein_map(ppis, self.session, verbose=False)
expected = {
'A': self.p1,
}
self.assertEqual(expected, pm)
class TestCreateMissingInteractions(TestCase):
def setUp(self):
self.session, self.engine = create_session(db_path)
delete_database(self.session)
self.p1 = Protein(uniprot_id='A', taxon_id=9606, reviewed=True)
self.p2 = Protein(uniprot_id='B', taxon_id=9606, reviewed=True)
self.p3 = Protein(uniprot_id='C', taxon_id=0, reviewed=True)
self.p4 = Protein(uniprot_id='D', taxon_id=0, reviewed=True)
self.p1.save(self.session, commit=True)
self.p2.save(self.session, commit=True)
self.p3.save(self.session, commit=True)
self.p4.save(self.session, commit=True)
self.i1 = Interaction(source=self.p1, target=self.p2)
self.i1.save(session=self.session, commit=True)
self.p_map = {p.uniprot_id: p for p in Protein.query.all()}
def tearDown(self):
delete_database(self.session)
cleanup_database(self.session, self.engine)
def test_existing_interactions_returned_and_invalid_is_empty_list(self):
valid, invalid = _create_missing_interactions(
ppis=[('A', 'B')],
protein_map=self.p_map,
session=self.session
)
self.assertEqual(valid, [self.i1])
self.assertEqual(invalid, [])
def test_interaction_with_none_source_added_to_invalid(self):
valid, invalid = _create_missing_interactions(
ppis=[(None, 'B')],
protein_map=self.p_map,
session=self.session
)
self.assertEqual(valid, [])
self.assertEqual(invalid, [(None, 'B')])
def test_interaction_with_none_target_added_to_invalid(self):
valid, invalid = _create_missing_interactions(
ppis=[('A', None)],
protein_map=self.p_map,
session=self.session
)
self.assertEqual(valid, [])
self.assertEqual(invalid, [('A', None)])
def test_interaction_with_differing_taxonid_added_to_invalid(self):
valid, invalid = _create_missing_interactions(
ppis=[('C', 'D')],
protein_map=self.p_map,
session=self.session
)
self.assertEqual(valid, [])
self.assertEqual(invalid, [('C', 'D')])
valid, invalid = _create_missing_interactions(
ppis=[('C', 'A')],
protein_map=self.p_map,
session=self.session
)
self.assertEqual(valid, [])
self.assertEqual(invalid, [('C', 'A')])
valid, invalid = _create_missing_interactions(
ppis=[('A', 'D')],
protein_map=self.p_map,
session=self.session
)
self.assertEqual(valid, [])
self.assertEqual(invalid, [('A', 'D')])
def test_new_interactions_created(self):
valid, invalid = _create_missing_interactions(
ppis=[('A', 'A')],
protein_map=self.p_map,
session=self.session
)
self.assertEqual(valid, [Interaction.query.get(2)])
self.assertEqual(invalid, [])
class TestMakePredictions(TestCase):
# This class implicitly also tests parse_predictions since
# make_predictions is essentially a wrapper for parse_predictions
# TODO: Separate these tests.
def setUp(self):
self.records = open(os.path.normpath(
"{}/test_data/test_sprot_records.dat".format(base_path)
), 'rt')
self.session, self.engine = create_session(db_path)
delete_database(self.session)
self.proteins = []
for record in SwissProt.parse(self.records):
protein = parse_record_into_protein(record)
protein.save(self.session, commit=True)
self.proteins.append(protein)
self.labels = ['Activation', 'Inhibition', 'Acetylation']
self.interactions = []
for protein_a, protein_b in product(self.proteins, self.proteins):
class_kwargs = compute_interaction_features(protein_a, protein_b)
label = '{},{}'.format(
self.labels[protein_a.id - 1],
self.labels[protein_b.id - 1]
)
try:
interaction = create_interaction(
protein_a, protein_b, labels=label, session=self.session,
verbose=False, save=True, commit=True, **class_kwargs
)
self.interactions.append(interaction)
except ObjectAlreadyExists:
continue
self.X, self.y, _ = load_dataset(
self.interactions, self.labels, selection=DEFAULT_SELECTION
)
base = Pipeline(steps=[
('vectorizer', CountVectorizer(
lowercase=False, stop_words=[':', 'GO'])),
('estimator', LogisticRegression(random_state=0))
])
self.clf = MixedBinaryRelevanceClassifier(
[clone(base) for _ in range(len(self.labels))]
)
self.clf.fit(self.X, self.y)
def tearDown(self):
delete_database(self.session)
cleanup_database(self.session, self.engine)
self.records.close()
def test_can_make_proba_predictions_on_existing_interactions(self):
ppis = [
(
Protein.query.get(i.source).uniprot_id,
Protein.query.get(i.target).uniprot_id
)
for i in self.interactions
]
predictions = classify_interactions(
ppis, proba=True, classifier=self.clf, selection=DEFAULT_SELECTION,
taxon_id=9606, verbose=False, session=self.session
)
expected = (
self.clf.predict_proba(self.X), []
)
self.assertTrue(np.all(np.isclose(predictions[0], expected[0])))
self.assertEqual(predictions[1], expected[1])
def test_can_make_binary_predictions_on_existing_interactions(self):
ppis = [
(
Protein.query.get(i.source).uniprot_id,
Protein.query.get(i.target).uniprot_id
)
for i in self.interactions
]
predictions = classify_interactions(
ppis, proba=False, classifier=self.clf, selection=DEFAULT_SELECTION,
taxon_id=9606, verbose=False, session=self.session
)
expected = (
self.clf.predict(self.X), []
)
self.assertTrue(np.all(np.isclose(predictions[0], expected[0])))
self.assertEqual(predictions[1], expected[1])
def test_can_make_predictions_on_list_of_interaction_objects(self):
ppis = self.interactions
predictions = classify_interactions(
ppis, proba=True, classifier=self.clf, selection=DEFAULT_SELECTION,
taxon_id=9606, verbose=False, session=self.session
)
expected = (
self.clf.predict_proba(self.X), []
)
self.assertTrue(np.all(np.isclose(predictions[0], expected[0])))
self.assertEqual(predictions[1], expected[1])
def test_ignores_None_or_not_interaction_objects(self):
ppis = [self.interactions[0], None, '1']
predictions = classify_interactions(
ppis, proba=True, classifier=self.clf, selection=DEFAULT_SELECTION,
taxon_id=9606, verbose=False, session=self.session
)
expected = (
self.clf.predict_proba([self.X[0]]), [None, '1']
)
self.assertTrue(np.all(np.isclose(predictions[0], expected[0])))
self.assertEqual(predictions[1], expected[1])
def test_returns_empty_list_no_valid_interactions(self):
ppis = [(1, 2), (1, 2, 3), None, '1']
predictions = classify_interactions(
ppis, proba=True, classifier=self.clf, selection=DEFAULT_SELECTION,
taxon_id=9606, verbose=False, session=self.session
)
expected = ([], [(1, 2), (1, 2, 3), None, '1'])
self.assertEqual(predictions[0], expected[0])
self.assertEqual(predictions[1], expected[1])
def test_typeerror_first_elem_not_interaction_or_tuple(self):
with self.assertRaises(TypeError):
ppis = [1, None, '1']
classify_interactions(
ppis, proba=True, classifier=self.clf, selection=DEFAULT_SELECTION,
taxon_id=9606, verbose=False, session=self.session
)
def test_creates_new_interactions(self):
ppis = [
(
Protein.query.get(i.source).uniprot_id,
Protein.query.get(i.target).uniprot_id
)
for i in self.interactions
]
delete_database(self.session)
classify_interactions(
ppis, proba=True, classifier=self.clf, selection=DEFAULT_SELECTION,
taxon_id=9606, verbose=False, session=self.session
)
self.assertEqual(Interaction.query.count(), 6)
def test_removed_duplicate_interactions_interactions(self):
ppis = [
(
Protein.query.get(i.source).uniprot_id,
Protein.query.get(i.target).uniprot_id
)
for i in self.interactions
]
ppis.append(ppis[0])
predictions = classify_interactions(
ppis, proba=True, classifier=self.clf, selection=DEFAULT_SELECTION,
taxon_id=9606, verbose=False, session=self.session
)
expected = (
self.clf.predict_proba(self.X), []
)
self.assertTrue(np.all(np.isclose(predictions[0], expected[0])))
self.assertEqual(predictions[1], expected[1])
def test_invalid_ppis_added_to_invalid(self):
ppis = [('A', 'B'), ('Q04917', 'X')]
predictions = classify_interactions(
ppis, proba=True, classifier=self.clf, selection=DEFAULT_SELECTION,
taxon_id=9606, verbose=False, session=self.session
)
expected = ([], ppis)
self.assertEqual(predictions[0], expected[0])
self.assertEqual(predictions[1], expected[1])
def test_non_matching_taxonid_of_existing_ppis_added_to_invalid(self):
ppis = [
(
Protein.query.get(i.source).uniprot_id,
Protein.query.get(i.target).uniprot_id
)
for i in self.interactions
]
predictions = classify_interactions(
ppis, proba=True, classifier=self.clf, selection=DEFAULT_SELECTION,
taxon_id=0, verbose=False, session=self.session
)
expected = ([], ppis)
self.assertEqual(predictions[0], expected[0])
self.assertEqual(predictions[1], expected[1])
def test_non_matching_taxonid_of_new_ppis_added_to_invalid(self):
ppis = [
(
Protein.query.get(i.source).uniprot_id,
Protein.query.get(i.target).uniprot_id
)
for i in self.interactions
]
delete_database(self.session)
predictions = classify_interactions(
ppis, proba=True, classifier=self.clf, selection=DEFAULT_SELECTION,
taxon_id=0, verbose=False, session=self.session
)
expected = ([], ppis)
self.assertEqual(predictions[0], expected[0])
self.assertEqual(predictions[1], expected[1])
def test_taxonid_ignored_if_None(self):
ppis = [
(
Protein.query.get(i.source).uniprot_id,
Protein.query.get(i.target).uniprot_id
)
for i in self.interactions
]
predictions = classify_interactions(
ppis, proba=True, classifier=self.clf, selection=DEFAULT_SELECTION,
taxon_id=None, verbose=False, session=self.session
)
expected = (
self.clf.predict_proba(self.X), []
)
self.assertTrue(np.all(np.isclose(predictions[0], expected[0])))
self.assertEqual(predictions[1], expected[1])
def test_ignores_duplicate_entries(self):
ppi_1 = (
Protein.query.get(self.interactions[0].source).uniprot_id,
Protein.query.get(self.interactions[0].target).uniprot_id
)
ppi_2 = (
Protein.query.get(self.interactions[0].target).uniprot_id,
Protein.query.get(self.interactions[0].source).uniprot_id
)
ppis = [ppi_1, ppi_2]
predictions = classify_interactions(
ppis, proba=True, classifier=self.clf, selection=DEFAULT_SELECTION,
taxon_id=9606, verbose=False, session=self.session
)
X, _, _ = load_dataset(
[self.interactions[0]], self.labels, selection=DEFAULT_SELECTION
)
expected = (
self.clf.predict_proba(X), []
)
self.assertTrue(np.all(np.isclose(predictions[0], expected[0])))
self.assertEqual(predictions[1], expected[1])
def test_multiple_choice_uniprot_ids_get_put_in_invalid(self):
ppis = [('Q8NDH8', 'P0CG12')]
predictions = classify_interactions(
ppis, proba=True, classifier=self.clf, selection=DEFAULT_SELECTION,
taxon_id=9606, verbose=False, session=self.session
)
expected = ([], [('P0CG12', 'Q8NDH8')])
self.assertTrue(np.all(np.isclose(predictions[0], expected[0])))
self.assertEqual(predictions[1], expected[1])
def test_outdated_accessions_map_to_most_recent_entries(self):
ppis = [('A8K9K2', 'A8K9K2')] # maps to P31946
entry = Protein.get_by_uniprot_id('P31946')
interaction = Interaction.get_by_interactors(entry, entry)
predictions = classify_interactions(
ppis, proba=True, classifier=self.clf, selection=DEFAULT_SELECTION,
taxon_id=9606, verbose=False, session=self.session
)
X, _, _ = load_dataset(
[interaction], self.labels, selection=DEFAULT_SELECTION
)
expected = (
self.clf.predict_proba(X), [], {'A8K9K2': 'P31946'}
)
self.assertTrue(np.all(np.isclose(predictions[0], expected[0])))
self.assertEqual(predictions[1], expected[1])
self.assertEqual(predictions[2], expected[2])
|
mit
|
agrimaldi/metaseq
|
metaseq/plotutils.py
|
3
|
34743
|
"""
Module with handy utilities for plotting genomic signal
"""
import itertools
import matplotlib
from matplotlib import pyplot as plt
from matplotlib import mlab
import numpy as np
from matplotlib.ticker import MaxNLocator
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import gridspec
import colormap_adjust
from scipy import stats
def ci_plot(x, arr, conf=0.95, ax=None, line_kwargs=None, fill_kwargs=None):
"""
Plots the mean and the `conf` (default 95%) confidence interval for the given array on the given axes
Parameters
----------
x : 1-D array-like
x values for the plot
arr : 2-D array-like
The array to calculate mean and std for
conf : float [.5 - 1]
Confidence interval to use
ax : matplotlib.Axes
The axes object on which to plot
line_kwargs : dict
Additional kwargs passed to Axes.plot
fill_kwargs : dict
Additional kwargs passed to Axes.fill_between
"""
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
line_kwargs = line_kwargs or {}
fill_kwargs = fill_kwargs or {}
m, lo, hi = ci(arr, conf)
ax.plot(x, m, **line_kwargs)
ax.fill_between(x, lo, hi, **fill_kwargs)
return ax
def imshow(arr, x=None, ax=None, vmin=None, vmax=None, percentile=True,
strip=False, features=None, conf=0.95, sort_by=None,
line_kwargs=None, fill_kwargs=None, imshow_kwargs=None, figsize=(5, 12),
width_ratios=(4, 1), height_ratios=(4, 1),
subplot_params=dict(wspace=0.1, hspace=0.1),
subset_by=None, subset_order=None,):
"""
Do-it-all function to help with plotting heatmaps
Parameters
----------
arr : array-like
x : 1D array
X values to use. If None, use range(arr.shape[1])
ax : matplotlib.Axes
If not None, then only plot the array on the provided axes. This will
ignore any additional arguments provided that apply to figure-level
configuration or to the average line plot. For example, `figsize`,
`width_ratios`, `height_ratios`, `subplot_params`, `line_kwargs`, and
`fill_kwargs` will all be ignored.
vmin, vmax : float
percentile : bool
If True, then treat values for `vmin` and `vmax` as percentiles rather
than absolute values.
strip : bool
Include a strip plot alongside the array
features : pybedtools.BedTool or string filename
Features used to construct the array
conf : float
Confidence interval to use in line plot.
sort_by : array-like
Use the provided array to sort the array (e.g., an array of expression
values). This array will be argsorted to get the proper order.
line_kwargs, fill_kwargs : dict
Passed directly to `ci_plot`.
figsize : tuple
(Width, height) of the figure to create.
imshow_kwargs : dict
Passed directly to matplotlib.pyplot.imshow. By default, arguments
used are `origin='lower'`, `aspect="auto"` and a colormap from
colormap_adjust.smart_colormap generated using the provided `vmin` and
`vmax`.
width_ratios, height_ratios: tuple
These tuples are passed to the `new_shell` function. The default
values set up a 2x2 configuration of panels for heatmap, line plot,
colorbar axes, and optional strip plot. However, `width_ratios` or
`height_ratios` can be modified to create more or fewer panels.
subplot_params : dict
Passed to Figure.subplots_adjust
subset_by : array
An array of any type (but usually int or str) that contains a class
label for each row in the heatmap array. For example, to subset by
expression, an array the values of "up", "down", or "unchanged" at each
of the positions could be provided.
Note that the heatmap array is first sorted by `sort_by` and then split
into groups according to `subset_by`, so each subset remains sorted by
`sort_by`.
subset_order : list-like
This provides the order in which the subsets are plotted. Since the
default imshow arguments contain `origin="lower"`, these will be
plotted in order starting at the bottom of the heatmap.
"""
if ax is None:
fig = new_shell(
figsize=figsize,
strip=strip,
subplot_params=subplot_params,
width_ratios=width_ratios,
height_ratios=height_ratios)
if x is None:
x = np.arange(arr.shape[1] + 1)
if percentile:
if vmin is None:
vmin = arr.min()
else:
vmin = mlab.prctile(arr.ravel(), vmin)
if vmax is None:
vmax = arr.max()
else:
vmax = mlab.prctile(arr.ravel(), vmax)
else:
if vmin is None:
vmin = arr.min()
if vmax is None:
vmax = arr.max()
cmap = colormap_adjust.smart_colormap(vmin, vmax)
_imshow_kwargs = dict(origin='lower', cmap=cmap, vmin=vmin, vmax=vmax,
aspect='auto')
if imshow_kwargs is not None:
_imshow_kwargs.update(imshow_kwargs)
# previously we did an argsort first; with subsetting we don't want to do
# that yet....
#if sort_by is not None:
# ind = np.argsort(sort_by)
#else:
# ind = np.arange(arr.shape[0])
if sort_by is None:
sort_by = np.arange(arr.shape[0])
if ax is None:
array_ax = fig.array_axes
else:
array_ax = ax
# If not provided, assume all in the same subset.
if subset_by is None:
subset_by = np.zeros(arr.shape[0])
# Ensure always array, since we're doing indexing tricks
if not isinstance(subset_by, np.ndarray):
subset_by = np.array(subset_by)
# If not provided, use sorted order
if subset_order is None:
subset_order = sorted(np.unique(subset_by))
inds = []
for cls in subset_order:
subset_ind = np.nonzero(subset_by == cls)[0]
subset_sort_by = sort_by[subset_ind]
subset_argsort_by = np.argsort(subset_sort_by)
inds.append(subset_ind[subset_argsort_by])
ind = np.concatenate(inds)
mappable = array_ax.imshow(
arr[ind, :],
extent=(x.min(), x.max(), 0, arr.shape[0]),
**_imshow_kwargs
)
if line_kwargs is None:
line_kwargs = {}
if fill_kwargs is None:
fill_kwargs = {}
if isinstance(line_kwargs, dict):
line_kwargs = [line_kwargs]
if isinstance(fill_kwargs, dict):
fill_kwargs = [fill_kwargs]
_line_kwargs = itertools.cycle(line_kwargs)
_fill_kwargs = itertools.cycle(fill_kwargs)
if ax is None:
plt.colorbar(mappable, fig.cax)
for subset_ind, label, _lkw, _fkw in zip(inds, subset_order, _line_kwargs, _fill_kwargs):
ci_plot(
x,
arr[subset_ind],
ax=fig.line_axes,
line_kwargs=_lkw,
fill_kwargs=_fkw,
)
return fig
else:
return ax.figure
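# Rough usage sketch (signal, tss_expression and direction are hypothetical
# arrays, not part of this module): something along the lines of
#
#     fig = imshow(signal, x=np.linspace(-1000, 1000, signal.shape[1]),
#                  percentile=True, vmin=5, vmax=99, sort_by=tss_expression,
#                  subset_by=direction, subset_order=['down', 'unchanged', 'up'])
#
# should draw the heatmap with rows grouped by `direction` (bottom group first,
# because of origin='lower'), each group sorted by `tss_expression`, and one
# averaged line per group in the lower panel.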
def add_labels_to_subsets(ax, subset_by, subset_order, text_kwargs=None,
add_hlines=True, hline_kwargs=None):
"""
Helper function for adding labels to subsets within a heatmap.
Assumes that imshow() was called with `subset_by` and `subset_order`.
Parameters
----------
ax : matplotlib.Axes
The axes to label. Generally you can use `fig.array_axes` attribute of
the Figure object returned by `metaseq.plotutils.imshow`.
subset_by, subset_order : array, list
See `metaseq.plotutils.imshow()` docstring; these should be the same
`subset_by` and `subset_order` that were provided to that function.
"""
_text_kwargs = dict(transform=ax.get_yaxis_transform())
if text_kwargs:
_text_kwargs.update(text_kwargs)
_hline_kwargs = dict(color='k')
if hline_kwargs:
_hline_kwargs.update(hline_kwargs)
pos = 0
for label in subset_order:
ind = subset_by == label
last_pos = pos
pos += sum(ind)
if add_hlines:
ax.axhline(pos, **_hline_kwargs)
ax.text(
1.1,
last_pos + (pos - last_pos)/2.0,
label,
**_text_kwargs)
def calculate_limits(array_dict, method='global', percentiles=None, limit=()):
"""
Calculate limits for a group of arrays in a flexible manner.
Returns a dictionary of calculated (vmin, vmax), with the same keys as
`array_dict`.
Useful for plotting heatmaps of multiple datasets when the vmin/vmax values
of the colormaps need to be matched across all (or a subset) of the heatmaps.
Parameters
----------
array_dict : dict of np.arrays
method : {'global', 'independent', callable}
If method="global", then use the global min/max values across all
arrays in array_dict. If method="independent", then each array will
have its own min/max calculated. If a callable, then it will be used to
group the keys of `array_dict`, and each group will have its own
group-wise min/max calculated.
percentiles : None or list
If not None, a list of (lower, upper) percentiles in the range [0,100].
"""
if percentiles is not None:
for percentile in percentiles:
if not 0 <= percentile <= 100:
raise ValueError("percentile (%s) not between [0, 100]")
if method == 'global':
all_arrays = np.concatenate(
[i.ravel() for i in array_dict.itervalues()]
)
if percentiles:
vmin = mlab.prctile(
all_arrays, percentiles[0])
vmax = mlab.prctile(
all_arrays, percentiles[1])
else:
vmin = all_arrays.min()
vmax = all_arrays.max()
d = dict([(i, (vmin, vmax)) for i in array_dict.keys()])
elif method == 'independent':
d = {}
for k, v in array_dict.iteritems():
d[k] = (v.min(), v.max())
elif hasattr(method, '__call__'):
d = {}
sorted_keys = sorted(array_dict.keys(), key=method)
for group, keys in itertools.groupby(sorted_keys, method):
keys = list(keys)
all_arrays = np.concatenate([array_dict[i] for i in keys])
if percentiles:
vmin = mlab.prctile(
all_arrays, percentiles[0])
vmax = mlab.prctile(
all_arrays, percentiles[1])
else:
vmin = all_arrays.min()
vmax = all_arrays.max()
for key in keys:
d[key] = (vmin, vmax)
return d
def ci(arr, conf=0.95):
"""
Column-wise confidence interval.
Parameters
----------
arr : array-like
conf : float
Confidence interval
Returns
-------
m : array
column-wise mean
lower : array
lower column-wise confidence bound
upper : array
upper column-wise confidence bound
"""
m = arr.mean(axis=0)
n = len(arr)
se = arr.std(axis=0) / np.sqrt(n)
h = se * stats.t._ppf((1 + conf) / 2., n - 1)
return m, m - h, m + h
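# In other words, for each column the interval is
#     mean +/- t_{(1+conf)/2, n-1} * std / sqrt(n)
# e.g. with conf=0.95 and n=10 rows the multiplier stats.t._ppf(0.975, 9)
# is roughly 2.26.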
def nice_log(x):
"""
Uses a log scale but with negative numbers.
:param x: NumPy array
"""
neg = x < 0
xi = np.log2(np.abs(x) + 1)
xi[neg] = -xi[neg]
return xi
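# For example, nice_log(np.array([-3.0, 0.0, 3.0])) gives [-2., 0., 2.],
# since log2(3 + 1) = 2 and the sign of the input is preserved.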
def tip_zscores(a):
"""
Calculates the "target identification from profiles" (TIP) zscores
from Cheng et al. 2001, Bioinformatics 27(23):3221-3227.
:param a: NumPy array, where each row is the signal for a feature.
"""
weighted = a * a.mean(axis=0)
scores = weighted.sum(axis=1)
zscores = (scores - scores.mean()) / scores.std()
return zscores
def fdrcorrection(pvals, alpha=0.05, method='indep'):
'''
NOTE: This function was copied from
statsmodels.sandbox.stats.multicomp.fdrcorrection0, from statsmodels
version 0.5.0.
This is to avoid requiring all of statsmodels to be a dependency for
metaseq, just for this function.
pvalue correction for false discovery rate
This covers Benjamini/Hochberg for independent or positively correlated and
Benjamini/Yekutieli for general or negatively correlated tests. Both are
available in the function multipletests, as method=`fdr_bh`, resp.
`fdr_by`.
Parameters
----------
pvals : array_like
set of p-values of the individual tests.
alpha : float
error rate
method : {'indep', 'negcorr'}
Returns
-------
rejected : array, bool
True if a hypothesis is rejected, False if not
pvalue-corrected : array
pvalues adjusted for multiple hypothesis testing to limit FDR
Notes
-----
If there is prior information on the fraction of true hypothesis, then
alpha should be set to alpha * m/m_0 where m is the number of tests, given
by the p-values, and m_0 is an estimate of the number of true hypotheses
(see Benjamini, Krieger and Yekutieli).
The two-step method of Benjamini, Krieger and Yekutiel that estimates the
number of false hypotheses will be available (soon).
Method names can be abbreviated to first letter, 'i' or 'p' for fdr_bh and
'n' for fdr_by.
'''
pvals = np.asarray(pvals)
pvals_sortind = np.argsort(pvals)
pvals_sorted = pvals[pvals_sortind]
sortrevind = pvals_sortind.argsort()
if method in ['i', 'indep', 'p', 'poscorr']:
ecdffactor = _ecdf(pvals_sorted)
elif method in ['n', 'negcorr']:
cm = np.sum(1./np.arange(1, len(pvals_sorted)+1)) # corrected this
ecdffactor = _ecdf(pvals_sorted) / cm
# elif method in ['n', 'negcorr']:
# cm = np.sum(np.arange(len(pvals)))
# ecdffactor = ecdf(pvals_sorted)/cm
else:
raise ValueError('only indep and negcorr implemented')
reject = pvals_sorted <= ecdffactor*alpha
if reject.any():
rejectmax = max(np.nonzero(reject)[0])
reject[:rejectmax] = True
pvals_corrected_raw = pvals_sorted / ecdffactor
pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]
pvals_corrected[pvals_corrected > 1] = 1
return reject[sortrevind], pvals_corrected[sortrevind]
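# Small illustration, assuming the usual ECDF factor i/n: for
# pvals = [0.01, 0.02, 0.03, 0.5] and alpha=0.05 the thresholds are
# [0.0125, 0.025, 0.0375, 0.05], so the first three hypotheses are rejected
# and the BH-adjusted p-values come out as [0.04, 0.04, 0.04, 0.5].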
def tip_fdr(a, alpha=0.05):
"""
Returns adjusted TIP p-values for a particular `alpha`.
(see :func:`tip_zscores` for more info)
:param a: NumPy array, where each row is the signal for a feature
:param alpha: False discovery rate
"""
zscores = tip_zscores(a)
pvals = stats.norm.pdf(zscores)
rejected, fdrs = fdrcorrection(pvals)
return fdrs
def prepare_logged(x, y):
"""
Transform `x` and `y` to a log scale while dealing with zeros.
This function scales `x` and `y` such that the points that are zero in one
array are set to the min of the other array.
When plotting expression data, frequently one sample will have reads in
a particular feature but the other sample will not. Expression data also
tends to look better on a log scale, but log(0) is undefined and therefore
cannot be shown on a plot. This function allows these points to be shown,
piled up along one side of the plot.
:param x,y: NumPy arrays
"""
xi = np.log2(x)
yi = np.log2(y)
xv = np.isfinite(xi)
yv = np.isfinite(yi)
global_min = min(xi[xv].min(), yi[yv].min())
global_max = max(xi[xv].max(), yi[yv].max())
xi[~xv] = global_min
yi[~yv] = global_min
return xi, yi
def new_shell(figsize=(5, 12), strip=False, height_ratios=(4, 1),
width_ratios=(4, 1), subplot_params=None):
if subplot_params is None:
subplot_params = {}
fig = plt.figure(figsize=figsize)
gs = gridspec.GridSpec(
len(height_ratios),
len(width_ratios),
height_ratios=height_ratios,
width_ratios=width_ratios,
**subplot_params)
fig.array_axes = plt.subplot(gs[0, 0])
if strip:
fig.strip_axes = plt.subplot(gs[0, 1], sharey=fig.array_axes)
fig.line_axes = plt.subplot(gs[1, 0], sharex=fig.array_axes)
fig.cax = plt.subplot(gs[1, 1])
fig.gs = gs
return fig
def matrix_and_line_shell(figsize=(5, 12), strip=False):
"""
Helper function to construct an empty figure that has space for a matrix,
a summary line plot directly below it, a colorbar axis, and an optional
"strip" axis that parallels the matrix (and shares its y-axis) where data
can be added to create callbacks.
Returns a tuple of (fig, matrix_ax, line_ax, strip_ax, colorbar_ax) that
can then be used to plot upon.
:param figsize: Tuple of (width, height), in inches, of the figure to be created.
:param strip: If `strip` is False, then the returned `strip_ax` will be
None and no strip axes will be created.
"""
fig = plt.figure(figsize=figsize)
# Constants to keep track
if strip:
STRIP_COLS = 1
else:
STRIP_COLS = 0
ROWS = 4
COLS = 8 + STRIP_COLS
MAT_COLS = 7
MAT_ROWS = 3
LINE_ROWS = ROWS - MAT_ROWS
mat_ax = plt.subplot2grid(
shape=(ROWS, COLS),
loc=(0, STRIP_COLS),
rowspan=MAT_ROWS,
colspan=MAT_COLS,
)
line_ax = plt.subplot2grid(
shape=(ROWS, COLS),
loc=(MAT_ROWS, STRIP_COLS),
rowspan=LINE_ROWS,
colspan=MAT_COLS,
sharex=mat_ax)
if strip:
strip_ax = plt.subplot2grid(
shape=(ROWS, COLS),
loc=(0, 0),
rowspan=MAT_ROWS,
colspan=STRIP_COLS,
sharey=mat_ax,
)
else:
strip_ax = None
cax = plt.subplot2grid(
shape=(ROWS, COLS),
loc=(ROWS - MAT_ROWS, MAT_COLS + STRIP_COLS),
rowspan=1,
colspan=1,
)
fig.subplots_adjust(hspace=0.1, wspace=0.2, right=0.88, left=0.23)
return fig, mat_ax, line_ax, strip_ax, cax
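# Usage sketch (illustrative only; the random matrix is hypothetical data).
# It shows how the returned axes are typically filled: the matrix on top, its
# column-wise average underneath, and the colorbar in the small axis.
def _example_matrix_and_line_shell():
    arr = np.random.rand(50, 20)
    fig, mat_ax, line_ax, strip_ax, cax = matrix_and_line_shell(strip=False)
    mappable = mat_ax.imshow(arr, aspect='auto', origin='lower',
                             interpolation='nearest')
    line_ax.plot(arr.mean(axis=0))
    fig.colorbar(mappable, cax=cax, orientation='horizontal')
    return fig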
def clustered_sortind(x, k=10, scorefunc=None):
"""
Uses MiniBatch k-means clustering to cluster matrix into groups.
Each cluster of rows is then sorted by `scorefunc` -- by default, the max
peak height when all rows in a cluster are averaged, or
cluster.mean(axis=0).max().
Returns the index that will sort the rows of `x` and a list of "breaks".
`breaks` is essentially a cumulative row count for each cluster boundary.
In other words, after plotting the array you can use axhline on each
"break" to plot the cluster boundary.
If `k` is a list or tuple, iteratively try each one and select the best
with the lowest mean distance from cluster centers.
:param x: Matrix whose rows are to be clustered
:param k: Number of clusters to create or a list of potential clusters; the
optimum will be chosen from the list
:param scorefunc: Optional function for sorting rows within clusters. Must
accept a single argument of a NumPy array.
"""
try:
from sklearn.cluster import MiniBatchKMeans
except ImportError:
raise ImportError('please install scikit-learn for '
'clustering.')
# If integer, do it once and we're done
if isinstance(k, int):
best_k = k
else:
mean_dists = {}
for _k in k:
mbk = MiniBatchKMeans(init='k-means++', n_clusters=_k)
mbk.fit(x)
mean_dists[_k] = mbk.transform(x).mean()
best_k = sorted(mean_dists.items(), key=lambda x: x[1])[0][0]  # smallest mean distance, as documented above
mbk = MiniBatchKMeans(init='k-means++', n_clusters=best_k)
mbk.fit(x)
k = best_k
labels = mbk.labels_
scores = np.zeros(labels.shape, dtype=float)
if not scorefunc:
def scorefunc(x):
return x.mean(axis=0).max()
for label in range(k):
ind = labels == label
score = scorefunc(x[ind, :])
scores[ind] = score
pos = 0
breaks = []
ind = np.argsort(scores)
for k, g in itertools.groupby(labels[ind]):
pos += len(list(g))
breaks.append(pos)
return ind, breaks
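# Usage sketch (illustrative only; the random matrix is hypothetical data).
# The returned index reorders the rows by cluster, and `breaks` marks where
# one cluster ends and the next begins.
def _example_clustered_sortind():
    x = np.random.rand(200, 30)
    ind, breaks = clustered_sortind(x, k=5)
    fig, ax = plt.subplots()
    ax.imshow(x[ind, :], aspect='auto', interpolation='nearest')
    for b in breaks[:-1]:
        ax.axhline(b, color='w')   # cluster boundary
    return fig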
def new_clustered_sortind(x, k=10, row_key=None, cluster_key=None):
"""
Uses MiniBatch k-means clustering to cluster matrix into groups.
Each cluster of rows can then be sorted by `row_key`, and the clusters
themselves ordered by `cluster_key`; if neither is provided, rows and
clusters keep their original order.
Returns the index that will sort the rows of `x` and a list of "breaks".
`breaks` is essentially a cumulative row count for each cluster boundary.
In other words, after plotting the array you can use axhline on each
"break" to plot the cluster boundary.
If `k` is a list or tuple, iteratively try each one and select the best
with the lowest mean distance from cluster centers.
:param x: Matrix whose rows are to be clustered
:param k: Number of clusters to create or a list of potential clusters; the
optimum will be chosen from the list
:param row_key:
Optional function to act as a sort key for sorting rows within
clusters. Signature should be `row_key(a)` where `a` is a 1-D NumPy
array.
:param cluster_key:
Optional function for sorting clusters. Signature is `cluster_key(a)`
where `a` is a NumPy array containing all rows of `x` for cluster `i`.
It must return a single value.
"""
try:
from sklearn.cluster import MiniBatchKMeans
except ImportError:
raise ImportError('please install scikit-learn for '
'clustering.')
# If integer, do it once and we're done
if isinstance(k, int):
best_k = k
else:
mean_dists = {}
for _k in k:
mbk = MiniBatchKMeans(init='k-means++', n_clusters=_k)
mbk.fit(x)
mean_dists[_k] = mbk.transform(x).mean()
best_k = sorted(mean_dists.items(), key=lambda x: x[1])[0][0]  # smallest mean distance, as documented above
mbk = MiniBatchKMeans(init='k-means++', n_clusters=best_k)
mbk.fit(x)
k = best_k
labels = mbk.labels_
scores = np.zeros(labels.shape, dtype=float)
if cluster_key:
# It's easier for calling code to provide something that operates on
# a cluster level, but here it's converted to work on a label level
# that looks in to the array `x`.
def _cluster_key(i):
return cluster_key(x[labels == i, :])
sorted_labels = sorted(range(k), key=_cluster_key)
else:
# Otherwise just use them as-is.
sorted_labels = range(k)
if row_key:
# Again, easier to provide a function to operate on a row. But here we
# need it to accept an index
def _row_key(i):
return row_key(x[i, :])
final_ind = []
breaks = []
pos = 0
for label in sorted_labels:
# which rows in `x` have this label
label_inds = np.nonzero(labels == label)[0]
if row_key:
label_sort_ind = sorted(label_inds, key=_row_key)
else:
label_sort_ind = label_inds
for li in label_sort_ind:
final_ind.append(li)
pos += len(label_inds)
breaks.append(pos)
return np.array(final_ind), np.array(breaks)
def input_ip_plots(iparr, inputarr, diffed, x, sort_ind,
prefix=None, limits1=(None, None), limits2=(None, None),
hlines=None, vlines=None):
"""
All-in-one plotting function to make a 5-panel figure.
Panels are IP, input, and diffed; plus 2 line plots showing averages.
:param iparr, inputarr: NumPy arrays constructed by a genomic_signal object
:param diffed: Difference of `iparr` and `inputarr`, but can be some other
transformation.
:param x: Extent to use -- for TSSs, maybe something like
np.linspace(-1000, 1000, bins), or for just bin IDs, something like
`np.arange(bins)`.
:param sort_ind: row order for each of the 3 panels -- usually interesting
to use `clustered_sortind` or `tip_zscores`
:param prefix: Used to prefix plot titles with '%(prefix)s IP', etc.
:param limits1: Tuple passed to the Normalize function for IP and input.
:param limits2: Tuple passed to the Normalize function for the diffed array
:param hlines: List of (position, kwarg) tuples for plotting horizontal
lines. Kwargs are passed directly to axhline. Useful for delimiting
clusters, if you used `clustered_sortind` and have both `row_order` and
`breaks`.
:param vlines: List of (position, kwargs) tuples. A vertical line will be
plotted at each position using kwargs.
"""
# global min and max
gmin = min(iparr.min(), inputarr.min())
gmax = max(iparr.max(), inputarr.max())
fig = plt.figure(figsize=(10, 10))
# 3 arrays, 2 line plots, a gene strip, and 2 colorbars. Plots share the
# axes that make sense
#
# 3 arrays
ax1 = plt.subplot2grid(
(9, 9), (0, 0), colspan=3, rowspan=6)
ax2 = plt.subplot2grid(
(9, 9), (0, 3), colspan=3, rowspan=6, sharex=ax1, sharey=ax1)
ax3 = plt.subplot2grid(
(9, 9), (0, 6), colspan=3, rowspan=6, sharex=ax1, sharey=ax1)
# 2 line plots
ax4 = plt.subplot2grid((9, 9), (6, 3), colspan=3, rowspan=3, sharex=ax1)
ax5 = plt.subplot2grid((9, 9), (6, 6), colspan=3, rowspan=3, sharex=ax1)
# 2 colorbars
cax1 = plt.Axes(fig, rect=(0.05, 0.25, 0.25, 0.025))
cax2 = plt.Axes(fig, rect=(0.05, 0.15, 0.25, 0.025))
# For nice imshow axes
extent = (min(x), max(x), 0, diffed.shape[0])
cm = matplotlib.cm.gist_gray
cm.set_bad('k')
cm.set_over('r')
cm.set_under('b')
limits1 = list(limits1)
limits2 = list(limits2)
all_base = np.column_stack((iparr.ravel(), inputarr.ravel())).ravel()
if limits1[0] is None:
limits1[0] = mlab.prctile(
all_base, 1. / all_base.size)
if limits1[1] is None:
limits1[1] = mlab.prctile(
all_base, 100 - 1. / all_base.size)
if limits2[0] is None:
limits2[0] = mlab.prctile(
diffed.ravel(), 1. / all_base.size)
if limits2[1] is None:
limits2[1] = mlab.prctile(
diffed.ravel(), 100 - 1. / all_base.size)
del all_base
imshow_kwargs = dict(
interpolation='nearest',
aspect='auto',
cmap=cm,
norm=matplotlib.colors.Normalize(*limits1),
extent=extent,
origin='lower')
# modify kwargs for diffed (by changing the normalization)
diffed_kwargs = imshow_kwargs.copy()
diffed_kwargs['norm'] = matplotlib.colors.Normalize(*limits2)
# IP
mappable1 = ax1.imshow(iparr[sort_ind, :], **imshow_kwargs)
# input
mappable2 = ax2.imshow(inputarr[sort_ind, :], **imshow_kwargs)
# diffed
mappable3 = ax3.imshow((diffed)[sort_ind, :], **diffed_kwargs)
# IP and input line plot with vertical line
ax4.plot(x, inputarr.mean(axis=0), color='k', linestyle='--',
label='input')
ax4.plot(x, iparr.mean(axis=0), color='k', label='ip')
ax4.axvline(0, color='k', linestyle=':')
# Diffed line plot with vertical line
ax5.plot(x, diffed.mean(axis=0), 'k', label='enrichment')
ax5.axvline(0, color='k', linestyle=':')
# Colorbars
cbar1 = fig.colorbar(mappable1, cax1, orientation='horizontal')
cbar2 = fig.colorbar(mappable3, cax2, orientation='horizontal')
fig.add_axes(cax1)
fig.add_axes(cax2)
# labeling...
ax1.set_ylabel('features')
plt.setp(ax2.get_yticklabels(), visible=False)
plt.setp(ax3.get_yticklabels(), visible=False)
ax4.set_xlabel('bp')
ax4.set_ylabel('mean reads per million mapped reads')
ax5.set_xlabel('bp')
cax1.set_xlabel('Reads per million mapped reads')
cax2.set_xlabel('Enrichment (RPMMR)')
if prefix is None:
prefix = ""
ax1.set_title('%s IP' % prefix)
ax2.set_title('%s input' % prefix)
ax3.set_title('Difference')
# diffed line plot should have y ax on right
ax5.yaxis.set_ticks_position('right')
ax5.yaxis.set_label_position('right')
ax5.set_ylabel('enriched reads per million mapped reads')
# Legends
ax4.legend(loc='best', frameon=False)
ax5.legend(loc='best', frameon=False)
# Make sure everybody snaps to xmin/xmax
for ax in [ax1, ax2, ax3, ax4, ax5]:
ax.axis(xmin=extent[0], xmax=extent[1])
if not hlines:
hlines = []
if not vlines:
vlines = []
for ax in [ax1, ax2, ax3]:
for pos, kwargs in hlines:
ax.axhline(pos, **kwargs)
for pos, kwargs in vlines:
ax.axvline(pos, **kwargs)
fig.subplots_adjust(bottom=0.05, top=0.95, hspace=0.75, wspace=0.9)
return fig
def _updatecopy(orig, update_with, keys=None, override=False):
"""
Return a copy of `orig` updated with values from `update_with`. If `keys` is
a list, only those keys are updated; keys already in `orig` are kept unless
`override` is True.
"""
d = orig.copy()
if keys is None:
keys = update_with.keys()
for k in keys:
if k in update_with:
if k in d and not override:
continue
d[k] = update_with[k]
return d
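# Behaviour sketch (illustrative): keys already present in `orig` win unless
# `override` is True.
#   _updatecopy({'bins': 50}, {'bins': 10, 'color': 'k'})
#       -> {'bins': 50, 'color': 'k'}
#   _updatecopy({'bins': 50}, {'bins': 10}, override=True)
#       -> {'bins': 10}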
def _clean(z):
"""
Return a version of z that only has finite values
"""
return z[np.isfinite(z)]
class MarginalHistScatter(object):
def __init__(self, ax, hist_size=0.6, pad=0.05):
"""
Class to enable incremental appending of scatterplots, each of which
generates additional marginal histograms.
"""
self.scatter_ax = ax
self.fig = ax.figure
self.divider = make_axes_locatable(self.scatter_ax)
self.top_hists = []
self.right_hists = []
self.hist_size = hist_size
self.pad = pad
self.xfirst_ax = None
self.yfirst_ax = None
# will hold histogram data
self.hxs = []
self.hys = []
@property
def xmax(self):
return self.scatter_ax.dataLim.xmax
@property
def ymax(self):
return self.scatter_ax.dataLim.ymax
@property
def xmin(self):
return self.scatter_ax.dataLim.xmin
@property
def ymin(self):
return self.scatter_ax.dataLim.ymin
@property
def limits(self):
return (self.xmin, self.xmax, self.ymin, self.ymax)
def append(self, x, y, scatter_kwargs, hist_kwargs=None, xhist_kwargs=None,
yhist_kwargs=None, num_ticks=3, labels=None, hist_share=False,
marginal_histograms=True):
"""
Adds a new scatter to self.scatter_ax as well as marginal histograms
for the same data, borrowing additional room from the axes.
Parameters
----------
x, y : array-like
Data to be plotted
scatter_kwargs : dict
Keyword arguments that are passed directly to scatter().
hist_kwargs : dict
Keyword arguments that are passed directly to hist(), for both the
top and side histograms.
xhist_kwargs, yhist_kwargs : dict
Additional, margin-specific kwargs for the x or y histograms
respectively. These are used to update `hist_kwargs`
num_ticks : int
How many tick marks to use in each histogram's y-axis
labels : array-like
Optional NumPy array of labels that will be set on the collection
so that they can be accessed by a callback function.
hist_share : bool
If True, then all histograms will share the same frequency axes.
Useful for showing relative heights if you don't want to use the
hist_kwarg `normed=True`
marginal_histograms : bool
Set to False in order to disable marginal histograms and just use
as a normal scatterplot.
"""
scatter_kwargs = scatter_kwargs or {}
hist_kwargs = hist_kwargs or {}
xhist_kwargs = xhist_kwargs or {}
yhist_kwargs = yhist_kwargs or {}
yhist_kwargs.update(dict(orientation='horizontal'))
# Plot the scatter
coll = self.scatter_ax.scatter(x, y, **scatter_kwargs)
coll.labels = labels
if not marginal_histograms:
return
xhk = _updatecopy(hist_kwargs, xhist_kwargs)
yhk = _updatecopy(hist_kwargs, yhist_kwargs)
axhistx = self.divider.append_axes(
'top', size=self.hist_size,
pad=self.pad, sharex=self.scatter_ax, sharey=self.xfirst_ax)
axhisty = self.divider.append_axes(
'right', size=self.hist_size,
pad=self.pad, sharey=self.scatter_ax, sharex=self.yfirst_ax)
axhistx.yaxis.set_major_locator(
MaxNLocator(nbins=num_ticks, prune='both'))
axhisty.xaxis.set_major_locator(
MaxNLocator(nbins=num_ticks, prune='both'))
if not self.xfirst_ax and hist_share:
self.xfirst_ax = axhistx
if not self.yfirst_ax and hist_share:
self.yfirst_ax = axhisty
# Keep track of which axes are which, because looking into fig.axes
# list will get awkward....
self.top_hists.append(axhistx)
self.right_hists.append(axhisty)
# Scatter will deal with NaN, but hist will not. So clean the data
# here.
hx = _clean(x)
hy = _clean(y)
self.hxs.append(hx)
self.hys.append(hy)
# Only plot hists if there's valid data
if len(hx) > 0:
if len(hx) == 1:
_xhk = _updatecopy(orig=xhk, update_with=dict(bins=[hx[0], hx[0]]), keys=['bins'])
axhistx.hist(hx, **_xhk)
else:
axhistx.hist(hx, **xhk)
if len(hy) > 0:
if len(hy) == 1:
_yhk = _updatecopy(orig=yhk, update_with=dict(bins=[hy[0], hy[0]]), keys=['bins'])
axhisty.hist(hy, **_yhk)
else:
axhisty.hist(hy, **yhk)
# Turn off unnecessary labels -- for these, use the scatter's axes
# labels
for txt in axhisty.get_yticklabels() + axhistx.get_xticklabels():
txt.set_visible(False)
for txt in axhisty.get_xticklabels():
txt.set_rotation(-90)
def add_legends(self, xhists=True, yhists=False, scatter=True, **kwargs):
"""
Add legends to axes.
"""
axs = []
if xhists:
axs.extend(self.top_hists)
if yhists:
axs.extend(self.right_hists)
if scatter:
axs.append(self.scatter_ax)
for ax in axs:
ax.legend(**kwargs)
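# Usage sketch (illustrative only; the random data is hypothetical). It
# attaches a scatter plus marginal histograms to an existing Axes, which is
# how the class above is meant to be driven.
def _example_marginal_hist_scatter():
    fig, ax = plt.subplots()
    mhs = MarginalHistScatter(ax)
    x = np.random.randn(500)
    y = 0.5 * x + np.random.randn(500)
    mhs.append(x, y,
               scatter_kwargs=dict(s=5, alpha=0.5),
               hist_kwargs=dict(bins=30))
    return fig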
|
mit
|
drankye/arrow
|
python/pyarrow/tests/test_table.py
|
1
|
2477
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from pyarrow.compat import unittest
import pyarrow as arrow
A = arrow
import pandas as pd
class TestRowBatch(unittest.TestCase):
def test_basics(self):
data = [
A.from_pylist(range(5)),
A.from_pylist([-10, -5, 0, 5, 10])
]
num_rows = 5
descr = A.schema([A.field('c0', data[0].type),
A.field('c1', data[1].type)])
batch = A.RowBatch(descr, num_rows, data)
assert len(batch) == num_rows
assert batch.num_rows == num_rows
assert batch.num_columns == len(data)
class TestTable(unittest.TestCase):
def test_basics(self):
data = [
A.from_pylist(range(5)),
A.from_pylist([-10, -5, 0, 5, 10])
]
table = A.Table.from_arrays(('a', 'b'), data, 'table_name')
assert table.name == 'table_name'
assert len(table) == 5
assert table.num_rows == 5
assert table.num_columns == 2
assert table.shape == (5, 2)
for col in table.itercolumns():
for chunk in col.data.iterchunks():
assert chunk is not None
def test_pandas(self):
data = [
A.from_pylist(range(5)),
A.from_pylist([-10, -5, 0, 5, 10])
]
table = A.Table.from_arrays(('a', 'b'), data, 'table_name')
# TODO: Use this part once from_pandas is implemented
# data = {'a': range(5), 'b': [-10, -5, 0, 5, 10]}
# df = pd.DataFrame(data)
# A.Table.from_pandas(df)
df = table.to_pandas()
assert set(df.columns) == set(('a', 'b'))
assert df.shape == (5, 2)
assert df.ix[0, 'b'] == -10
|
apache-2.0
|
procoder317/scikit-learn
|
examples/feature_selection/plot_rfe_with_cross_validation.py
|
226
|
1384
|
"""
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
|
bsd-3-clause
|
ChanChiChoi/scikit-learn
|
sklearn/utils/tests/test_shortest_path.py
|
88
|
2828
|
from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
#set nonzero entries to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
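# Worked sketch (illustrative): for this small symmetric graph the slow
# reference implementation routes 0 -> 2 through node 1.
#   graph = np.array([[0., 1., 4.],
#                     [1., 0., 2.],
#                     [4., 2., 0.]])
#   floyd_warshall_slow(graph.copy())[0, 2] == 3.0   # 1 + 2 < 4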
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix += dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
|
bsd-3-clause
|
JosmanPS/scikit-learn
|
sklearn/linear_model/tests/test_perceptron.py
|
378
|
1815
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
|
bsd-3-clause
|
ChinaQuants/bokeh
|
bokeh/charts/builders/line_builder.py
|
2
|
8084
|
"""This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Line class which lets you build your Line charts just
passing the arguments to the Chart class and calling the proper functions.
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from __future__ import absolute_import
from six import iteritems
from itertools import chain
from ..builder import XYBuilder, create_and_build
from ..glyphs import LineGlyph
from ..attributes import DashAttr, ColorAttr
from ..data_source import NumericalColumnsAssigner
from ...models.sources import ColumnDataSource
# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------
def Line(data=None, x=None, y=None, **kws):
""" Create a line chart using :class:`LineBuilder <bokeh.charts.builders.line_builder.LineBuilder>` to
render the glyphs.
The line chart is typically used with column-oriented data, where each column
contains comparable measurements and the column names are treated as a categorical
variable for differentiating the measurement values. One of the columns can be used as
an index for either the x or y axis.
.. note::
Only the x or y axis can display multiple variables, while the other is used
as an index.
Args:
data (list(list), numpy.ndarray, pandas.DataFrame, list(pd.Series)): a 2d data
source with columns of data for each line.
x (str or list(str), optional): specifies variable(s) to use for x axis
y (str or list(str), optional): specifies variable(s) to use for y axis
In addition to the parameters specific to this chart,
:ref:`userguide_charts_defaults` are also accepted as keyword parameters.
.. note::
This chart type differs on input types as compared to other charts,
due to the way that line charts typically are plotting labeled series. For
example, a column of AAPL stock prices over time. Another way this could be
plotted is to have a DataFrame with a column of `stock_label` and columns of
`price`, which is the stacked format. Both should be supported, but the former
is the expected one. Internally, the latter format is being derived.
Returns:
:class:`Chart`: includes glyph renderers that generate the lines
Examples:
.. bokeh-plot::
:source-position: above
import numpy as np
from bokeh.charts import Line, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
xyvalues = np.array([[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]])
line = Line(xyvalues, title="line", legend="top_left", ylabel='Languages')
output_file('line.html')
show(line)
"""
kws['x'] = x
kws['y'] = y
return create_and_build(LineBuilder, data, **kws)
class LineBuilder(XYBuilder):
"""This is the Line class and it is in charge of plotting
Line charts in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed lines taking the references from the source.
"""
default_attributes = {'color': ColorAttr(),
'dash': DashAttr()}
dimensions = ['y', 'x']
column_selector = NumericalColumnsAssigner
@property
def measures(self):
if isinstance(self.y.selection, list):
return self.y.selection
elif isinstance(self.x.selection, list):
return self.x.selection
else:
return None
@property
def measure_input(self):
return isinstance(self.y.selection, list) or isinstance(self.x.selection, list)
def setup(self):
"""Handle input options that require transforming data and/or user selections."""
# handle special case of inputs as measures
if self.measure_input:
# Check if we stack measurements and by which attributes
stack_flags = {'color': self.attr_measurement('color'),
'dash': self.attr_measurement('dash')}
# collect the other columns used as identifiers, that aren't a measurement name
id_cols = [self.attributes[attr].columns
for attr, stack in iteritems(stack_flags) if not stack and
self.attributes[attr].columns != self.measures and
self.attributes[attr].columns is not None]
id_cols = list(chain.from_iterable(id_cols))
# if we have measures input, we need to stack by something, set default
if all(attr is False for attr in list(stack_flags.values())):
stack_flags['color'] = True
# stack the measurement dimension while keeping id columns
self._stack_measures(ids=id_cols)
# set the attributes to key off of the name of the stacked measurement, if stacked
if stack_flags['color']:
# color by the name of each variable
self.attributes['color'] = ColorAttr(columns='variable',
data=ColumnDataSource(self._data.df))
if stack_flags['dash']:
# dash by the name of each variable
self.attributes['dash'] = DashAttr(columns='variable',
data=ColumnDataSource(self._data.df))
# Handle when to use special column names
if self.x.selection is None and self.y.selection is not None:
self.x.selection = 'index'
elif self.x.selection is not None and self.y.selection is None:
self.y.selection = 'index'
def attr_measurement(self, attr_name):
"""Detect if the attribute has been given measurement columns."""
cols = self.attributes[attr_name].columns
return (cols is not None and (cols == self.y.selection or
cols == self.x.selection))
def _stack_measures(self, ids):
"""Transform data so that id columns are kept and measures are stacked in single column."""
if isinstance(self.y.selection, list):
dim = 'y'
if self.x.selection is not None:
ids.append(self.x.selection)
else:
dim = 'x'
if self.y.selection is not None:
ids.append(self.y.selection)
if len(ids) == 0:
ids = None
dim_prop = getattr(self, dim)
# transform our data by stacking the measurements into one column
self._data.stack_measures(measures=dim_prop.selection, ids=ids)
# update our dimension with the updated data
dim_prop.set_data(self._data)
def yield_renderers(self):
for group in self._data.groupby(**self.attributes):
glyph = LineGlyph(x=group.get_values(self.x.selection),
y=group.get_values(self.y.selection),
line_color=group['color'],
dash=group['dash'])
# save reference to composite glyph
self.add_glyph(group, glyph)
# yield each renderer produced by composite glyph
for renderer in glyph.renderers:
yield renderer
|
bsd-3-clause
|
OpenHero/cuda-convnet2
|
convdata.py
|
3
|
10322
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from python_util.data import *
import numpy.random as nr
import numpy as n
import random as r
from time import time
from threading import Thread
from math import sqrt
import sys
#from matplotlib import pylab as pl
from PIL import Image
from StringIO import StringIO
from time import time
import itertools as it
class JPEGBatchLoaderThread(Thread):
def __init__(self, dp, batch_num, label_offset, list_out):
Thread.__init__(self)
self.list_out = list_out
self.label_offset = label_offset
self.dp = dp
self.batch_num = batch_num
@staticmethod
def load_jpeg_batch(rawdics, dp, label_offset):
if type(rawdics) != list:
rawdics = [rawdics]
nc_total = sum(len(r['data']) for r in rawdics)
jpeg_strs = list(it.chain.from_iterable(rd['data'] for rd in rawdics))
labels = list(it.chain.from_iterable(rd['labels'] for rd in rawdics))
img_mat = n.empty((nc_total * dp.data_mult, dp.inner_pixels * dp.num_colors), dtype=n.float32)
lab_mat = n.zeros((nc_total, dp.get_num_classes()), dtype=n.float32)
dp.convnet.libmodel.decodeJpeg(jpeg_strs, img_mat, dp.img_size, dp.inner_size, dp.test, dp.multiview)
lab_vec = n.tile(n.asarray([(l[nr.randint(len(l))] if len(l) > 0 else -1) + label_offset for l in labels], dtype=n.single).reshape((nc_total, 1)), (dp.data_mult,1))
for c in xrange(nc_total):
lab_mat[c, [z + label_offset for z in labels[c]]] = 1
lab_mat = n.tile(lab_mat, (dp.data_mult, 1))
return {'data': img_mat[:nc_total * dp.data_mult,:],
'labvec': lab_vec[:nc_total * dp.data_mult,:],
'labmat': lab_mat[:nc_total * dp.data_mult,:]}
def run(self):
rawdics = self.dp.get_batch(self.batch_num)
p = JPEGBatchLoaderThread.load_jpeg_batch(rawdics,
self.dp,
self.label_offset)
self.list_out.append(p)
class ColorNoiseMakerThread(Thread):
def __init__(self, pca_stdevs, pca_vecs, num_noise, list_out):
Thread.__init__(self)
self.pca_stdevs, self.pca_vecs = pca_stdevs, pca_vecs
self.num_noise = num_noise
self.list_out = list_out
def run(self):
noise = n.dot(nr.randn(self.num_noise, 3).astype(n.single) * self.pca_stdevs.T, self.pca_vecs.T)
self.list_out.append(noise)
class ImageDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.data_mean = self.batch_meta['data_mean'].astype(n.single)
self.color_eig = self.batch_meta['color_pca'][1].astype(n.single)
self.color_stdevs = n.c_[self.batch_meta['color_pca'][0].astype(n.single)]
self.color_noise_coeff = dp_params['color_noise']
self.num_colors = 3
self.img_size = int(sqrt(self.batch_meta['num_vis'] / self.num_colors))
self.mini = dp_params['minibatch_size']
self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.img_size
self.inner_pixels = self.inner_size **2
self.border_size = (self.img_size - self.inner_size) / 2
self.multiview = dp_params['multiview_test'] and test
self.num_views = 5*2
self.data_mult = self.num_views if self.multiview else 1
self.batch_size = self.batch_meta['batch_size']
self.label_offset = 0 if 'label_offset' not in self.batch_meta else self.batch_meta['label_offset']
self.scalar_mean = 'scalar_mean' in dp_params and dp_params['scalar_mean']
# Maintain pointers to previously-returned data matrices so they don't get garbage collected.
self.data = [None, None] # These are pointers to previously-returned data matrices
self.loader_thread, self.color_noise_thread = None, None
self.convnet = dp_params['convnet']
self.num_noise = self.batch_size
self.batches_generated, self.loaders_started = 0, 0
self.data_mean_crop = self.data_mean.reshape((3,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((1,3*self.inner_size**2))
if self.scalar_mean >= 0:
self.data_mean_crop = self.scalar_mean
def showimg(self, img):
from matplotlib import pylab as pl
pixels = img.shape[0] / 3
size = int(sqrt(pixels))
img = img.reshape((3,size,size)).swapaxes(0,2).swapaxes(0,1)
pl.imshow(img, interpolation='nearest')
pl.show()
def get_data_dims(self, idx=0):
if idx == 0:
return self.inner_size**2 * 3
if idx == 2:
return self.get_num_classes()
return 1
def start_loader(self, batch_idx):
self.load_data = []
self.loader_thread = JPEGBatchLoaderThread(self,
self.batch_range[batch_idx],
self.label_offset,
self.load_data)
self.loader_thread.start()
def start_color_noise_maker(self):
color_noise_list = []
self.color_noise_thread = ColorNoiseMakerThread(self.color_stdevs, self.color_eig, self.num_noise, color_noise_list)
self.color_noise_thread.start()
return color_noise_list
def set_labels(self, datadic):
pass
def get_data_from_loader(self):
if self.loader_thread is None:
self.start_loader(self.batch_idx)
self.loader_thread.join()
self.data[self.d_idx] = self.load_data[0]
self.start_loader(self.get_next_batch_idx())
else:
# Set the argument to join to 0 to re-enable batch reuse
self.loader_thread.join()
if not self.loader_thread.is_alive():
self.data[self.d_idx] = self.load_data[0]
self.start_loader(self.get_next_batch_idx())
#else:
# print "Re-using batch"
self.advance_batch()
def add_color_noise(self):
# At this point the data already has 0 mean.
# So I'm going to add noise to it, but I'm also going to scale down
# the original data. This is so that the overall scale of the training
# data doesn't become too different from the test data.
s = self.data[self.d_idx]['data'].shape
cropped_size = self.get_data_dims(0) / 3
ncases = s[0]
if self.color_noise_thread is None:
self.color_noise_list = self.start_color_noise_maker()
self.color_noise_thread.join()
self.color_noise = self.color_noise_list[0]
self.color_noise_list = self.start_color_noise_maker()
else:
self.color_noise_thread.join(0)
if not self.color_noise_thread.is_alive():
self.color_noise = self.color_noise_list[0]
self.color_noise_list = self.start_color_noise_maker()
self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases*3, cropped_size))
self.color_noise = self.color_noise[:ncases,:].reshape((3*ncases, 1))
self.data[self.d_idx]['data'] += self.color_noise * self.color_noise_coeff
self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases, 3* cropped_size))
self.data[self.d_idx]['data'] *= 1.0 / (1.0 + self.color_noise_coeff) # <--- NOTE: This is the slow line, 0.25sec. Down from 0.75sec when I used division.
def get_next_batch(self):
self.d_idx = self.batches_generated % 2
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.get_data_from_loader()
# Subtract mean
self.data[self.d_idx]['data'] -= self.data_mean_crop
if self.color_noise_coeff > 0 and not self.test:
self.add_color_noise()
self.batches_generated += 1
return epoch, batchnum, [self.data[self.d_idx]['data'].T, self.data[self.d_idx]['labvec'].T, self.data[self.d_idx]['labmat'].T]
# Takes as input an array returned by get_next_batch
# Returns a (numCases, imgSize, imgSize, 3) array which can be
# fed to pylab for plotting.
# This is used by shownet.py to plot test case predictions.
def get_plottable_data(self, data, add_mean=True):
mean = self.data_mean_crop.reshape((data.shape[0],1)) if data.flags.f_contiguous or self.scalar_mean else self.data_mean_crop.reshape((data.shape[0],1))
return n.require((data + (mean if add_mean else 0)).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
class DummyConvNetLogRegDataProvider(LabeledDummyDataProvider):
def __init__(self, data_dim):
LabeledDummyDataProvider.__init__(self, data_dim)
self.img_size = int(sqrt(data_dim/3))
def get_next_batch(self):
epoch, batchnum, dic = LabeledDummyDataProvider.get_next_batch(self)
dic = {'data': dic[0], 'labels': dic[1]}
print dic['data'].shape, dic['labels'].shape
return epoch, batchnum, [dic['data'], dic['labels']]
# Returns the dimensionality of the two data matrices returned by get_next_batch
def get_data_dims(self, idx=0):
return self.batch_meta['num_vis'] if idx == 0 else 1
|
apache-2.0
|
jwiggins/scikit-image
|
doc/examples/features_detection/plot_corner.py
|
34
|
1160
|
"""
================
Corner detection
================
Detect corner points using the Harris corner detector and determine subpixel
position of corners.
.. [1] http://en.wikipedia.org/wiki/Corner_detection
.. [2] http://en.wikipedia.org/wiki/Interest_point_detection
"""
from matplotlib import pyplot as plt
from skimage import data
from skimage.feature import corner_harris, corner_subpix, corner_peaks
from skimage.transform import warp, AffineTransform
from skimage.draw import ellipse
tform = AffineTransform(scale=(1.3, 1.1), rotation=1, shear=0.7,
translation=(210, 50))
image = warp(data.checkerboard(), tform.inverse, output_shape=(350, 350))
rr, cc = ellipse(310, 175, 10, 100)
image[rr, cc] = 1
image[180:230, 10:60] = 1
image[230:280, 60:110] = 1
coords = corner_peaks(corner_harris(image), min_distance=5)
coords_subpix = corner_subpix(image, coords, window_size=13)
fig, ax = plt.subplots()
ax.imshow(image, interpolation='nearest', cmap=plt.cm.gray)
ax.plot(coords[:, 1], coords[:, 0], '.b', markersize=3)
ax.plot(coords_subpix[:, 1], coords_subpix[:, 0], '+r', markersize=15)
ax.axis((0, 350, 350, 0))
plt.show()
|
bsd-3-clause
|
mantidproject/mantid
|
qt/applications/workbench/workbench/plotting/plotscriptgenerator/figure.py
|
3
|
2056
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
from matplotlib import rcParams
from numpy import isclose
from mantidqt.widgets.plotconfigdialog.colorselector import convert_color_to_hex
from workbench.plotting.plotscriptgenerator.utils import convert_args_to_string
BASE_SUBPLOTS_COMMAND = "plt.subplots({})"
default_kwargs = {
'dpi': rcParams['figure.dpi'],
'edgecolor': convert_color_to_hex(rcParams['figure.edgecolor']),
'facecolor': convert_color_to_hex(rcParams['figure.facecolor']),
'figsize': rcParams['figure.figsize'],
'frameon': rcParams['figure.frameon'],
'ncols': 1,
'nrows': 1,
'num': ''
}
def get_subplots_command_kwargs(fig):
ax = fig.get_axes()[0]
kwargs = {
'dpi': fig.dpi,
'edgecolor': convert_color_to_hex(fig.get_edgecolor()),
'facecolor': convert_color_to_hex(fig.get_facecolor()),
'figsize': [fig.get_figwidth(), fig.get_figheight()],
'frameon': fig.frameon,
'ncols': ax.numCols,
'nrows': ax.numRows,
'num': fig.get_label(),
'subplot_kw': {
'projection': 'mantid'
},
}
return kwargs
def generate_subplots_command(fig):
kwargs = get_subplots_command_kwargs(fig)
kwargs = _remove_kwargs_if_default(kwargs)
return BASE_SUBPLOTS_COMMAND.format(convert_args_to_string(None, kwargs))
def _remove_kwargs_if_default(kwargs):
for kwarg, default_value in default_kwargs.items():
try:
if kwarg == 'figsize' and isclose(kwargs[kwarg], default_value, rtol=0.05).all():
kwargs.pop(kwarg)
elif kwargs[kwarg] == default_value:
kwargs.pop(kwarg)
except KeyError:
pass
return kwargs
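# Behaviour sketch (illustrative; the dict below is hypothetical): kwargs that
# match the rcParams-derived defaults above are dropped, so the generated
# plt.subplots(...) call stays minimal.
#   _remove_kwargs_if_default({'nrows': 1, 'ncols': 2, 'num': 'Fig1'})
#       -> {'ncols': 2, 'num': 'Fig1'}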
|
gpl-3.0
|
CDSFinance/zipline
|
zipline/data/benchmarks.py
|
33
|
4096
|
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from datetime import datetime
import csv
from functools import partial
import requests
import pandas as pd
from six import iteritems
from . loader_utils import (
date_conversion,
source_to_records,
Mapping
)
DailyReturn = collections.namedtuple('DailyReturn', ['date', 'returns'])
class BenchmarkDataNotFoundError(Exception):
pass
_BENCHMARK_MAPPING = {
# Need to add 'symbol'
'volume': (int, 'Volume'),
'open': (float, 'Open'),
'close': (float, 'Close'),
'high': (float, 'High'),
'low': (float, 'Low'),
'adj_close': (float, 'Adj Close'),
'date': (partial(date_conversion, date_pattern='%Y-%m-%d'), 'Date')
}
def benchmark_mappings():
return {key: Mapping(*value)
for key, value
in iteritems(_BENCHMARK_MAPPING)}
def get_raw_benchmark_data(start_date, end_date, symbol):
# create benchmark files
# ^GSPC 19500103
params = collections.OrderedDict((
('s', symbol),
# start_date month, zero indexed
('a', start_date.month - 1),
# start_date day
('b', start_date.day),
# start_date year
('c', start_date.year),
# end_date month, zero indexed
('d', end_date.month - 1),
# end_date day str(int(todate[6:8])) #day
('e', end_date.day),
# end_date year str(int(todate[0:4]))
('f', end_date.year),
# daily frequency
('g', 'd'),
))
res = requests.get('http://ichart.finance.yahoo.com/table.csv',
params=params, stream=True)
if not res.ok:
raise BenchmarkDataNotFoundError("""
No benchmark data found for date range.
start_date={start_date}, end_date={end_date}, url={url}""".strip().
format(start_date=start_date,
end_date=end_date,
url=res.url))
return csv.DictReader(res.text.splitlines())
def get_benchmark_data(symbol, start_date=None, end_date=None):
"""
Benchmarks from Yahoo.
"""
if start_date is None:
start_date = datetime(year=1950, month=1, day=3)
if end_date is None:
end_date = datetime.utcnow()
raw_benchmark_data = get_raw_benchmark_data(start_date, end_date, symbol)
mappings = benchmark_mappings()
return source_to_records(mappings, raw_benchmark_data)
def get_benchmark_returns(symbol, start_date=None, end_date=None):
"""
Returns a list of return percentages in chronological order.
"""
if start_date is None:
start_date = datetime(year=1950, month=1, day=3)
if end_date is None:
end_date = datetime.utcnow()
# Get the benchmark data and convert it to a list in chronological order.
data_points = list(get_benchmark_data(symbol, start_date, end_date))
data_points.reverse()
# Calculate the return percentages.
benchmark_returns = []
for i, data_point in enumerate(data_points):
if i == 0:
curr_open = data_points[i]['open']
returns = (data_points[i]['close'] - curr_open) / curr_open
else:
prev_close = data_points[i - 1]['close']
returns = (data_point['close'] - prev_close) / prev_close
date = pd.tseries.tools.normalize_date(data_point['date'])
daily_return = DailyReturn(date=date, returns=returns)
benchmark_returns.append(daily_return)
return benchmark_returns
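# Worked sketch of the return arithmetic above (illustrative; the prices are
# hypothetical and no Yahoo request is made). Day 0 uses open-to-close, later
# days use close-to-close:
#   closes = [100.0, 102.0, 101.0], day-0 open = 99.0
#   day 0: (100.0 - 99.0) / 99.0   ~= 0.0101
#   day 1: (102.0 - 100.0) / 100.0  = 0.02
#   day 2: (101.0 - 102.0) / 102.0 ~= -0.0098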
|
apache-2.0
|
daviddesancho/RepeatDesigner
|
examples/simple_design.py
|
1
|
2062
|
#!/usr/bin/env python
# # Villin simple design
# Here we attempt to do the simplest possible modelling, which just tries to introduce
# new side chains at random that improve the total energy. We use the simple villin
# headpiece subdomain as an example. The more advanced usage designed for repeat
# proteins is not required. We use a favourite protein as a toy model.
import matplotlib.pyplot as plt
import seaborn as sns
from repeatdesigner import designer as rd
# We will optimize a single residue, which happens to have been mutated experimentally
# into a histidine. In this case things should work so that we get a rather well
# converged optimization.
# We create an instance of the Design class, defining as target residue 25 in the sequence.
villin_des = rd.Design(pdb="pdbs/1vii.pdb", targets=[27])
# Then we create the optimizer, passing arguments like the inverse temperature (`beta`)
# that will determine the acceptance, the length of the run (`len_mc`) and the number of
# runs (`nruns`, always think about your number of processors).
mc_villin = rd.Optimizer(villin_des, beta=1e-2, len_mc=100, nruns=50)
mc_villin.run_mc()
fig, ax = plt.subplots()
for k,v in mc_villin.models.iteritems():
ax.plot(v['score'])
ax.set_ylabel('Energy', fontsize=14)
ax.set_xlabel('MC steps', fontsize=14)
plt.show()
import Bio.PDB
import Bio.Seq
import Bio.SeqUtils
import Bio.pairwise2
import Bio.SeqRecord
import Bio.Align.AlignInfo
for k,v in mc_villin.models.iteritems():
print "%3i %10.2f %s"%(k, v['score'][-1][0], v['seq'])
# Bio.Seq.Seq(''.join([Bio.SeqUtils.seq1(x.get_resname())
# for x in v['model'].get_residues()])))
print
sequences = [Bio.SeqRecord.SeqRecord(x['seq']) for k,x in mc_villin.models.iteritems()]
align = Bio.Align.MultipleSeqAlignment(sequences)
summary_align = Bio.Align.AlignInfo.SummaryInfo(align)
print " Consensus sequences:\n -------------------"
print " WT ",villin_des.seq
for t in [0.05, 0.1, 0.2, 0.5, 0.9]:
print "%.2f"%t, summary_align.dumb_consensus(threshold=(t))
|
lgpl-3.0
|
dingocuster/scikit-learn
|
examples/neighbors/plot_approximate_nearest_neighbors_scalability.py
|
225
|
5719
|
"""
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
of LSHForest. Query time is compared with the brute force method in exact
nearest neighbor search for the same index sizes. The brute force queries have a
very predictable linear scalability with the index (full scan). The LSHForest
index has a sub-linear scalability profile but can be slower for small datasets.
The second plot shows the speedup when using approximate queries vs brute force
exact queries. The speedup tends to increase with the dataset size but should
reach a plateau typically when doing queries on datasets with millions of
samples and a few hundred dimensions. Higher dimensional datasets tend to
benefit more from LSHForest indexing.
The break even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The speed of the decrease depends mostly on the LSHForest parameters and
the dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5
# Initialize the range of `n_samples`
n_samples_values = np.logspace(np.log10(n_samples_min),
np.log10(n_samples_max),
n_steps).astype(np.int)
# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
n_features=n_features, centers=n_centers, shuffle=True,
random_state=0)
queries = all_data[:n_queries]
index_data = all_data[n_queries:]
# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []
# Calculate the average query time
for n_samples in n_samples_values:
X = index_data[:n_samples]
# Initialize LSHForest for queries of a single neighbor
lshf = LSHForest(n_estimators=20, n_candidates=200,
n_neighbors=10).fit(X)
nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
n_neighbors=10).fit(X)
time_approx = []
time_exact = []
accuracy = []
for i in range(n_iter):
# pick one query at random to study query time variability in LSHForest
query = queries[rng.randint(0, n_queries)]
t0 = time.time()
exact_neighbors = nbrs.kneighbors(query, return_distance=False)
time_exact.append(time.time() - t0)
t0 = time.time()
approx_neighbors = lshf.kneighbors(query, return_distance=False)
time_approx.append(time.time() - t0)
accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())
average_time_exact = np.mean(time_exact)
average_time_approx = np.mean(time_approx)
speedup = np.array(time_exact) / np.array(time_approx)
average_speedup = np.mean(speedup)
mean_accuracy = np.mean(accuracy)
std_accuracy = np.std(accuracy)
print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
"accuracy: %0.2f +/-%0.2f" %
(n_samples, average_time_exact, average_time_approx, average_speedup,
mean_accuracy, std_accuracy))
accuracies.append(mean_accuracy)
std_accuracies.append(std_accuracy)
average_times_exact.append(average_time_exact)
average_times_approx.append(average_time_approx)
std_times_approx.append(np.std(time_approx))
average_speedups.append(average_speedup)
std_speedups.append(np.std(speedup))
# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', fontsize='small')
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
"nearest neighbors queries")
# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")
# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")
plt.show()
|
bsd-3-clause
|
jresendiz27/PatternRecognition
|
Python/WebModule/MathModule/MathModule/views.py
|
2
|
1104
|
__author__ = 'alberto'
from django.http import HttpResponse
#import Image
from django.shortcuts import render
'''
import Image
import matplotlib.pyplot as plt
from scipy import misc
jpegImage = Image.open("/home/alberto/Downloads/WP_001922.jpg")
print jpegImage.bits, jpegImage.size, jpegImage.format
print "\nok"
print "---------------------------------------\n"
l = misc.lena()
image = misc.imread("/home/alberto/Downloads/WP_001922.jpg")
plt.imshow(image)
plt.show()
print "\n"
print type(image)
print "\n"
print image.shape, image.dtype
'''
# To load the initial page
def index(request):
return render(request, 'index.html')
# To return the cube content
def cubo(request):
contenido = "cubo"
return HttpResponse(contenido)
# To load the classes view
def clases(request):
contenido = "clases"
return HttpResponse(contenido)
# To load the images view
def imagen(request):
contenido = "imagen"
return HttpResponse(contenido)
# To load the view with the already-processed flags
def banderas(request):
contenido = "Banderas"
return HttpResponse(contenido)
|
gpl-2.0
|
zhenv5/scikit-learn
|
examples/semi_supervised/plot_label_propagation_versus_svm_iris.py
|
286
|
2378
|
"""
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison for decision boundary generated on iris dataset
between Label Propagation and SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
|
bsd-3-clause
|
wasade/networkx
|
networkx/drawing/tests/test_pylab.py
|
45
|
1137
|
"""
Unit tests for matplotlib drawing functions.
"""
import os
from nose import SkipTest
import networkx as nx
class TestPylab(object):
@classmethod
def setupClass(cls):
global plt
try:
import matplotlib as mpl
mpl.use('PS',warn=False)
import matplotlib.pyplot as plt
plt.rcParams['text.usetex'] = False
except ImportError:
raise SkipTest('matplotlib not available.')
except RuntimeError:
raise SkipTest('matplotlib not available.')
def setUp(self):
self.G=nx.barbell_graph(5,10)
def test_draw(self):
try:
N=self.G
nx.draw_spring(N)
plt.savefig("test.ps")
nx.draw_random(N)
plt.savefig("test.ps")
nx.draw_circular(N)
plt.savefig("test.ps")
nx.draw_spectral(N)
plt.savefig("test.ps")
nx.draw_spring(N.to_directed())
plt.savefig("test.ps")
finally:
try:
os.unlink('test.ps')
except OSError:
pass
|
bsd-3-clause
|
NelisVerhoef/scikit-learn
|
examples/cluster/plot_dict_face_patches.py
|
337
|
2747
|
"""
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial_fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
|
bsd-3-clause
|
ericxk/MachineLearningExercise
|
perceptron.py
|
1
|
1068
|
# -*- coding:utf-8 -*-
########################################
# perceptron: perceptron
# Author : xuke
# Date : 2015-12-16
########################################
import matplotlib.pyplot as plt
import numpy
def sign(x,w,b):
res = b
for i in xrange(len(x)-1):
res += x[i+1]*w[i]
return res
def perceptron(data):
w = [-1]*(len(data[0])-1)
b = 2
i = 0
end_flag = 1000
end_num = 0
learn_rate=2
while(i< len(data) and end_num<end_flag):
d = data[i]
end_num += 1
if sign(d,w,b)*d[0]<=0:
b += learn_rate*d[0]
w = [ w1+d1 for w1,d1 in zip(w, [t*d[0]*learn_rate for t in d[1:]])]
i = 0
else:
i += 1
print "iter:",end_num
return w,b
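# Note (added for illustration): the loop above implements the classic
# perceptron update rule. Whenever a sample (y, x) with label y in {-1, +1}
# is misclassified, i.e. y * (w . x + b) <= 0, the parameters are adjusted as
#     w <- w + learn_rate * y * x
#     b <- b + learn_rate * y
# and the scan over the data restarts from the first sample.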
def draw_line(data,w,b):
flag = ['b*','rs','g+','sb', 'db', '<b', 'pb']
for i in data:
plt.plot(i[1],i[2],flag[i[0]])
x = numpy.linspace(-10,10,100)
plt.plot(x,(b+w[0]*x)*(-1)/w[1])
plt.show()
if __name__=='__main__':
data=[
[1,4,3],
[-1,1,1],
[-1,3,1],
[1,1,3],
[1,4,6],
[-1,-1,3],
[1,3,9],
[-1,4,1],
[1,4,4],
]
w,b = perceptron(data)
print w,b
draw_line(data,w,b)
|
mit
|
jdparkins/MasiPlot
|
hapi.py
|
1
|
628709
|
# -*- coding: utf-8 -*-
'''
This module provides an access to the HITRAN data.
Data is downloaded and cached.
This module serves as a simple database manager frontend.
API is aimed to be RESTful, which means that interaction
between local API and remote data-server will be held
via sending RESTful queries (API->remote) and
receiving data preferrably in text format (remote->API).
Object are supposed to be implemented by structures/dicts
as they present in almost any programming language.
Trying to retain functional style for this API.
'''
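# Typical top-level usage sketch (added for illustration; db_begin, fetch and
# absorptionCoefficient_Lorentz are assumed to be the public HAPI routines
# defined further down in this module):
#     db_begin('hapi_data')                    # folder-backed local cache
#     fetch('H2O', 1, 1, 3400., 4100.)         # download lines into table 'H2O'
#     nu, coef = absorptionCoefficient_Lorentz(SourceTables='H2O')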
# import httplib
# import urllib2
import json
import os, os.path
import re
from os import listdir
from numpy import zeros, array, zeros, setdiff1d, ndarray, arange
from numpy import place, where, insert, real, polyval
from numpy import complex128, complex64, int64, int32, float64, float32
from numpy import sqrt, abs, exp, pi, log, sin, cos, tan
from numpy import convolve
from numpy import flipud
from numpy.fft import fft, fftshift
from numpy import linspace, floor
from numpy import any, minimum, maximum
from numpy import modf
from numpy import sort as npsort
from bisect import bisect
# from collections import OrderedDict
from warnings import warn, simplefilter
import pydoc
# Enable warning repetitions
simplefilter('always', UserWarning)
# Python 3 compatibility
try:
    import urllib.request as urllib2
    import http.client as httplib
except ImportError:
    import urllib2
    import httplib
HAPI_VERSION = '1.1.0.6'
# CHANGES:
# FIXED GRID BUG (ver. 1.1.0.1)
# FIXED OUTPUT FORMAT FOR CROSS-SECTIONS (ver. 1.1.0.1)
# ADDED CPF BY SCHREIER (JQSRT_112_2011) (ver. 1.1.0.2)
# OPTIMIZED EXPRESSION EVALUATIONS FOR SELECT (ver. 1.1.0.3)
# ADDED SUPPORT FOR MIXTURES (ver. 1.1.0.4)
# ADDED SUPPORT FOR USER-DEFINED ENV DEPENDENCES (ver. 1.1.0.5)
# ADDED PROFILE SELECTION (ALPHA) (ver. 1.1.0.6)
# version header
# print('HAPI version: %s' % HAPI_VERSION)
# print('To get the most up-to-date version please check http://hitran.org/hapi')
# print('ATTENTION: Python versions of partition sums from TIPS-2017 are available at http://hitran.org/suppl/TIPS/')
# print('To use them in HAPI ver. 1.1.0.6, use partitionFunction parameter of the absorptionCoefficient_ routine.')
# define precision
__ComplexType__ = complex128
__IntegerType__ = int64
__FloatType__ = float64
# define zero
cZero = __FloatType__(0.)
# physical constants
cBolts = 1.380648813E-16 # erg/K, CGS
cc = 2.99792458e10 # cm/s, CGS
hh = 6.626196e-27 # erg*s, CGS
# computational constants
cSqrtLn2divSqrtPi = 0.469718639319144059835
cLn2 = 0.6931471805599
cSqrtLn2 = 0.8325546111577
cSqrt2Ln2 = 1.1774100225
# declare global variables
GLOBAL_DEBUG = False
if GLOBAL_DEBUG: warn('GLOBAL_DEBUG is set to True!')
GLOBAL_CURRENT_DIR = '.'
GLOBAL_HITRAN_APIKEY = 'e20e4bd3-e12c-4931-99e0-4c06e88536bd'
GLOBAL_USER = 'user'
GLOBAL_REQUISITES = []
GLOBAL_CONNECTION = []
GLOBAL_DATABASE = 'hitran'
LOCAL_HOST = 'http://localhost'
# DEBUG switch
if GLOBAL_DEBUG:
GLOBAL_HOST = LOCAL_HOST + ':8000' # localhost
else:
GLOBAL_HOST = 'http://hitran.org'
# this is a backup url in the case GLOBAL_HOST does not work
GLOBAL_HOST_BACKUP = 'http://hitranazure.cloudapp.net/'
# In this "robust" version of arange the grid doesn't suffer
# from the shift of the nodes due to error accumulation.
# This effect is pronounced only if the step is sufficiently small.
def arange_(lower, upper, step):
npnt = floor((upper - lower) / step) + 1
upper_new = lower + step * (npnt - 1)
if abs((upper - upper_new) - step) < 1e-10:
upper_new += step
npnt += 1
    return linspace(lower, upper_new, int(npnt))  # cast to int: recent numpy requires an integer point count
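# Illustrative sketch (not part of the original HAPI code): arange_ keeps the
# endpoint consistent with the step, e.g. arange_(0., 1., 0.1) yields 11 nodes
# from 0.0 to 1.0 inclusive, whereas naive accumulation of 0.1 steps can drop
# or duplicate the last node due to floating-point rounding.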
# interface for checking of variable's existance
def empty(Instance):
return True if Instance else False
# general interface for getattr
def getAttribute(Object, Attribute):
return getattr(Object, Attribute)
# general interface for setattr
def setAttribute(Object, Attribute, Value):
setattr(Object, Attribute, Value)
return
# UNPARSED QUERY OBJECT
# uses formal language (SQL, noSQL, custom...)
GlobalQueryString = ''
# PARSED QUERY OBJECT
# = prototype for a Query instance
# there should be a getAttrbute/setSettribute functions defined
# For Django: Query=QuerySet (as an example)
Query = {}
# prototype for cache storage
# there must be function for record/retrieve
# caching is performed by the value of Query
# cache parameters: (query+table_name)
# if there is already table with such query, copy it
# if there is already tble with such query AND table_name,
# return it as is => IT MAY DEPEND ON CERTAIN QUERY TYPE!!
TABLES = {} # hash/dictionary
# ---------- CONNECTION MANAGEMENT-------------
# interface for establishing HTTP connection
# can return object/structure/handle
def setupConnection(Host=GLOBAL_HOST):
Connection = httplib.HTTPConnection(Host)
if not empty(Connection):
return Connection
else:
raise Exception('can''t setup connection')
# interface for HTTP-get method
# Connection must be established before use
def httpGet(URL, Connection=GLOBAL_CONNECTION):
Method = 'get'
ServerResponse = Connection.request(Method, URL)
return ServerResponse
# parse local data language to remote frontend
def parseToFrontend(Query, Host=GLOBAL_HOST):
# convert Query object to server frontend's
# query language
pass
def prepareURL(Query, Connection=GLOBAL_CONNECTION):
# make full URL from server name and it's parameters
# considering server's frontend query language
Host = getAttribute(Connection, 'host')
HostQuery = parseToFrontend(Query)
URL = Host + HostQuery
return URL
# stream raw data from the server
# the data is assumed to be very large that
# ordinary get is unefficient
def streamRawDataRemote(Query, Connection=GLOBAL_CONNECTION):
pass
# collect raw data in whatever format server gives it
def getRawDataRemote(Query, Connection=GLOBAL_CONNECTION):
URL = prepareURL(Query, Connection)
ServerResponse = httpGet(URL, Connection)
return ServerResponse
## parse raw data
# def parseRawData(RawData)
# pass
# ---------- CONNECTION MANAGEMEND END --------
# Two types of interaction between API and DB:
# 1) via API library
# 2) via REST http protocol (torrent-like)
# ---------- NODE MANAGEMENT ------------------
# An interface for a node manager will follow soon.
# This is an implementation in Python
# Different implementations are language-specific.
# Default node with simple DB engine
# Prototype for a global nodelist for a given host
# Each node has it's unique ID, host name and
# node name within it's host
NODE_NAME = 'local'
GLOBAL_NODENAMES = {
0: 'hitran-main',
1: 'local'
}
GLOBAL_NODELIST = {
0: { # main HITRAN node
'host': GLOBAL_HOST,
'ACCESS_KEY': '9b6a7975-2a84-43d8-920e-f4dea9db6805' # guest
},
1: { # local node prototype
'host': LOCAL_HOST,
'ACCESS_KEY': '6cfd7040-24a6-4197-81f9-6e25e50005b2', # admin
}
}
def createNode(NodeID, NodeList=GLOBAL_NODELIST):
    # create a node, throw if exists
    node = NodeList.get(NodeID)
    if node: raise Exception('node %s already exists' % NodeID)
    NodeList[NodeID] = {}
    pass
def getNodeIDs(NodeList=GLOBAL_NODELIST):
# return list of all available nodes
return NodeList.keys()
def getNodeProperty(NodeID, PropName, NodeList=GLOBAL_NODELIST):
    # get a property for certain node
    # if not found throw exception
    node = NodeList.get(NodeID)
    if node:
        prop = node.get(PropName)
        if prop:
            return prop
        else:
            raise Exception("node %s doesn't have property %s" % (NodeID, PropName))
    else:
        raise Exception('no such node %s' % NodeID)
def setNodeProperty(NodeID, PropName, PropValue, NodeList=GLOBAL_NODELIST):
    # set a property for certain node
    # throw exception if node not found
    # if the property doesn't exist it will appear
    node = NodeList.get(NodeID)
    if not node: raise Exception('no such node %s ' % NodeID)
    node[PropName] = PropValue
    return
def resolveNodeID(NodeName, NodeNames=GLOBAL_NODENAMES):
for NodeID in NodeNames.keys():
if NodeNames[NodeID] == NodeName: return NodeID
def checkAccess(DBName, TableName, NodeName, UserName, Requisites, NodeList=GLOBAL_NODELIST,
NodeNames=GLOBAL_NODENAMES):
# simple node-level authentication (bridge to AUTH system)
NodeID = resolveNodeID(NodeName, NodeNames)
Node = NodeList[NodeID]
if Requisites.key in Node['keys_allowed']:
return True
else:
return False
# ---------- NODE MANAGEMENT END --------------
# ---------- NODE AUTH SYSTEM -----------------
# AUTH SYSTEM is tightly connected to Node manager.
# Prototype for authentication system.
# AUTH is responsible for giving an access privileges to all users.
# Each users has a key ACCESS_KEY which is stored in
# a special database HOST:ACCESS_KEYS on a host.
# Every node has a separate privileges list connected with
# each key.
# The current auth system is based on secret keys of access
# Default key is 'admin', it's created seamlessly for a local admin.
GLOBAL_PRIVILEGES = {
'admin': {
'ACCESS_KEY': '6cfd7040-24a6-4197-81f9-6e25e50005b2',
'LEVEL': 'ADMIN'
},
'guest': {
'ACCESS_KEY': '9b6a7975-2a84-43d8-920e-f4dea9db6805',
'LEVEL': 'USER'
}
}
def addUser():
pass
def deleteUser():
pass
def authenticate(UserName, Requisites, Privileges=GLOBAL_PRIVILEGES):
    # Authentication
    key_list = [Privileges[User]['ACCESS_KEY'] for User in Privileges.keys()]
    return True if Requisites.AccessKey in key_list else False
def checkPrivileges(Path, UserName=GLOBAL_USER, Requisites=GLOBAL_REQUISITES,
                    Privileges=GLOBAL_PRIVILEGES, NodeList=GLOBAL_NODELIST, NodeNames=GLOBAL_NODENAMES):
# Privileges are checked before executing every query (needs optimization)
# Path example: SOME_DB::SOME_TABLE::SOME_NODE
if not authenticate(UserName, Requisites, Privileges): return False
(DBName, TableName, NodeName) = Path.split('::')
# loop on all nodes , use NODE_MANAGER's functions instead of
# working with GLOBAL_NODELIST directly
if not checkAccess(DBName, TableName, NodeName, UserName, Requisites, NodeList, NodeNames):
return False
return True
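# Illustrative sketch (not part of the original HAPI code): a privilege check
# takes a '::'-separated path of database, table and node, e.g.
#     checkPrivileges('hitran::sampletab::local', UserName='user')
# which first authenticates the user's access key and then performs the
# node-level access check via checkAccess.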
# ---------- NODE AUTH SYSTEM END -------------
# ---------- DATABASE FRONTEND ----------------
# Structure:
# DB::TABLE::NODE
# DB - distributed database
# TABLE - table within the current database
# NODE - instance of this API with fixed DB backend
# !! parameter HOST is deprecated
# HOST - computer at which the NODE/ENGINE is deployed
# TABLE should be considered as schema-free collection
# (e.g. MongoDB-type)
### Two databases (DB) - GLOBAL (one) and LOCAL (many)
# Every DB has an ACCESS_KEY providing an access to it
# User can create a database and it will contain
# a list of ACCESS_KEY's for authentication.
### GLOBAL AND LOCAL are distributed databases.
### A user can create his GLOBAL database and open an access to it.
### GLOBAL access implementation:
### GLOBAL is a distributed database
# The DB frontend contains interfaces to
# the standard procedures of data creation and
# retrieval of an "average" DBMS.
# ("collection" = table)
#
# Levels of access: (DB permissions implementation)
# 0:USER read-only operations ("select")
# 1:MANAGER manage single DB (create/delete docs)
# 2:ADMIN manage multiple DB's (create/delete DB)
#
# Every ACCESS_KEY has it's own access level.
#
# Commands to implement:
#
# ) create DATABASE
# ) create ACCESS_KEY
# (seamlessly for the local user)
# ) select from LOCAL/GLOBAL doc (cached!)
# ) access database
# (seamlessly for the local user)
# ) create/delete doc
# ) copy/clone LOCAL doc
# ) "create collection as select * from HOST:ENGINE:DB:COLLECTION"
# (other types of table creations are forbidden)
# DB frontend is adapted to denormalized
# schema-fixed tables or schema-independent documents.
# DB frontend is connected to multiple backends
# which are largely language-specific.
### ATTENTION: since the system is distributed,
### the table/document caching is supposed to
### be in the frontend.
### Current higher-level implementation
### implies the query-based caching, i.e.
### cache lookup is performed by the value
### of Query structure/object.
# ---------------------------------------------------------------
# ---------------------------------------------------------------
# LOCAL DATABASE MANAGEMENT SYSTEM
# ---------------------------------------------------------------
# ---------------------------------------------------------------
# ---------------------------------------------------------------
# DATABASE BACKEND: simple text files, parsed into a python lists
# Use a directory as a database. Each table is stored in a
# separate text file. Parameters in text are position-fixed.
BACKEND_DATABASE_NAME_DEFAULT = '.'
VARIABLES = {}
VARIABLES['BACKEND_DATABASE_NAME'] = BACKEND_DATABASE_NAME_DEFAULT
# For this node local DB is schema-dependent!
LOCAL_TABLE_CACHE = {
'sampletab': { # table
'header': { # header
'order': ('column1', 'column2', 'column3'),
'format': {
'column1': '%10d',
'column2': '%20f',
'column3': '%30s'
},
'default': {
'column1': 0,
'column2': 0.0,
'column3': ''
},
'number_of_rows': 3,
'size_in_bytes': None,
'table_name': 'sampletab',
'table_type': 'strict'
}, # /header
'data': {
'column1': [1, 2, 3],
'column2': [10.5, 11.5, 12.5],
'column3': ['one', 'two', 'three']
}, # /data
} # /table
} # hash-map of tables
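# Illustrative sketch (not part of the original HAPI code): with the formats
# declared for 'sampletab' above, one serialized row of sampletab.data is the
# column-fixed concatenation
#     '%10d' % 1  +  '%20f' % 10.5  +  '%30s' % 'one'
# i.e. an integer right-justified in 10 characters, a float in 20 and a
# string in 30, with no separators between the fields.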
# FORMAT CONVERSION LAYER
# converts between TRANSPORT_FORMAT and OBJECT_FORMAT
HITRAN_FORMAT_160 = {
'M': {'pos': 1, 'len': 2, 'format': '%2d'},
'I': {'pos': 3, 'len': 1, 'format': '%1d'},
'nu': {'pos': 4, 'len': 12, 'format': '%12f'},
'S': {'pos': 16, 'len': 10, 'format': '%10f'},
'R': {'pos': 26, 'len': 0, 'format': '%0f'},
'A': {'pos': 26, 'len': 10, 'format': '%10f'},
'gamma_air': {'pos': 36, 'len': 5, 'format': '%5f'},
'gamma_self': {'pos': 41, 'len': 5, 'format': '%5f'},
'E_': {'pos': 46, 'len': 10, 'format': '%10f'},
'n_air': {'pos': 56, 'len': 4, 'format': '%4f'},
'delta_air': {'pos': 60, 'len': 8, 'format': '%8f'},
'V': {'pos': 68, 'len': 15, 'format': '%15s'},
'V_': {'pos': 83, 'len': 15, 'format': '%15s'},
'Q': {'pos': 98, 'len': 15, 'format': '%15s'},
'Q_': {'pos': 113, 'len': 15, 'format': '%15s'},
'Ierr': {'pos': 128, 'len': 6, 'format': '%6s'},
'Iref': {'pos': 134, 'len': 12, 'format': '%12s'},
'flag': {'pos': 146, 'len': 1, 'format': '%1s'},
'g': {'pos': 147, 'len': 7, 'format': '%7f'},
'g_': {'pos': 154, 'len': 7, 'format': '%7f'}
}
# This should be generating from the server's response
HITRAN_DEFAULT_HEADER = {
"table_type": "column-fixed",
"size_in_bytes": -1,
"table_name": "###",
"number_of_rows": -1,
"order": [
"molec_id",
"local_iso_id",
"nu",
"sw",
"a",
"gamma_air",
"gamma_self",
"elower",
"n_air",
"delta_air",
"global_upper_quanta",
"global_lower_quanta",
"local_upper_quanta",
"local_lower_quanta",
"ierr",
"iref",
"line_mixing_flag",
"gp",
"gpp"
],
"format": {
"a": "%10.3E",
"gamma_air": "%5.4f",
"gp": "%7.1f",
"local_iso_id": "%1d",
"molec_id": "%2d",
"sw": "%10.3E",
"local_lower_quanta": "%15s",
"local_upper_quanta": "%15s",
"gpp": "%7.1f",
"elower": "%10.4f",
"n_air": "%4.2f",
"delta_air": "%8.6f",
"global_upper_quanta": "%15s",
"iref": "%12s",
"line_mixing_flag": "%1s",
"ierr": "%6s",
"nu": "%12.6f",
"gamma_self": "%5.3f",
"global_lower_quanta": "%15s"
},
"default": {
"a": 0.0,
"gamma_air": 0.0,
"gp": "FFF",
"local_iso_id": 0,
"molec_id": 0,
"sw": 0.0,
"local_lower_quanta": "000",
"local_upper_quanta": "000",
"gpp": "FFF",
"elower": 0.0,
"n_air": 0.0,
"delta_air": 0.0,
"global_upper_quanta": "000",
"iref": "EEE",
"line_mixing_flag": "EEE",
"ierr": "EEE",
"nu": 0.0,
"gamma_self": 0.0,
"global_lower_quanta": "000"
},
"description": {
"a": "Einstein A-coefficient in s-1",
"gamma_air": "Air-broadened Lorentzian half-width at half-maximum at p = 1 atm and T = 296 K",
"gp": "Upper state degeneracy",
"local_iso_id": "Integer ID of a particular Isotopologue, unique only to a given molecule, in order or abundance (1 = most abundant)",
"molec_id": "The HITRAN integer ID for this molecule in all its isotopologue forms",
"sw": "Line intensity, multiplied by isotopologue abundance, at T = 296 K",
"local_lower_quanta": "Rotational, hyperfine and other quantum numbers and labels for the lower state of a transition",
"local_upper_quanta": "Rotational, hyperfine and other quantum numbers and labels for the upper state of a transition",
"gpp": "Lower state degeneracy",
"elower": "Lower-state energy",
"n_air": "Temperature exponent for the air-broadened HWHM",
"delta_air": "Pressure shift induced by air, referred to p=1 atm",
"global_upper_quanta": "Electronic and vibrational quantum numbers and labels for the upper state of a transition",
"iref": "Ordered list of reference identifiers for transition parameters",
"line_mixing_flag": "A flag indicating the presence of additional data and code relating to line-mixing",
"ierr": "Ordered list of indices corresponding to uncertainty estimates of transition parameters",
"nu": "Transition wavenumber",
"gamma_self": "Self-broadened HWHM at 1 atm pressure and 296 K",
"global_lower_quanta": "Electronic and vibrational quantum numbers and labels for the lower state of a transition"
},
}
PARAMETER_META = \
{
"global_iso_id": {
"id": 1,
"name": "global_iso_id",
"name_html": "Global isotopologue ID",
"table_name": "",
"description": "Unique integer ID of a particular isotopologue: every global isotopologue ID is unique to a particular species, even between different molecules. The number itself is, however arbitrary.",
"description_html": "Unique integer ID of a particular isotopologue: every global isotopologue ID is unique to a particular species, even between different molecules. The number itself is, however arbitrary.",
"default_fmt": "%5d",
"default_units": "",
"data_type": "int",
"selectable": 1,
"has_reference": 0,
"has_error": 0
},
"molec_id": {
"id": 2,
"name": "molec_id",
"name_html": "Molecule ID",
"table_name": "",
"description": "The HITRAN integer ID for this molecule in all its isotopologue forms",
"description_html": "The HITRAN integer ID for this molecule in all its isotopologue forms",
"default_fmt": "%2d",
"default_units": None,
"data_type": "int",
"selectable": 1,
"has_reference": 0,
"has_error": 0
},
"local_iso_id": {
"id": 3,
"name": "local_iso_id",
"name_html": "Isotopologue ID",
"table_name": "",
"description": "Integer ID of a particular Isotopologue, unique only to a given molecule, in order or abundance (1 = most abundant)",
"description_html": "Integer ID of a particular Isotopologue, unique only to a given molecule, in order or abundance (1 = most abundant)",
"default_fmt": "%1d",
"default_units": "",
"data_type": "int",
"selectable": 1,
"has_reference": 0,
"has_error": 0
},
"nu": {
"id": 4,
"name": "nu",
"name_html": "<em>ν</em>",
"table_name": "prm_nu",
"description": "Transition wavenumber",
"description_html": "Transition wavenumber",
"default_fmt": "%12.6f",
"default_units": "cm-1",
"data_type": "float",
"selectable": 1,
"has_reference": 1,
"has_error": 1
},
"sw": {
"id": 5,
"name": "sw",
"name_html": "<em>S</em>",
"table_name": "prm_sw",
"description": "Line intensity, multiplied by isotopologue abundance, at T = 296 K",
"description_html": "Line intensity, multiplied by isotopologue abundance, at T = 296 K",
"default_fmt": "%10.3e",
"default_units": "cm-1/(molec.cm-2)",
"data_type": "float",
"selectable": 1,
"has_reference": 1,
"has_error": 1
},
"a": {
"id": 6,
"name": "a",
"name_html": "<em>A</em>",
"table_name": "prm_a",
"description": "Einstein A-coefficient in s-1",
"description_html": "Einstein <em>A</em>-coefficient",
"default_fmt": "%10.3e",
"default_units": "s-1",
"data_type": "float",
"selectable": 1,
"has_reference": 0,
"has_error": 0
},
"gamma_air": {
"id": 7,
"name": "gamma_air",
"name_html": "<em>γ</em><sub>air</sub>",
"table_name": "prm_gamma_air",
"description": "Air-broadened Lorentzian half-width at half-maximum at p = 1 atm and T = 296 K",
"description_html": "Air-broadened Lorentzian half-width at half-maximum at p = 1 atm and T = 296 K",
"default_fmt": "%6.4f",
"default_units": "cm-1.atm-1",
"data_type": "float",
"selectable": 1,
"has_reference": 1,
"has_error": 1
},
"gamma_self": {
"id": 8,
"name": "gamma_self",
"name_html": "<em>γ</em><sub>self</sub>",
"table_name": "prm_gamma_self",
"description": "Self-broadened HWHM at 1 atm pressure and 296 K",
"description_html": "Self-broadened HWHM at 1 atm pressure and 296 K",
"default_fmt": "%5.3f",
"default_units": "cm-1.atm-1",
"data_type": "float",
"selectable": 1,
"has_reference": 1,
"has_error": 1
},
"n_air": {
"id": 9,
"name": "n_air",
"name_html": "<em>n</em><sub>air</sub>",
"table_name": "prm_n_air",
"description": "Temperature exponent for the air-broadened HWHM",
"description_html": "Temperature exponent for the air-broadened HWHM",
"default_fmt": "%7.4f",
"default_units": "",
"data_type": "float",
"selectable": 1,
"has_reference": 1,
"has_error": 1
},
"delta_air": {
"id": 10,
"name": "delta_air",
"name_html": "<em>δ</em><sub>air</sub>",
"table_name": "prm_delta_air",
"description": "Pressure shift induced by air, referred to p=1 atm",
"description_html": "Pressure shift induced by air, referred to <em>p</em>=1 atm",
"default_fmt": "%9.6f",
"default_units": "cm-1.atm-1",
"data_type": "float",
"selectable": 1,
"has_reference": 1,
"has_error": 1
},
"elower": {
"id": 11,
"name": "elower",
"name_html": "<em>E\"</em>",
"table_name": "",
"description": "Lower-state energy",
"description_html": "Lower-state energy",
"default_fmt": "%10.4f",
"default_units": "cm-1",
"data_type": "float",
"selectable": 1,
"has_reference": 0,
"has_error": 0
},
"gp": {
"id": 12,
"name": "gp",
"name_html": "<em>g</em>\'",
"table_name": "",
"description": "Upper state degeneracy",
"description_html": "Upper state degeneracy",
"default_fmt": "%5d",
"default_units": "",
"data_type": "int",
"selectable": 1,
"has_reference": 0,
"has_error": 0
},
"gpp": {
"id": 13,
"name": "gpp",
"name_html": "<em>g</em>\"",
"table_name": "",
"description": "Lower state degeneracy",
"description_html": "Lower state degeneracy",
"default_fmt": "%5d",
"default_units": "",
"data_type": "int",
"selectable": 1,
"has_reference": 0,
"has_error": 0
},
"global_upper_quanta": {
"id": 14,
"name": "global_upper_quanta",
"name_html": "Global upper quanta",
"table_name": "",
"description": "Electronic and vibrational quantum numbers and labels for the upper state of a transition",
"description_html": "Electronic and vibrational quantum numbers and labels for the upper state of a transition",
"default_fmt": "%15s",
"default_units": None,
"data_type": "str",
"selectable": 0,
"has_reference": 0,
"has_error": 0
},
"global_lower_quanta": {
"id": 15,
"name": "global_lower_quanta",
"name_html": "Global lower quanta",
"table_name": "",
"description": "Electronic and vibrational quantum numbers and labels for the lower state of a transition",
"description_html": "Electronic and vibrational quantum numbers and labels for the lower state of a transition",
"default_fmt": "%15s",
"default_units": None,
"data_type": "str",
"selectable": 0,
"has_reference": 0,
"has_error": 0
},
"local_upper_quanta": {
"id": 16,
"name": "local_upper_quanta",
"name_html": "Local upper quanta",
"table_name": "",
"description": "Rotational, hyperfine and other quantum numbers and labels for the upper state of a transition",
"description_html": "Rotational, hyperfine and other quantum numbers and labels for the upper state of a transition",
"default_fmt": "%15s",
"default_units": None,
"data_type": "str",
"selectable": 0,
"has_reference": 0,
"has_error": 0
},
"local_lower_quanta": {
"id": 17,
"name": "local_lower_quanta",
"name_html": "Local lower quanta",
"table_name": "",
"description": "Rotational, hyperfine and other quantum numbers and labels for the lower state of a transition",
"description_html": "Rotational, hyperfine and other quantum numbers and labels for the lower state of a transition",
"default_fmt": "%15s",
"default_units": None,
"data_type": "str",
"selectable": 0,
"has_reference": 0,
"has_error": 0
},
"line_mixing_flag": {
"id": 18,
"name": "line_mixing_flag",
"name_html": "Line mixing flag",
"table_name": "",
"description": "A flag indicating the presence of additional data and code relating to line-mixing",
"description_html": "A flag indicating the presence of additional data and code relating to line-mixing",
"default_fmt": "%1s",
"default_units": "",
"data_type": "str",
"selectable": 0,
"has_reference": 0,
"has_error": 0
},
"ierr": {
"id": 19,
"name": "ierr",
"name_html": "Error indices",
"table_name": "",
"description": "Ordered list of indices corresponding to uncertainty estimates of transition parameters",
"description_html": "Ordered list of indices corresponding to uncertainty estimates of transition parameters",
"default_fmt": "%s",
"default_units": "",
"data_type": "str",
"selectable": 0,
"has_reference": 0,
"has_error": 0
},
"iref": {
"id": 20,
"name": "iref",
"name_html": "References",
"table_name": "",
"description": "Ordered list of reference identifiers for transition parameters",
"description_html": "Ordered list of reference identifiers for transition parameters",
"default_fmt": "%s",
"default_units": None,
"data_type": "str",
"selectable": 0,
"has_reference": 0,
"has_error": 0
},
"deltap_air": {
"id": 21,
"name": "deltap_air",
"name_html": "<em>δ\'</em><sub>air</sub>",
"table_name": "prm_deltap_air",
"description": "Linear temperature dependence coefficient for air-induced pressure shift",
"description_html": "Linear temperature dependence coefficient for air-induced pressure shift",
"default_fmt": "%10.3e",
"default_units": "",
"data_type": "float",
"selectable": 1,
"has_reference": 1,
"has_error": 1
},
"n_self": {
"id": 22,
"name": "n_self",
"name_html": "<em>n</em><sub>self</sub>",
"table_name": "prm_n_self",
"description": "Temperature exponent for the self-broadened HWHM",
"description_html": "Temperature exponent for the self-broadened HWHM",
"default_fmt": "%7.4f",
"default_units": "",
"data_type": "float",
"selectable": 1,
"has_reference": 1,
"has_error": 1
},
"delta_self": {
"id": 23,
"name": "delta_self",
"name_html": "<em>δ</em><sub>self</sub>",
"table_name": "prm_delta_self",
"description": "Self-induced pressure shift, referred to p=1 atm",
"description_html": "Self-induced pressure shift, referred to <em>p</em>=1 atm",
"default_fmt": "%9.6f",
"default_units": "cm-1.atm-1",
"data_type": "float",
"selectable": 1,
"has_reference": 1,
"has_error": 1
},
"deltap_self": {
"id": 24,
"name": "deltap_self",
"name_html": "<em>δ\'</em><sub>self</sub>",
"table_name": "prm_deltap_self",
"description": "Linear temperature dependence coefficient for self-induced pressure shift",
"description_html": "Linear temperature dependence coefficient for self-induced pressure shift",
"default_fmt": "%10.3e",
"default_units": "",
"data_type": "float",
"selectable": 1,
"has_reference": 1,
"has_error": 1
},
"sd_air": {
"id": 28,
"name": "SD_air",
"name_html": "SD</sub>air</sub>",
"table_name": "prm_sd_air",
"description": "Speed-dependence parameter, air-broadened lines",
"description_html": "Speed-dependence parameter, air-broadened lines",
"default_fmt": "%9.6f",
"default_units": "",
"data_type": "float",
"selectable": 1,
"has_reference": 1,
"has_error": 1
},
"sd_self": {
"id": 29,
"name": "SD_self",
"name_html": "SD</sub>self</sub>",
"table_name": "prm_sd_self",
"description": "Speed-dependence parameter, self-broadened lines",
"description_html": "Speed-dependence parameter, self-broadened lines",
"default_fmt": "%9.6f",
"default_units": "",
"data_type": "float",
"selectable": 1,
"has_reference": 1,
"has_error": 1
},
"beta_g_air": {
"id": 30,
"name": "beta_g_air",
"name_html": "<em>β</em><sub>G, air</sub>",
"table_name": "prm_beta_g_air",
"description": "Dicke narrowing parameter for the air broadened Galatry line profile",
"description_html": "Dicke narrowing parameter for the air broadened Galatry line profile",
"default_fmt": "%9.6f",
"default_units": "",
"data_type": "float",
"selectable": 1,
"has_reference": 1,
"has_error": 1
},
"y_self": {
"id": 31,
"name": "y_self",
"name_html": "<em>Y</em><sub>self</sub>",
"table_name": "prm_y_self",
"description": "First-order (Rosenkranz) line coupling coefficient; self-broadened environment",
"description_html": "First-order (Rosenkranz) line coupling coefficient; self-broadened environment",
"default_fmt": "%10.3e",
"default_units": "cm-1.atm-1",
"data_type": "float",
"selectable": 1,
"has_reference": 1,
"has_error": 1
},
"y_air": {
"id": 32,
"name": "y_air",
"name_html": "<em>Y</em><sub>air</sub>",
"table_name": "prm_y_air",
"description": "First-order (Rosenkranz) line coupling coefficient; air-broadened environment",
"description_html": "First-order (Rosenkranz) line coupling coefficient; air-broadened environment",
"default_fmt": "%10.3e",
"default_units": "cm-1.atm-1",
"data_type": "float",
"selectable": 1,
"has_reference": 1,
"has_error": 1
},
"statep": {
"id": 33,
"name": "statep",
"name_html": "qns\'",
"table_name": "",
"description": "Upper state quantum numbers",
"description_html": "Upper state quantum numbers",
"default_fmt": "%256s",
"default_units": "",
"data_type": "str",
"selectable": 1,
"has_reference": 0,
"has_error": 0
},
"statepp": {
"id": 34,
"name": "statepp",
"name_html": "qns\"",
"table_name": "",
"description": "Lower state quantum numbers",
"description_html": "Lower state quantum numbers",
"default_fmt": "%256s",
"default_units": "",
"data_type": "str",
"selectable": 1,
"has_reference": 0,
"has_error": 0
},
"beta_g_self": {
"id": 35,
"name": "beta_g_self",
"name_html": "<em>β</em><sub>G, self</sub>",
"table_name": "prm_beta_g_self",
"description": "Dicke narrowing parameter for the self-broadened Galatry line profile",
"description_html": "Dicke narrowing parameter for the self-broadened Galatry line profile",
"default_fmt": "%9.6f",
"default_units": "",
"data_type": "float",
"selectable": 1,
"has_reference": 1,
"has_error": 1
},
"trans_id": {
"id": 36,
"name": "trans_id",
"name_html": "Transition ID",
"table_name": "",
"description": "Unique integer ID of a particular transition entry in the database. (The same physical transition may have different IDs if its parameters have been revised or updated).",
"description_html": "Unique integer ID of a particular transition entry in the database. (The same physical transition may have different IDs if its parameters have been revised or updated).",
"default_fmt": "%12d",
"default_units": "",
"data_type": "int",
"selectable": 1,
"has_reference": 0,
"has_error": 0
},
"par_line": {
"id": 37,
"name": "par_line",
"name_html": ".par line",
"table_name": "",
"description": "Native 160-character formatted HITRAN line",
"description_html": "Native 160-character formatted HITRAN line",
"default_fmt": "%160s",
"default_units": "",
"data_type": "str",
"selectable": 1,
"has_reference": 0,
"has_error": 0
},
"gamma_h2": {
"id": 38,
"name": "gamma_H2",
"name_html": "<em>γ</em><sub>H2</sub> ",
"table_name": "prm_gamma_H2",
"description": "Lorentzian lineshape HWHM due to pressure broadening by H2 at 1 atm pressure",
"description_html": "Lorentzian lineshape HWHM due to pressure broadening by H<sub>2</sub> at 1 atm pressure",
"default_fmt": "%6.4f",
"default_units": "cm-1.atm-1",
"data_type": "float",
"selectable": 1,
"has_reference": 1,
"has_error": 1
},
"n_h2": {
"id": 39,
"name": "n_H2",
"name_html": "<em>n</em><sub>H2</sub>",
"table_name": "prm_n_H2",
"description": "Temperature exponent for the H2-broadened HWHM",
"description_html": "Temperature exponent for the H<sub>2</sub>-broadened HWHM",
"default_fmt": "%7.4f",
"default_units": "",
"data_type": "float",
"selectable": 1,
"has_reference": 1,
"has_error": 1
},
"delta_h2": {
"id": 40,
"name": "delta_H2",
"name_html": "<em>δ</em><sub>H2</sub>",
"table_name": "prm_delta_H2",
"description": "Pressure shift induced by H2, referred to p=1 atm",
"description_html": "Pressure shift induced by H<sub>2</sub>, referred to <em>p</em>=1 atm",
"default_fmt": "%9.6f",
"default_units": "cm-1.atm-1",
"data_type": "float",
"selectable": 1,
"has_reference": 1,
"has_error": 1
},
"deltap_h2": {
"id": 41,
"name": "deltap_H2",
"name_html": "<em>δ\'</em><sub>H2</sub>",
"table_name": "prm_deltap_H2",
"description": "Linear temperature dependence coefficient for H2-induced pressure shift",
"description_html": "Linear temperature dependence coefficient for H<sub>2</sub>-induced pressure shift",
"default_fmt": "%10.3e",
"default_units": "",
"data_type": "float",
"selectable": 1,
"has_reference": 1,
"has_error": 1
},
"gamma_he": {
"id": 42,
"name": "gamma_He",
"name_html": "<em>γ</em><sub>He</sub> ",
"table_name": "prm_gamma_He",
"description": "Lorentzian lineshape HWHM due to pressure broadening by He at 1 atm pressure",
"description_html": "Lorentzian lineshape HWHM due to pressure broadening by He at 1 atm pressure",
"default_fmt": "%6.4f",
"default_units": "cm-1.atm-1",
"data_type": "float",
"selectable": 1,
"has_reference": 1,
"has_error": 1
},
"n_he": {
"id": 43,
"name": "n_He",
"name_html": "<em>n</em><sub>He</sub>",
"table_name": "prm_n_He",
"description": "Temperature exponent for the He-broadened HWHM",
"description_html": "Temperature exponent for the He-broadened HWHM",
"default_fmt": "%7.4f",
"default_units": "",
"data_type": "float",
"selectable": 1,
"has_reference": 1,
"has_error": 1
},
"delta_he": {
"id": 44,
"name": "delta_He",
"name_html": "<em>δ</em><sub>He</sub>",
"table_name": "prm_delta_He",
"description": "Pressure shift induced by He, referred to p=1 atm",
"description_html": "Pressure shift induced by He, referred to <em>p</em>=1 atm",
"default_fmt": "%9.6f",
"default_units": "cm-1.atm-1",
"data_type": "float",
"selectable": 1,
"has_reference": 1,
"has_error": 1
},
"gamma_co2": {
"id": 45,
"name": "gamma_CO2",
"name_html": "<em>γ</em><sub>CO2</sub> ",
"table_name": "prm_gamma_CO2",
"description": "Lorentzian lineshape HWHM due to pressure broadening by CO2 at 1 atm pressure",
"description_html": "Lorentzian lineshape HWHM due to pressure broadening by CO<sub>2</sub> at 1 atm pressure",
"default_fmt": "%6.4f",
"default_units": "cm-1.atm-1",
"data_type": "float",
"selectable": 1,
"has_reference": 1,
"has_error": 1
},
"n_co2": {
"id": 46,
"name": "n_CO2",
"name_html": "<em>n</em><sub>CO2</sub>",
"table_name": "prm_n_CO2",
"description": "Temperature exponent for the CO2-broadened HWHM",
"description_html": "Temperature exponent for the CO<sub>2</sub>-broadened HWHM",
"default_fmt": "%7.4f",
"default_units": "",
"data_type": "float",
"selectable": 1,
"has_reference": 1,
"has_error": 1
},
"delta_co2": {
"id": 47,
"name": "delta_CO2",
"name_html": "<em>δ</em><sub>CO2</sub>",
"table_name": "prm_delta_CO2",
"description": "Pressure shift induced by CO2, referred to p=1 atm",
"description_html": "Pressure shift induced by CO<sub>2</sub>, referred to <em>p</em>=1 atm",
"default_fmt": "%9.6f",
"default_units": "cm-1.atm-1",
"data_type": "float",
"selectable": 1,
"has_reference": 1,
"has_error": 1
},
"gamma_HT_0_self_50": {
"default_fmt": "%6.4f",
},
"n_HT_self_50": {
"default_fmt": "%9.6f",
},
"gamma_HT_2_self_50": {
"default_fmt": "%6.4f",
},
"delta_HT_0_self_50": {
"default_fmt": "%9.6f",
},
"deltap_HT_self_50": {
"default_fmt": "%9.6f",
},
"delta_HT_2_self_50": {
"default_fmt": "%9.6f",
},
"gamma_HT_0_self_150": {
"default_fmt": "%6.4f",
},
"n_HT_self_150": {
"default_fmt": "%9.6f",
},
"gamma_HT_2_self_150": {
"default_fmt": "%6.4f",
},
"delta_HT_0_self_150": {
"default_fmt": "%9.6f",
},
"deltap_HT_self_150": {
"default_fmt": "%9.6f",
},
"delta_HT_2_self_150": {
"default_fmt": "%9.6f",
},
"gamma_HT_0_self_296": {
"default_fmt": "%6.4f",
},
"n_HT_self_296": {
"default_fmt": "%9.6f",
},
"gamma_HT_2_self_296": {
"default_fmt": "%6.4f",
},
"delta_HT_0_self_296": {
"default_fmt": "%9.6f",
},
"deltap_HT_self_296": {
"default_fmt": "%9.6f",
},
"delta_HT_2_self_296": {
"default_fmt": "%9.6f",
},
"gamma_HT_0_self_700": {
"default_fmt": "%6.4f",
},
"n_HT_self_700": {
"default_fmt": "%9.6f",
},
"gamma_HT_2_self_700": {
"default_fmt": "%6.4f",
},
"delta_HT_0_self_700": {
"default_fmt": "%9.6f",
},
"deltap_HT_self_700": {
"default_fmt": "%9.6f",
},
"delta_HT_2_self_700": {
"default_fmt": "%9.6f",
},
"nu_HT_self": {
"default_fmt": "%6.4f",
},
"kappa_HT_self": {
"default_fmt": "%9.6f",
},
"eta_HT_self": {
"default_fmt": "%9.6f",
},
}
def transport2object(TransportData):
pass
def object2transport(ObjectData):
pass
def getFullTableAndHeaderName(TableName):
# print('TableName=',TableName)
fullpath_data = VARIABLES['BACKEND_DATABASE_NAME'] + '/' + TableName + '.data'
if not os.path.isfile(fullpath_data):
fullpath_data = VARIABLES['BACKEND_DATABASE_NAME'] + '/' + TableName + '.par'
if not os.path.isfile(fullpath_data) and TableName != 'sampletab':
raise Exception('Lonely header \"%s\"' % fullpath_data)
fullpath_header = VARIABLES['BACKEND_DATABASE_NAME'] + '/' + TableName + '.header'
return fullpath_data, fullpath_header
def getParameterFormat(ParameterName, TableName):
return LOCAL_TABLE_CACHE[TableName]['header']['format']
def getTableHeader(TableName):
return LOCAL_TABLE_CACHE[TableName]['header']
# RowObject = list of tuples like (name,value,format)
def addRowObject(RowObject, TableName):
# add RowObject to TableObject in CACHE
# check consistency first
if [p[0] for p in RowObject] != LOCAL_TABLE_CACHE[TableName]['header']['order']:
raise Exception('The row is not consistent with the table')
for par_name, par_value, par_format in RowObject:
LOCAL_TABLE_CACHE[TableName]['data'][par_name] += par_value
pass
def getRowObject(RowID, TableName):
# return RowObject from TableObject in CACHE
RowObject = []
for par_name in LOCAL_TABLE_CACHE[TableName]['header']['order']:
par_value = LOCAL_TABLE_CACHE[TableName]['data'][par_name][RowID]
par_format = LOCAL_TABLE_CACHE[TableName]['header']['format'][par_name]
RowObject.append((par_name, par_value, par_format))
return RowObject
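# Illustrative sketch (not part of the original HAPI code): for the built-in
# 'sampletab' table defined above, getRowObject(0, 'sampletab') returns
#     [('column1', 1, '%10d'), ('column2', 10.5, '%20f'), ('column3', 'one', '%30s')]
# i.e. a RowObject is simply the ordered list of (name, value, format) tuples.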
# INCREASE ROW COUNT
def addRowObject(RowObject, TableName):
# print 'addRowObject: '
# print 'RowObject: '+str(RowObject)
# print 'TableName:'+TableName
for par_name, par_value, par_format in RowObject:
# print 'par_name,par_value,par_format: '+str((par_name,par_value,par_format))
# print '>>> '+ str(LOCAL_TABLE_CACHE[TableName]['data'][par_name])
# LOCAL_TABLE_CACHE[TableName]['data'][par_name] += [par_value]
LOCAL_TABLE_CACHE[TableName]['data'][par_name].append(par_value)
def setRowObject(RowID, RowObject, TableName):
number_of_rows = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
if RowID >= 0 and RowID < number_of_rows:
for par_name, par_value, par_format in RowObject:
LOCAL_TABLE_CACHE[TableName]['data'][par_name][RowID] = par_value
else:
# !!! XXX ATTENTION: THIS IS A TEMPORARY INSERTION XXX !!!
LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows'] += 1
addRowObject(RowObject, TableName)
def getDefaultRowObject(TableName):
# get a default RowObject from a table
RowObject = []
for par_name in LOCAL_TABLE_CACHE[TableName]['header']['order']:
par_value = LOCAL_TABLE_CACHE[TableName]['header']['default'][par_name]
par_format = LOCAL_TABLE_CACHE[TableName]['header']['format'][par_name]
RowObject.append((par_name, par_value, par_format))
return RowObject
def subsetOfRowObject(ParameterNames, RowObject):
# return a subset of RowObject according to
# RowObjectNew = []
# for par_name,par_value,par_format in RowObject:
# if par_name in ParameterNames:
# RowObjectNew.append((par_name,par_value,par_format))
# return RowObjectNew
dct = {}
for par_name, par_value, par_format in RowObject:
dct[par_name] = (par_name, par_value, par_format)
RowObjectNew = []
for par_name in ParameterNames:
RowObjectNew.append(dct[par_name])
return RowObjectNew
# FORMAT_PYTHON_REGEX = '^\%([0-9]*)\.?([0-9]*)([dfs])$'
FORMAT_PYTHON_REGEX = '^\%(\d*)(\.(\d*))?([edfsEDFS])$'
# Fortran string formatting
# based on a pythonic format string
def formatString(par_format, par_value, lang='FORTRAN'):
    # Fortran format rules:
    # %M.NP
    # M - total field length (optional)
    #     (minus sign included in M)
    # . - decimal separator (optional)
    # N - number of digits after . (optional)
    # P - [dfs] int/float/string
    # PYTHON RULE: if N is absent, the default value is 6
regex = FORMAT_PYTHON_REGEX
(lng, trail, lngpnt, ty) = re.search(regex, par_format).groups()
result = par_format % par_value
if ty.lower() in set(['f', 'e']):
lng = int(lng) if lng else 0
lngpnt = int(lngpnt) if lngpnt else 0
result = par_format % par_value
res = result.strip()
if lng == lngpnt + 1:
if res[0:1] == '0':
result = '%%%ds' % lng % res[1:]
if par_value < 0:
if res[1:2] == '0':
result = '%%%ds' % lng % (res[0:1] + res[2:])
return result
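# Illustrative sketch (not part of the original HAPI code): when the field
# width equals the number of decimals plus one, the Fortran-style output drops
# the leading zero, e.g.
#     formatString('%4.3f', 0.123)   ->  '.123'
#     formatString('%4.3f', -0.123)  ->  '-.123'
# while wider fields such as '%5.3f' are returned unchanged ('0.123').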
def formatGetLength(fmt, lang='FORTRAN'):
regex = FORMAT_PYTHON_REGEX
def putRowObjectToString(RowObject):
# serialize RowObject to string
# TODO: support different languages (C,Fortran)
output_string = ''
for par_name, par_value, par_format in RowObject:
# Python formatting
# output_string += par_format % par_value
# Fortran formatting
# print 'par_name,par_value,par_format: '+str((par_name,par_value,par_format))
output_string += formatString(par_format, par_value)
return output_string
# Parameter nicknames are hardcoded.
PARAMETER_NICKNAMES = {
"a": "A",
"gamma_air": "gair",
"gp": "g",
"local_iso_id": "I",
"molec_id": "M",
"sw": "S",
"local_lower_quanta": "Q_",
"local_upper_quanta": "Q",
"gpp": "g_",
"elower": "E_",
"n_air": "nair",
"delta_air": "dair",
"global_upper_quanta": "V",
"iref": "Iref",
"line_mixing_flag": "f",
"ierr": "ierr",
"nu": "nu",
"gamma_self": "gsel",
"global_lower_quanta": "V_"
}
def putTableHeaderToString(TableName):
output_string = ''
regex = FORMAT_PYTHON_REGEX
for par_name in LOCAL_TABLE_CACHE[TableName]['header']['order']:
par_format = LOCAL_TABLE_CACHE[TableName]['header']['format'][par_name]
(lng, trail, lngpnt, ty) = re.search(regex, par_format).groups()
fmt = '%%%ss' % lng
try:
par_name_short = PARAMETER_NICKNAMES[par_name]
except:
par_name_short = par_name
# output_string += fmt % par_name
output_string += (fmt % par_name_short)[:int(lng)]
return output_string
def getRowObjectFromString(input_string, TableName):
# restore RowObject from string, get formats and names in TableName
# print 'getRowObjectFromString:'
pos = 0
RowObject = []
# print 'Header: '+str(LOCAL_TABLE_CACHE[TableName]['header'])
for par_name in LOCAL_TABLE_CACHE[TableName]['header']['order']:
# print 'ITERATION\npos: '+str(pos) #
# print 'par_name: '+par_name #
par_format = LOCAL_TABLE_CACHE[TableName]['header']['format'][par_name]
# print 'par_format: '+par_format #
regex = '^\%([0-9]+)\.?[0-9]*([dfs])$' #
regex = FORMAT_PYTHON_REGEX
# print 'par_name: '+par_name #
(lng, trail, lngpnt, ty) = re.search(regex, par_format).groups()
lng = int(lng)
# print 'lng,ty:'+str((lng,ty)) #
par_value = input_string[pos:(pos + lng)]
# print 'par_value: '+par_value #
if ty == 'd': # integer value
par_value = int(par_value)
elif ty.lower() in set(['e', 'f']): # float value
par_value = float(par_value)
elif ty == 's': # string value
# par_value = par_value.strip() # strip spaces and tabs
pass # don't strip string value
else:
print('err1')
raise Exception('Format \"%s\" is unknown' % par_format)
RowObject.append((par_name, par_value, par_format))
pos += lng
# Do the same but now for extra (comma-separated) parameters
if 'extra' in set(LOCAL_TABLE_CACHE[TableName]['header']):
csv_chunks = input_string.split(LOCAL_TABLE_CACHE[TableName]['header']. \
get('extra_separator', ','))
# Disregard the first "column-fixed" container if it presents:
if LOCAL_TABLE_CACHE[TableName]['header'].get('order', []):
pos = 1
else:
pos = 0
for par_name in LOCAL_TABLE_CACHE[TableName]['header']['extra']:
par_format = LOCAL_TABLE_CACHE[TableName]['header']['extra_format'][par_name]
regex = '^\%([0-9]+)\.?[0-9]*([dfs])$' #
regex = FORMAT_PYTHON_REGEX
(lng, trail, lngpnt, ty) = re.search(regex, par_format).groups()
lng = int(lng)
par_value = csv_chunks[pos]
if ty == 'd': # integer value
try:
par_value = int(par_value)
except:
par_value = 0
elif ty.lower() in set(['e', 'f']): # float value
try:
par_value = float(par_value)
except:
par_value = 0.0
elif ty == 's': # string value
# par_value = par_value.strip() # strip spaces and tabs
pass # don't strip string value
else:
print('err')
raise Exception('Format \"%s\" is unknown' % par_format)
RowObject.append((par_name, par_value, par_format))
pos += 1
return RowObject
# LOCAL_TABLE_CACHE[TableName]['data'][par_name] += par_value # or append()?
# Conversion between OBJECT_FORMAT and STORAGE_FORMAT
# This will substitute putTableToStorage and getTableFromStorage
def cache2storage(TableName):
# print 'cache2storage:'
try:
os.mkdir(VARIABLES['BACKEND_DATABASE_NAME'])
except:
pass
fullpath_data, fullpath_header = getFullTableAndHeaderName(TableName)
# print 'fullpath_data:'+fullpath_data
# print 'fullpath_header'+fullpath_header
# check if file exists and throw an exception
# if isfile(fullpath_data): raise Exception('Table \"%s\" already exists',NewTableName)
# if isfile(fullpath_header): raise Exception('SCHEMA IS BROKEN')
OutfileData = open(fullpath_data, 'w')
OutfileHeader = open(fullpath_header, 'w')
# write table data
line_count = 1
line_number = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
for RowID in range(0, LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']):
# print '%d line from %d' % (line_count,line_number)
line_count += 1
RowObject = getRowObject(RowID, TableName)
# print 'RowObject:'+str(RowObject)
raw_string = putRowObjectToString(RowObject)
# print 'RowObject_string:'+raw_string
OutfileData.write(raw_string + '\n')
# write table header
TableHeader = getTableHeader(TableName)
OutfileHeader.write(json.dumps(TableHeader, indent=2))
def storage2cache(TableName):
# print 'storage2cache:'
# print('TableName',TableName)
fullpath_data, fullpath_header = getFullTableAndHeaderName(TableName)
InfileData = open(fullpath_data, 'r')
InfileHeader = open(fullpath_header, 'r')
# try:
header_text = InfileHeader.read()
try:
Header = json.loads(header_text)
except:
print('HEADER:')
print(header_text)
raise Exception('Invalid header')
# print 'Header:'+str(Header)
LOCAL_TABLE_CACHE[TableName] = {}
LOCAL_TABLE_CACHE[TableName]['header'] = Header
LOCAL_TABLE_CACHE[TableName]['data'] = {}
# Check if Header['order'] and Header['extra'] contain
# parameters with same names, raise exception if true.
# intersct = set(Header['order']).intersection(set(Header.get('extra',[])))
intersct = set(Header.get('order', [])).intersection(set(Header.get('extra', [])))
if intersct:
raise Exception('Parameters with the same names: {}'.format(intersct))
# initialize empty data to avoid problems
glob_order = [];
glob_format = {};
glob_default = {}
if "order" in LOCAL_TABLE_CACHE[TableName]['header'].keys():
glob_order += LOCAL_TABLE_CACHE[TableName]['header']['order']
glob_format.update(LOCAL_TABLE_CACHE[TableName]['header']['format'])
glob_default.update(LOCAL_TABLE_CACHE[TableName]['header']['default'])
for par_name in LOCAL_TABLE_CACHE[TableName]['header']['order']:
LOCAL_TABLE_CACHE[TableName]['data'][par_name] = []
if "extra" in LOCAL_TABLE_CACHE[TableName]['header'].keys():
glob_order += LOCAL_TABLE_CACHE[TableName]['header']['extra']
glob_format.update(LOCAL_TABLE_CACHE[TableName]['header']['extra_format'])
for par_name in LOCAL_TABLE_CACHE[TableName]['header']['extra']:
glob_default[par_name] = PARAMETER_META[par_name]['default_fmt']
LOCAL_TABLE_CACHE[TableName]['data'][par_name] = []
line_count = 0
# line_number = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
for line in InfileData:
# print '%d line from %d' % (line_count,line_number)
# print 'line: '+line #
try:
RowObject = getRowObjectFromString(line, TableName)
line_count += 1
except:
continue
# print 'RowObject: '+str(RowObject)
addRowObject(RowObject, TableName)
# except:
# raise Exception('TABLE FETCHING ERROR')
LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows'] = line_count
# Delete all character-separated values, treat them as column-fixed.
try:
del LOCAL_TABLE_CACHE[TableName]['header']['extra']
del LOCAL_TABLE_CACHE[TableName]['header']['extra_format']
del LOCAL_TABLE_CACHE[TableName]['header']['extra_separator']
except:
pass
# Update header.order/format with header.extra/format if exist.
LOCAL_TABLE_CACHE[TableName]['header']['order'] = glob_order
LOCAL_TABLE_CACHE[TableName]['header']['format'] = glob_format
LOCAL_TABLE_CACHE[TableName]['header']['default'] = glob_default
InfileData.close()
InfileHeader.close()
print(' Lines parsed: %d' % line_count)
pass
# / FORMAT CONVERSION LAYER
def getTableNamesFromStorage(StorageName):
file_names = listdir(StorageName)
table_names = []
for file_name in file_names:
# search all files with "header" extensions
# matchObject = re.search('(\w+)\.header$',file_name)
matchObject = re.search('(.+)\.header$', file_name)
if matchObject:
# print('matchObject.group(1)=',matchObject.group(1))
table_names.append(matchObject.group(1))
return table_names
# FIX POSSIBLE BUG: SIMILAR NAMES OF .PAR AND .DATA FILES
# BUG FIXED BY INTRODUCING A PRIORITY:
# *.data files have more priority than *.par files
# See getFullTableAndHeaderName function for explanation
def scanForNewParfiles(StorageName):
file_names = listdir(StorageName)
headers = {} # without extensions!
parfiles_without_header = []
for file_name in file_names:
# create dictionary of unique headers
try:
# fname,fext = re.search('(\w+)\.(\w+)',file_name).groups()
fname, fext = re.search('(.+)\.(\w+)', file_name).groups()
except:
continue
if fext == 'header': headers[fname] = True
for file_name in file_names:
# check if extension is 'par' and the header is absent
try:
# fname,fext = re.search('(\w+)\.(\w+)',file_name).groups()
fname, fext = re.search('(.+)\.(\w+)', file_name).groups()
except:
continue
if fext == 'par' and fname not in headers:
parfiles_without_header.append(fname)
return parfiles_without_header
def createHeader(TableName):
fname = TableName + '.header'
fp = open(VARIABLES['BACKEND_DATABASE_NAME'] + '/' + fname, 'w')
if os.path.isfile(TableName):
raise Exception('File \"%s\" already exists!' % fname)
fp.write(json.dumps(HITRAN_DEFAULT_HEADER, indent=2))
fp.close()
def loadCache():
# print 'loadCache:'
print('Using ' + VARIABLES['BACKEND_DATABASE_NAME'] + '\n')
LOCAL_TABLE_CACHE = {} # ?????
table_names = getTableNamesFromStorage(VARIABLES['BACKEND_DATABASE_NAME'])
# print('table_names=',table_names)
parfiles_without_header = scanForNewParfiles(VARIABLES['BACKEND_DATABASE_NAME'])
# create headers for new parfiles
for tab_name in parfiles_without_header:
# get name without 'par' extension
createHeader(tab_name)
table_names.append(tab_name)
for TableName in table_names:
print(TableName)
storage2cache(TableName)
def saveCache():
# print 'saveCache:'
try:
# delete query buffer
del LOCAL_TABLE_CACHE[QUERY_BUFFER]
except:
pass
for TableName in LOCAL_TABLE_CACHE:
print(TableName)
cache2storage(TableName)
# DB backend level, start transaction
def databaseBegin(db=None):
if db:
VARIABLES['BACKEND_DATABASE_NAME'] = db
else:
VARIABLES['BACKEND_DATABASE_NAME'] = BACKEND_DATABASE_NAME_DEFAULT
# print 'databaseBegin:'
# print(os.path.isdir("/home/el"))
# print(os.path.exists("/home/el/myfile.txt"))
if not os.path.exists(VARIABLES['BACKEND_DATABASE_NAME']):
os.mkdir(VARIABLES['BACKEND_DATABASE_NAME'])
loadCache()
# DB backend level, end transaction
def databaseCommit():
# print 'databaseCommit:'
saveCache()
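# Typical local workflow sketch (added for illustration; not part of the
# original HAPI code):
#     databaseBegin('hapi_data')   # create/open the folder and load all tables
#     ...                          # work with LOCAL_TABLE_CACHE
#     databaseCommit()             # serialize every cached table back to disk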
# def saveCache():
# for TableName in LOCAL_TABLE_CACHE.keys():
# putTableToStorage(TableName)
# ----------------------------------------------------
# ----------------------------------------------------
# CONDITIONS
# ----------------------------------------------------
# ----------------------------------------------------
# ----------------------------------------------------
# hierarchic query.condition language:
# Conditions: CONS = ('and', ('=','p1','p2'), ('<','p1',13))
# String literals are distinguished from variable names
# by using the operation ('STRING','some_string')
# ----------------------------------------------------
# necessary conditions for hitranonline:
SAMPLE_CONDITIONS = ('AND', ('SET', 'internal_iso_id', [1, 2, 3, 4, 5, 6]), ('>=', 'nu', 0), ('<=', 'nu', 100))
# sample hitranonline protocol
# http://hitran.cloudapp.net/lbl/5?output_format_id=1&iso_ids_list=5&numin=0&numax=100&access=api&key=e20e4bd3-e12c-4931-99e0-4c06e88536bd
CONDITION_OPERATIONS = set(
['AND', 'OR', 'NOT', 'RANGE', 'IN', '<', '>', '<=', '>=', '==', '!=', 'LIKE', 'STR', '+', '-', '*', '/', 'MATCH',
'SEARCH', 'FINDALL'])
# Operations used in Condition verification
# Basic scheme: operationXXX(args),
# where args - list/array of arguments (>=1)
def operationAND(args):
# any number if arguments
for arg in args:
if not arg:
return False
return True
def operationOR(args):
# any number of arguments
for arg in args:
if arg:
return True
return False
def operationNOT(arg):
# one argument
return not arg
def operationRANGE(x, x_min, x_max):
return x_min <= x <= x_max
def operationSUBSET(arg1, arg2):
# True if arg1 is subset of arg2
# arg1 is an element
# arg2 is a set
return arg1 in arg2
def operationLESS(args):
# any number of args
for i in range(1, len(args)):
if args[i - 1] >= args[i]:
return False
return True
def operationMORE(args):
# any number of args
for i in range(1, len(args)):
if args[i - 1] <= args[i]:
return False
return True
def operationLESSOREQUAL(args):
# any number of args
for i in range(1, len(args)):
if args[i - 1] > args[i]:
return False
return True
def operationMOREOREQUAL(args):
# any number of args
for i in range(1, len(args)):
if args[i - 1] < args[i]:
return False
return True
def operationEQUAL(args):
# any number of args
for i in range(1, len(args)):
if args[i] != args[i - 1]:
return False
return True
def operationNOTEQUAL(arg1, arg2):
return arg1 != arg2
def operationSUM(args):
# any numbers of arguments
if type(args[0]) in set([int, float]):
result = 0
    elif isinstance(args[0], str):  # string concatenation; Python-2-only 'unicode' removed for Python 3 compatibility
result = ''
else:
raise Exception('SUM error: unknown arg type')
for arg in args:
result += arg
return result
def operationDIFF(arg1, arg2):
return arg1 - arg2
def operationMUL(args):
# any numbers of arguments
if type(args[0]) in set([int, float]):
result = 1
else:
raise Exception('MUL error: unknown arg type')
for arg in args:
result *= arg
return result
def operationDIV(arg1, arg2):
return arg1 / arg2
def operationSTR(arg):
# transform arg to str
if type(arg) != str:
raise Exception('Type mismatch: STR')
return arg
def operationSET(arg):
# transform arg to list
if type(arg) not in set([list, tuple, set]):
raise Exception('Type mismatch: SET')
return list(arg)
def operationMATCH(arg1, arg2):
# Match regex (arg1) and string (arg2)
# return bool(re.match(arg1,arg2)) # works wrong
return bool(re.search(arg1, arg2))
def operationSEARCH(arg1, arg2):
# Search regex (arg1) in string (arg2)
# Output list of entries
group = re.search(arg1, arg2).groups()
result = []
for item in group:
result.append(('STR', item))
return result
def operationFINDALL(arg1, arg2):
# Search all groups of a regex
# Output a list of groups of entries
# XXX: If a group has more than 1 entry,
# there could be potential problems
list_of_groups = re.findall(arg1, arg2)
result = []
for item in list_of_groups:
result.append(('STR', item))
return result
def operationLIST(args):
# args is a list: do nothing (almost)
return list(args)
# /operations
# def parse(Conditions):
# pass
# GROUPING ----------------------------------------------
GROUP_INDEX = {}
# GROUP_INDEX has the following structure:
# GROUP_INDEX[KEY] = VALUE
# KEY = table line values
# VALUE = {'FUNCTIONS':DICT,'FLAG':LOGICAL,'ROWID':INTEGER}
# FUNCTIONS = {'FUNC_NAME':DICT}
# FUNC_NAME = {'FLAG':LOGICAL,'NAME':STRING}
# name and default value
GROUP_FUNCTION_NAMES = {'COUNT': 0,
'SUM': 0,
'MUL': 1,
'AVG': 0,
'MIN': +1e100,
'MAX': -1e100,
'SSQ': 0,
}
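# Illustrative sketch (not executed): after grouping a table by a single parameter,
# one GROUP_INDEX entry might look like the structure below. The key and values are
# hypothetical and only show the shape produced by initializeGroup/groupCOUNT.
#
#   GROUP_INDEX[('group', 1.0)] = {
#       'FUNCTIONS': {
#           'COUNT': {'FLAG': False, 'VALUE': 3},   # 3 rows fell into this group so far
#       },
#       'ROWID': 0,                                  # row index in the destination table
#   }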
def clearGroupIndex():
# GROUP_INDEX = {}
for key in GROUP_INDEX.keys():
del GROUP_INDEX[key]
def getValueFromGroupIndex(GroupIndexKey, FunctionName):
# If no such index_key, create it and return a value
if FunctionName not in GROUP_FUNCTION_NAMES:
raise Exception('No such function \"%s\"' % FunctionName)
# In the case if NewRowObjectDefault is requested
if not GroupIndexKey:
return GROUP_FUNCTION_NAMES[FunctionName]
if FunctionName not in GROUP_INDEX[GroupIndexKey]['FUNCTIONS']:
GROUP_INDEX[GroupIndexKey]['FUNCTIONS'][FunctionName] = {}
GROUP_INDEX[GroupIndexKey]['FUNCTIONS'][FunctionName]['FLAG'] = True
GROUP_INDEX[GroupIndexKey]['FUNCTIONS'][FunctionName]['VALUE'] = \
GROUP_FUNCTION_NAMES[FunctionName]
return GROUP_INDEX[GroupIndexKey]['FUNCTIONS'][FunctionName]['VALUE']
def setValueToGroupIndex(GroupIndexKey, FunctionName, Value):
GROUP_INDEX[GroupIndexKey]['FUNCTIONS'][FunctionName]['VALUE'] = Value
def initializeGroup(GroupIndexKey):
if GroupIndexKey not in GROUP_INDEX:
        # print('GROUP_DESC[COUNT]=' + str(GROUP_DESC['COUNT']))  # debug leftover: GROUP_DESC is not defined
GROUP_INDEX[GroupIndexKey] = {}
GROUP_INDEX[GroupIndexKey]['FUNCTIONS'] = {}
GROUP_INDEX[GroupIndexKey]['ROWID'] = len(GROUP_INDEX) - 1
for FunctionName in GROUP_FUNCTION_NAMES:
# initialize function flags (UpdateFlag)
if FunctionName in GROUP_INDEX[GroupIndexKey]['FUNCTIONS']:
GROUP_INDEX[GroupIndexKey]['FUNCTIONS'][FunctionName]['FLAG'] = True
print('initializeGroup: GROUP_INDEX=' + str(GROUP_INDEX))
def groupCOUNT(GroupIndexKey):
FunctionName = 'COUNT'
Value = getValueFromGroupIndex(GroupIndexKey, FunctionName)
if GroupIndexKey:
if GROUP_INDEX[GroupIndexKey]['FUNCTIONS'][FunctionName]['FLAG']:
GROUP_INDEX[GroupIndexKey]['FUNCTIONS'][FunctionName]['FLAG'] = False
Value = Value + 1
setValueToGroupIndex(GroupIndexKey, FunctionName, Value)
return Value
def groupSUM():
pass
def groupMUL():
pass
def groupAVG():
pass
def groupMIN():
pass
def groupMAX():
pass
def groupSSQ():
pass
OPERATORS = { \
# List
'LIST': lambda args: operationLIST(args),
# And
'&': lambda args: operationAND(args),
'&&': lambda args: operationAND(args),
'AND': lambda args: operationAND(args),
# Or
'|': lambda args: operationOR(args),
'||': lambda args: operationOR(args),
'OR': lambda args: operationOR(args),
# Not
'!': lambda args: operationNOT(args[0]),
'NOT': lambda args: operationNOT(args[0]),
# Between
'RANGE': lambda args: operationRANGE(args[0], args[1], args[2]),
'BETWEEN': lambda args: operationRANGE(args[0], args[1], args[2]),
# Subset
'IN': lambda args: operationSUBSET(args[0], args[1]),
'SUBSET': lambda args: operationSUBSET(args[0], args[1]),
# Less
'<': lambda args: operationLESS(args),
'LESS': lambda args: operationLESS(args),
'LT': lambda args: operationLESS(args),
# More
'>': lambda args: operationMORE(args),
'MORE': lambda args: operationMORE(args),
'MT': lambda args: operationMORE(args),
# Less or equal
'<=': lambda args: operationLESSOREQUAL(args),
'LESSOREQUAL': lambda args: operationLESSOREQUAL(args),
'LTE': lambda args: operationLESSOREQUAL(args),
# More or equal
'>=': lambda args: operationMOREOREQUAL(args),
'MOREOREQUAL': lambda args: operationMOREOREQUAL(args),
'MTE': lambda args: operationMOREOREQUAL(args),
# Equal
'=': lambda args: operationEQUAL(args),
'==': lambda args: operationEQUAL(args),
'EQ': lambda args: operationEQUAL(args),
'EQUAL': lambda args: operationEQUAL(args),
'EQUALS': lambda args: operationEQUAL(args),
# Not equal
'!=': lambda args: operationNOTEQUAL(args[0], args[1]),
'<>': lambda args: operationNOTEQUAL(args[0], args[1]),
'~=': lambda args: operationNOTEQUAL(args[0], args[1]),
'NE': lambda args: operationNOTEQUAL(args[0], args[1]),
'NOTEQUAL': lambda args: operationNOTEQUAL(args[0], args[1]),
# Plus
'+': lambda args: operationSUM(args),
'SUM': lambda args: operationSUM(args),
# Minus
'-': lambda args: operationDIFF(args[0], args[1]),
'DIFF': lambda args: operationDIFF(args[0], args[1]),
# Mul
'*': lambda args: operationMUL(args),
'MUL': lambda args: operationMUL(args),
# Div
'/': lambda args: operationDIV(args[0], args[1]),
'DIV': lambda args: operationDIV(args[0], args[1]),
# Regexp match
'MATCH': lambda args: operationMATCH(args[0], args[1]),
'LIKE': lambda args: operationMATCH(args[0], args[1]),
# Regexp search
'SEARCH': lambda args: operationSEARCH(args[0], args[1]),
    # Regexp findall
'FINDALL': lambda args: operationFINDALL(args[0], args[1]),
    # Group count is handled directly in evaluateExpression, since it needs
    # the GroupIndexKey context which is not available inside these lambdas.
}
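# Illustrative sketch (not executed): the OPERATORS table maps an operator name to a
# callable that takes the already-evaluated argument list, e.g.:
#
#   OPERATORS['AND']([True, True, False])   # -> False
#   OPERATORS['RANGE']([5, 0, 10])          # -> True   (0 <= 5 <= 10)
#   OPERATORS['+']([1, 2, 3])               # -> 6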
# new evaluateExpression function,
# accounting for groups
"""
def evaluateExpression(root,VarDictionary,GroupIndexKey=None):
# input = local tree root
# XXX: this could be very slow due to passing
# every time VarDictionary as a parameter
# Two special cases: 1) root=varname
# 2) root=list/tuple
# These cases must be processed in a separate way
if type(root) in set([list,tuple]):
# root is not a leaf
head = root[0].upper()
# string constants are treated specially
if head in set(['STR','STRING']): # one arg
return operationSTR(root[1])
elif head in set(['SET']):
return operationSET(root[1])
tail = root[1:]
args = []
# evaluate arguments recursively
for element in tail: # resolve tree by recursion
args.append(evaluateExpression(element,VarDictionary,GroupIndexKey))
# call functions with evaluated arguments
if head in set(['LIST']): # list arg
return operationLIST(args)
elif head in set(['&','&&','AND']): # many args
return operationAND(args)
elif head in set(['|','||','OR']): # many args
return operationOR(args)
elif head in set(['!','NOT']): # one args
return operationNOT(args[0])
elif head in set(['RANGE','BETWEEN']): # three args
return operationRANGE(args[0],args[1],args[2])
elif head in set(['IN','SUBSET']): # two args
return operationSUBSET(args[0],args[1])
elif head in set(['<','LESS','LT']): # many args
return operationLESS(args)
elif head in set(['>','MORE','MT']): # many args
return operationMORE(args)
elif head in set(['<=','LESSOREQUAL','LTE']): # many args
return operationLESSOREQUAL(args)
elif head in set(['>=','MOREOREQUAL','MTE']): # many args
return operationMOREOREQUAL(args)
elif head in set(['=','==','EQ','EQUAL','EQUALS']): # many args
return operationEQUAL(args)
elif head in set(['!=','<>','~=','NE','NOTEQUAL']): # two args
return operationNOTEQUAL(args[0],args[1])
elif head in set(['+','SUM']): # many args
return operationSUM(args)
elif head in set(['-','DIFF']): # two args
return operationDIFF(args[0],args[1])
elif head in set(['*','MUL']): # many args
return operationMUL(args)
elif head in set(['/','DIV']): # two args
return operationDIV(args[0],args[1])
elif head in set(['MATCH','LIKE']): # two args
return operationMATCH(args[0],args[1])
elif head in set(['SEARCH']): # two args
return operationSEARCH(args[0],args[1])
elif head in set(['FINDALL']): # two args
return operationFINDALL(args[0],args[1])
# --- GROUPING OPERATIONS ---
elif head in set(['COUNT']):
return groupCOUNT(GroupIndexKey)
else:
raise Exception('Unknown operator: %s' % root[0])
elif type(root)==str:
# root is a par_name
return VarDictionary[root]
else:
# root is a non-string constant
return root
"""
def evaluateExpression(root, VarDictionary, GroupIndexKey=None):
# input = local tree root
# XXX: this could be very slow due to passing
# every time VarDictionary as a parameter
# Two special cases: 1) root=varname
# 2) root=list/tuple
# These cases must be processed in a separate way
if type(root) in set([list, tuple]):
# root is not a leaf
head = root[0].upper()
# string constants are treated specially
if head in set(['STR', 'STRING']): # one arg
return operationSTR(root[1])
elif head in set(['SET']):
return operationSET(root[1])
tail = root[1:]
args = []
# evaluate arguments recursively
for element in tail: # resolve tree by recursion
args.append(evaluateExpression(element, VarDictionary, GroupIndexKey))
        # call functions with evaluated arguments;
        # group functions (COUNT, ...) are handled separately since they
        # need the GroupIndexKey context, which is not passed to OPERATORS
        if head == 'COUNT':
            return groupCOUNT(GroupIndexKey)
        if head not in OPERATORS:
            raise Exception('Unknown operator: %s' % head)
        return OPERATORS[head](args)
elif type(root) == str:
# root is a par_name
return VarDictionary[root]
else:
# root is a non-string constant
return root
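# Illustrative sketch (not executed): evaluating a nested prefix expression against a
# variable dictionary. The parameter values below are hypothetical.
#
#   VarDictionary = {'p1': 4.0, 'p2': 3.0}
#   expr = ('AND', ('>=', 'p1', 1), ('<', ('*', 'p1', 'p2'), 20))
#   evaluateExpression(expr, VarDictionary)   # -> True  (4>=1 and 4*3<20)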
def getVarDictionary(RowObject):
# get VarDict from RowObject
# VarDict: par_name => par_value
VarDictionary = {}
for par_name, par_value, par_format in RowObject:
VarDictionary[par_name] = par_value
return VarDictionary
def checkRowObject(RowObject, Conditions, VarDictionary):
# VarDictionary = getVarDictionary(RowObject)
if Conditions:
Flag = evaluateExpression(Conditions, VarDictionary)
else:
Flag = True
return Flag
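# Illustrative sketch (not executed): checking a single row against a condition.
# The RowObject below is hypothetical and follows the (name, value, format) convention.
#
#   RowObject = [('nu', 2150.0, '%12.6f'), ('sw', 1e-23, '%10.3E')]
#   VarDictionary = getVarDictionary(RowObject)
#   checkRowObject(RowObject, ('between', 'nu', 2000, 2200), VarDictionary)   # -> True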
# ----------------------------------------------------
# /CONDITIONS
# ----------------------------------------------------
# ----------------------------------------------------
# PARAMETER NAMES (including creation of new ones)
# ----------------------------------------------------
# Bind an expression to a new parameter
# in a form: ('BIND','new_par',('some_exp',...))
def operationBIND(parname, Expression, VarDictionary):
pass
# This section is for more detailed processing of parlists.
# Table creation must include not only subsets of
# existing parameters, but also new parameters
# derived from functions on a special prefix language
# For this reason subsetOfRowObject(..) must be substituted
# by newRowObject(ParameterNames,RowObject)
# For parsing use the function evaluateExpression
# Get names from expression.
# Must merge this one with evaluateExpression.
# This is a VERY LIMITED version of what will be available
# when the language parser is implemented.
# For more ideas and info see LANGUAGE_REFERENCE
# more advanced version of expression evaluator
def evaluateExpressionPAR(ParameterNames, VarDictionary=None):
# RETURN: 1) Upper-level Expression names
# 2) Upper-level Expression values
# Is it reasonable to pass a Context to every parse function?
# For now the function does the following:
# 1) iterates through all UPPER-LEVEL list elements
# 2) if element is a parname: return parname
#    if element is a BIND expression: return bind name
# (see operationBIND)
# 3) if element is an anonymous expression: return #N(=1,2,3...)
# N.B. Binds can be only on the 0-th level of Expression
pass
def getContextFormat(RowObject):
# Get context format from the whole RowObject
ContextFormat = {}
for par_name, par_value, par_format in RowObject:
ContextFormat[par_name] = par_format
return ContextFormat
def getDefaultFormat(Type):
if Type is int:
return '%10d'
elif Type is float:
return '%25.15E'
elif Type is str:
return '%20s'
elif Type is bool:
return '%2d'
else:
raise Exception('Unknown type')
def getDefaultValue(Type):
if Type is int:
return 0
elif Type is float:
return 0.0
elif Type is str:
return ''
elif Type is bool:
return False
else:
raise Exception('Unknown type')
# VarDictionary = Context (this name is more suitable)
# GroupIndexKey is a key to special structure/dictionary GROUP_INDEX.
# GROUP_INDEX contains information needed to calculate streamed group functions
# such as COUNT, AVG, MIN, MAX etc...
def newRowObject(ParameterNames, RowObject, VarDictionary, ContextFormat, GroupIndexKey=None):
# Return a subset of RowObject according to
# ParameterNames include either parnames
# or expressions containing parnames literals
# ContextFormat contains format for ParNames
anoncount = 0
RowObjectNew = []
for expr in ParameterNames:
if type(expr) in set([list, tuple]): # bind
head = expr[0]
if head in set(['let', 'bind', 'LET', 'BIND']):
par_name = expr[1]
par_expr = expr[2]
else:
par_name = "#%d" % anoncount
anoncount += 1
par_expr = expr
par_value = evaluateExpression(par_expr, VarDictionary, GroupIndexKey)
try:
par_format = expr[3]
except:
par_format = getDefaultFormat(type(par_value))
else: # parname
par_name = expr
par_value = VarDictionary[par_name]
par_format = ContextFormat[par_name]
RowObjectNew.append((par_name, par_value, par_format))
return RowObjectNew
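# Illustrative sketch (not executed): building a derived row with a bound expression.
# The parameter names and values are hypothetical.
#
#   RowObject = [('p1', 2.0, '%5.1f'), ('p2', 3.0, '%5.1f')]
#   VarDictionary = getVarDictionary(RowObject)
#   ContextFormat = getContextFormat(RowObject)
#   newRowObject(('p1', ('bind', 'p12', ('*', 'p1', 'p2'))),
#                RowObject, VarDictionary, ContextFormat)
#   # -> [('p1', 2.0, '%5.1f'), ('p12', 6.0, '%25.15E')]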
# ----------------------------------------------------
# /PARAMETER NAMES
# ----------------------------------------------------
# ----------------------------------------------------
# OPERATIONS ON TABLES
# ----------------------------------------------------
QUERY_BUFFER = '__BUFFER__'
def getTableList():
return LOCAL_TABLE_CACHE.keys()
def describeTable(TableName):
"""
INPUT PARAMETERS:
TableName: name of the table to describe
OUTPUT PARAMETERS:
none
---
DESCRIPTION:
Print information about table, including
parameter names, formats and wavenumber range.
---
EXAMPLE OF USAGE:
describeTable('sampletab')
---
"""
print('-----------------------------------------')
print(TableName + ' summary:')
try:
print('-----------------------------------------')
print('Comment: \n' + LOCAL_TABLE_CACHE[TableName]['header']['comment'])
except:
pass
print('Number of rows: ' + str(LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']))
print('Table type: ' + str(LOCAL_TABLE_CACHE[TableName]['header']['table_type']))
print('-----------------------------------------')
print(' PAR_NAME PAR_FORMAT')
print('')
for par_name in LOCAL_TABLE_CACHE[TableName]['header']['order']:
par_format = LOCAL_TABLE_CACHE[TableName]['header']['format'][par_name]
print('%20s %20s' % (par_name, par_format))
print('-----------------------------------------')
# Write a table to File or STDOUT
def outputTable(TableName, Conditions=None, File=None, Header=True):
# Display or record table with condition checking
if File:
Header = False
OutputFile = open(File, 'w')
if Header:
headstr = putTableHeaderToString(TableName)
if File:
OutputFile.write(headstr)
else:
print(headstr)
for RowID in range(0, LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']):
RowObject = getRowObject(RowID, TableName)
VarDictionary = getVarDictionary(RowObject)
VarDictionary['LineNumber'] = RowID
if not checkRowObject(RowObject, Conditions, VarDictionary):
continue
raw_string = putRowObjectToString(RowObject)
if File:
OutputFile.write(raw_string + '\n')
        else:
            print(raw_string)
    if File:
        OutputFile.close()
# Create table "prototype-based" way
def createTable(TableName, RowObjectDefault):
# create a Table based on a RowObjectDefault
LOCAL_TABLE_CACHE[TableName] = {}
header_order = []
header_format = {}
header_default = {}
data = {}
for par_name, par_value, par_format in RowObjectDefault:
header_order.append(par_name)
header_format[par_name] = par_format
header_default[par_name] = par_value
data[par_name] = []
# header_order = tuple(header_order) # XXX ?
LOCAL_TABLE_CACHE[TableName]['header'] = {}
LOCAL_TABLE_CACHE[TableName]['header']['order'] = header_order
LOCAL_TABLE_CACHE[TableName]['header']['format'] = header_format
LOCAL_TABLE_CACHE[TableName]['header']['default'] = header_default
LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows'] = 0
LOCAL_TABLE_CACHE[TableName]['header']['size_in_bytes'] = 0
LOCAL_TABLE_CACHE[TableName]['header']['table_name'] = TableName
LOCAL_TABLE_CACHE[TableName]['header']['table_type'] = 'column-fixed'
LOCAL_TABLE_CACHE[TableName]['data'] = data
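# Illustrative sketch (not executed): creating an empty table from a prototype row.
# The table and parameter names are hypothetical.
#
#   RowObjectDefault = [('p1', 0.0, '%10.3f'), ('comment', '', '%20s')]
#   createTable('dummytab', RowObjectDefault)
#   LOCAL_TABLE_CACHE['dummytab']['header']['order']            # -> ['p1', 'comment']
#   LOCAL_TABLE_CACHE['dummytab']['header']['number_of_rows']   # -> 0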
# simple "drop table" capability
def dropTable(TableName):
"""
INPUT PARAMETERS:
TableName: name of the table to delete
OUTPUT PARAMETERS:
none
---
DESCRIPTION:
Deletes a table from local database.
---
EXAMPLE OF USAGE:
dropTable('some_dummy_table')
---
"""
# delete Table from both Cache and Storage
try:
# LOCAL_TABLE_CACHE[TableName] = {}
del LOCAL_TABLE_CACHE[TableName]
except:
pass
# delete from storage
pass # TODO
# Returns a column corresponding to parameter name
def getColumn(TableName, ParameterName):
"""
INPUT PARAMETERS:
TableName: source table name (required)
ParameterName: name of column to get (required)
OUTPUT PARAMETERS:
ColumnData: list of values from specified column
---
DESCRIPTION:
Returns a column with a name ParameterName from
table TableName. Column is returned as a list of values.
---
EXAMPLE OF USAGE:
p1 = getColumn('sampletab','p1')
---
"""
return LOCAL_TABLE_CACHE[TableName]['data'][ParameterName]
# Returns a list of columns corresponding to parameter names
def getColumns(TableName, ParameterNames):
"""
INPUT PARAMETERS:
TableName: source table name (required)
ParameterNames: list of column names to get (required)
OUTPUT PARAMETERS:
ListColumnData: tuple of lists of values from specified column
---
DESCRIPTION:
Returns columns with a names in ParameterNames from
table TableName. Columns are returned as a tuple of lists.
---
EXAMPLE OF USAGE:
p1,p2,p3 = getColumns('sampletab',('p1','p2','p3'))
---
"""
Columns = []
for par_name in ParameterNames:
Columns.append(LOCAL_TABLE_CACHE[TableName]['data'][par_name])
return Columns
def addColumn(TableName, ParameterName, Before=None, Expression=None, Type=None, Default=None, Format=None):
if ParameterName in LOCAL_TABLE_CACHE[TableName]['header']['format']:
raise Exception('Column \"%s\" already exists' % ParameterName)
if not Type: Type = float
if not Default: Default = getDefaultValue(Type)
if not Format: Format = getDefaultFormat(Type)
number_of_rows = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
# Mess with data
if not Expression:
LOCAL_TABLE_CACHE[TableName]['data'][ParameterName] = [Default for i in range(0, number_of_rows)]
else:
data = []
for RowID in range(0, number_of_rows):
RowObject = getRowObject(RowID, TableName)
VarDictionary = getVarDictionary(RowObject)
VarDictionary['LineNumber'] = RowID
par_value = evaluateExpression(Expression, VarDictionary)
data.append(par_value)
LOCAL_TABLE_CACHE[TableName]['data'][ParameterName] = data
# Mess with header
header_order = LOCAL_TABLE_CACHE[TableName]['header']['order']
if not Before:
header_order.append(ParameterName)
else:
# i = 0
# for par_name in header_order:
# if par_name == Before: break
# i += 1
i = header_order.index(Before)
header_order = header_order[:i] + [ParameterName, ] + header_order[i:]
LOCAL_TABLE_CACHE[TableName]['header']['order'] = header_order
LOCAL_TABLE_CACHE[TableName]['header']['format'][ParameterName] = Format
LOCAL_TABLE_CACHE[TableName]['header']['default'][ParameterName] = Default
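# Illustrative sketch (not executed): adding a column computed from existing ones.
# Here a hypothetical column 'p3' is filled with p1+p2 for every row of 'sampletab'.
#
#   addColumn('sampletab', 'p3', Expression=('+', 'p1', 'p2'), Type=float)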
def deleteColumn(TableName, ParameterName):
if ParameterName not in LOCAL_TABLE_CACHE[TableName]['header']['format']:
raise Exception('No such column \"%s\"' % ParameterName)
# Mess with data
i = LOCAL_TABLE_CACHE[TableName]['header']['order'].index(ParameterName)
del LOCAL_TABLE_CACHE[TableName]['header']['order'][i]
del LOCAL_TABLE_CACHE[TableName]['header']['format'][ParameterName]
del LOCAL_TABLE_CACHE[TableName]['header']['default'][ParameterName]
if not LOCAL_TABLE_CACHE[TableName]['header']['order']:
LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows'] = 0
# Mess with header
del LOCAL_TABLE_CACHE[TableName]['data'][ParameterName]
def deleteColumns(TableName, ParameterNames):
if type(ParameterNames) not in set([list, tuple, set]):
ParameterNames = [ParameterNames]
for ParameterName in ParameterNames:
deleteColumn(TableName, ParameterName)
def renameColumn(TableName, OldParameterName, NewParameterName):
pass
def insertRow():
pass
def deleteRows(TableName, ParameterNames, Conditions):
pass
# select from table to another table
def selectInto(DestinationTableName, TableName, ParameterNames, Conditions):
# TableName must refer to an existing table in cache!!
# Conditions = Restrictables in specific format
# Sample conditions: cond = {'par1':{'range',[b_lo,b_hi]},'par2':b}
# return structure similar to TableObject and put it to QUERY_BUFFER
# if ParameterNames is '*' then all parameters are used
# table_columns = LOCAL_TABLE_CACHE[TableName]['data'].keys()
# table_length = len(TableObject['header']['number_of_rows'])
# if ParameterNames=='*':
# ParameterNames = table_columns
# check if Conditions contain elements which are not in the TableObject
# condition_variables = getConditionVariables(Conditions)
# strange_pars = set(condition_variables)-set(table_variables)
# if strange_pars:
# raise Exception('The following parameters are not in the table \"%s\"' % (TableName,list(strange_pars)))
# do full scan each time
if DestinationTableName == TableName:
raise Exception('Selecting into source table is forbidden')
table_length = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
row_count = 0
for RowID in range(0, table_length):
RowObject = getRowObject(RowID, TableName)
VarDictionary = getVarDictionary(RowObject)
VarDictionary['LineNumber'] = RowID
ContextFormat = getContextFormat(RowObject)
RowObjectNew = newRowObject(ParameterNames, RowObject, VarDictionary, ContextFormat)
if checkRowObject(RowObject, Conditions, VarDictionary):
addRowObject(RowObjectNew, DestinationTableName)
row_count += 1
LOCAL_TABLE_CACHE[DestinationTableName]['header']['number_of_rows'] += row_count
def length(TableName):
tab_len = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
# print(str(tab_len)+' rows in '+TableName)
return tab_len
# Select parameters from a table with certain conditions.
# Parameters can be the names or expressions.
# Conditions contain a list of expressions in a special language.
# Set Output to False to suppress output
# Set File=FileName to redirect output to a file.
def select(TableName, DestinationTableName=QUERY_BUFFER, ParameterNames=None, Conditions=None, Output=True, File=None):
"""
INPUT PARAMETERS:
TableName: name of source table (required)
DestinationTableName: name of resulting table (optional)
ParameterNames: list of parameters or expressions (optional)
        Conditions: list of logical expressions (optional)
Output: enable (True) or suppress (False) text output (optional)
        File: name of the file to redirect output to (optional)
OUTPUT PARAMETERS:
none
---
DESCRIPTION:
Select or filter the data in some table
either to standard output or to file (if specified)
---
EXAMPLE OF USAGE:
        select('sampletab',DestinationTableName='outtab',ParameterNames=('p1','p2'),
               Conditions=('and',('>=','p1',1),('<',('*','p1','p2'),20)))
Conditions means (p1>=1 and p1*p2<20)
---
"""
# TODO: Variables defined in ParameterNames ('LET') MUST BE VISIBLE IN Conditions !!
# check if table exists
if TableName not in LOCAL_TABLE_CACHE.keys():
raise Exception('%s: no such table. Check tableList() for more info.' % TableName)
if not ParameterNames: ParameterNames = LOCAL_TABLE_CACHE[TableName]['header']['order']
LOCAL_TABLE_CACHE[DestinationTableName] = {} # clear QUERY_BUFFER for the new result
RowObjectDefault = getDefaultRowObject(TableName)
VarDictionary = getVarDictionary(RowObjectDefault)
ContextFormat = getContextFormat(RowObjectDefault)
RowObjectDefaultNew = newRowObject(ParameterNames, RowObjectDefault, VarDictionary, ContextFormat)
dropTable(DestinationTableName) # redundant
createTable(DestinationTableName, RowObjectDefaultNew)
selectInto(DestinationTableName, TableName, ParameterNames, Conditions)
if DestinationTableName != QUERY_BUFFER:
if File: outputTable(DestinationTableName, File=File)
elif Output:
outputTable(DestinationTableName, File=File)
# SORTING ===========================================================
def arrangeTable(TableName, DestinationTableName=None, RowIDList=None):
# print 'AT/'
# print 'AT: RowIDList = '+str(RowIDList)
# make a subset of table rows according to RowIDList
    if not DestinationTableName:
        DestinationTableName = TableName
if DestinationTableName != TableName:
dropTable(DestinationTableName)
LOCAL_TABLE_CACHE[DestinationTableName]['header'] = LOCAL_TABLE_CACHE[TableName]['header']
LOCAL_TABLE_CACHE[DestinationTableName]['data'] = {}
LOCAL_TABLE_CACHE[DestinationTableName]['header']['number_of_rows'] = len(RowIDList)
# print 'AT: RowIDList = '+str(RowIDList)
for par_name in LOCAL_TABLE_CACHE[DestinationTableName]['header']['order']:
par_data = LOCAL_TABLE_CACHE[TableName]['data'][par_name]
LOCAL_TABLE_CACHE[DestinationTableName]['data'][par_name] = [par_data[i] for i in RowIDList]
def compareLESS(RowObject1, RowObject2, ParameterNames):
# print 'CL/'
# arg1 and arg2 are RowObjects
# Compare them according to ParameterNames
# Simple validity check:
# if len(arg1) != len(arg2):
# raise Exception('Arguments have different lengths')
# RowObject1Subset = subsetOfRowObject(ParameterNames,RowObject1)
# RowObject2Subset = subsetOfRowObject(ParameterNames,RowObject2)
# return RowObject1Subset < RowObject2Subset
row1 = []
row2 = []
# n = len(RowObject1)
# for i in range(0,n):
# par_name1 = RowObject1[i][0]
# if par_name1 in ParameterNames:
# par_value1 = RowObject1[i][1]
# par_value2 = RowObject2[i][1]
# row1 += [par_value1]
# row2 += [par_value2]
VarDictionary1 = getVarDictionary(RowObject1)
VarDictionary2 = getVarDictionary(RowObject2)
for par_name in ParameterNames:
par_value1 = VarDictionary1[par_name]
par_value2 = VarDictionary2[par_name]
row1 += [par_value1]
row2 += [par_value2]
Flag = row1 < row2
# print 'CL: row1 = '+str(row1)
# print 'CL: row2 = '+str(row2)
# print 'CL: Flag = '+str(Flag)
return Flag
def quickSort(index, TableName, ParameterNames, Accending=True):
# print ''
# print 'QS/'
# print 'QS: index = '+str(index)
# print index
# ParameterNames: names of parameters which are
# taking part in the sorting
if index == []:
return []
else:
# pivot = lst[0]
# lesser = quickSort([x for x in lst[1:] if x < pivot])
# greater = quickSort([x for x in lst[1:] if x >= pivot])
PivotID = index[0]
Pivot = getRowObject(PivotID, TableName)
lesser_index = []
        greater_index = []
for RowID in index[1:]:
RowObject = getRowObject(RowID, TableName)
if compareLESS(RowObject, Pivot, ParameterNames):
lesser_index += [RowID]
else:
greater_index += [RowID]
# print 'QS: lesser_index = '+str(lesser_index)
# print 'QS: greater_index = '+str(greater_index)
lesser = quickSort(lesser_index, TableName, ParameterNames, Accending)
greater = quickSort(greater_index, TableName, ParameterNames, Accending)
# return lesser + [pivot_index] + greater
if Accending:
return lesser + [PivotID] + greater
else:
return greater + [PivotID] + lesser
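# Illustrative sketch (not executed): quickSort works on a list of row indices and
# returns them reordered according to the chosen parameters; e.g. for a hypothetical
# three-row table 'sampletab' sorted by 'nu':
#
#   index_sorted = quickSort([0, 1, 2], 'sampletab', ['nu'], Accending=True)
#   arrangeTable('sampletab', 'sampletab_sorted', index_sorted)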
# Sorting must work well on the table itself!
def sort(TableName, DestinationTableName=None, ParameterNames=None, Accending=True, Output=False, File=None):
"""
INPUT PARAMETERS:
TableName: name of source table (required)
DestinationTableName: name of resulting table (optional)
ParameterNames: list of parameters or expressions to sort by (optional)
Accending: sort in ascending (True) or descending (False) order (optional)
Output: enable (True) or suppress (False) text output (optional)
        File: name of the file to redirect output to (optional)
OUTPUT PARAMETERS:
none
---
DESCRIPTION:
        Sort a table by a list of its parameters or expressions.
The sorted table is saved in DestinationTableName (if specified).
---
EXAMPLE OF USAGE:
        sort('sampletab',ParameterNames=('p1',('+','p1','p2')))
---
"""
number_of_rows = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
    index = list(range(0, number_of_rows))  # explicit list so the empty-table check in quickSort works
# print 'num = '+str(number_of_rows)
if not DestinationTableName:
DestinationTableName = TableName
# if names are not provided use all parameters in sorting
if not ParameterNames:
ParameterNames = LOCAL_TABLE_CACHE[TableName]['header']['order']
elif type(ParameterNames) not in set([list, tuple]):
ParameterNames = [ParameterNames] # fix of stupid bug where ('p1',) != ('p1')
# print 'SRT: ParameterNames = '+str(ParameterNames)
# print 'parnames: '+str(ParameterNames)
index_sorted = quickSort(index, TableName, ParameterNames, Accending)
arrangeTable(TableName, DestinationTableName, index_sorted)
if Output:
outputTable(DestinationTableName, File=File)
# /SORTING ==========================================================
# GROUPING ==========================================================
# GROUP_INDEX global auxiliary structure is a Dictionary,
# which has the following properties:
# 1) Each key is a composite variable:
# [array of values of ParameterNames variable
# STREAM_UPDATE_FLAG]
# 2) Each value is an index in LOCAL_TABLE_CACHE[TableName]['data'][...],
# corresponding to this key
# STREAM_UPDATE_FLAG = TRUE if value in GROUP_INDEX needs updating
# = FALSE otherwise
# If no grouping variables are specified (GroupParameterNames==None)
# then the following key is used: "__GLOBAL__"
def group(TableName, DestinationTableName=QUERY_BUFFER, ParameterNames=None, GroupParameterNames=None, Output=True):
"""
INPUT PARAMETERS:
TableName: name of source table (required)
DestinationTableName: name of resulting table (optional)
ParameterNames: list of parameters or expressions to take (optional)
GroupParameterNames: list of parameters or expressions to group by (optional)
Output: enable (True) or suppress (False) text output (optional)
OUTPUT PARAMETERS:
none
---
DESCRIPTION:
none
---
EXAMPLE OF USAGE:
group('sampletab',ParameterNames=('p1',('sum','p2')),GroupParameterNames=('p1'))
... makes grouping by p1,p2. For each group it calculates sum of p2 values.
---
"""
# Implements such functions as:
# count,sum,avg,min,max,ssq etc...
# 1) ParameterNames can contain group functions
# 2) GroupParameterNames can't contain group functions
# 3) If ParameterNames contains parameters defined by LET directive,
# it IS visible in the sub-context of GroupParameterNames
# 4) Parameters defined in GroupParameterNames are NOT visible in ParameterNames
# 5) ParameterNames variable represents the structure of the resulting table/collection
# 6) GroupParameterNames can contain either par_names or expressions with par_names
# Clear old GROUP_INDEX value
clearGroupIndex()
# Consistency check
if TableName == DestinationTableName:
raise Exception('TableName and DestinationTableName must be different')
# if not ParameterNames: ParameterNames=LOCAL_TABLE_CACHE[TableName]['header']['order']
# Prepare the new DestinationTable
RowObjectDefault = getDefaultRowObject(TableName)
VarDictionary = getVarDictionary(RowObjectDefault)
ContextFormat = getContextFormat(RowObjectDefault)
RowObjectDefaultNew = newRowObject(ParameterNames, RowObjectDefault, VarDictionary, ContextFormat)
dropTable(DestinationTableName) # redundant
createTable(DestinationTableName, RowObjectDefaultNew)
# Loop through rows of source Table
# On each iteration group functions update GROUP_INDEX (see description above)
number_of_rows = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
# STAGE 1: CREATE GROUPS
print('LOOP:')
for RowID in range(0, number_of_rows):
print('--------------------------------')
print('RowID=' + str(RowID))
RowObject = getRowObject(RowID, TableName) # RowObject from source table
VarDictionary = getVarDictionary(RowObject)
print('VarDictionary=' + str(VarDictionary))
# This is a trick which makes evaluateExpression function
# not consider first expression as an operation
GroupParameterNames_ = ['LIST'] + list(GroupParameterNames)
GroupIndexKey = evaluateExpression(GroupParameterNames_, VarDictionary)
# List is an unhashable type in Python!
GroupIndexKey = tuple(GroupIndexKey)
initializeGroup(GroupIndexKey)
print('GROUP_INDEX=' + str(GROUP_INDEX))
ContextFormat = getContextFormat(RowObject)
RowObjectNew = newRowObject(ParameterNames, RowObject, VarDictionary, ContextFormat, GroupIndexKey)
RowIDGroup = GROUP_INDEX[GroupIndexKey]['ROWID']
setRowObject(RowIDGroup, RowObjectNew, DestinationTableName)
# Output result if required
if Output and DestinationTableName == QUERY_BUFFER:
        outputTable(DestinationTableName)  # group() takes no File argument
# /GROUPING =========================================================
# EXTRACTING ========================================================
REGEX_INTEGER = r'[+-]?\d+'
REGEX_STRING = r'[^\s]+'
REGEX_FLOAT_F = r'[+-]?\d*\.?\d+'
REGEX_FLOAT_E = r'[+-]?\d*\.?\d+[eEfF]?[+-]?\d+'
REGEX_INTEGER_FIXCOL = lambda n: r'\d{%d}' % n
REGEX_STRING_FIXCOL = lambda n: r'[^\s]{%d}' % n
REGEX_FLOAT_F_FIXCOL = lambda n: r'[\+\-\.\d]{%d}' % n
REGEX_FLOAT_E_FIXCOL = lambda n: r'[\+\-\.\deEfF]{%d}' % n
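# Illustrative sketch (not executed): the *_FIXCOL helpers build fixed-width regex
# fragments, e.g. for fixed-width integer and float fields:
#
#   REGEX_INTEGER_FIXCOL(5)   # -> r'\d{5}'
#   REGEX_FLOAT_F_FIXCOL(10)  # -> r'[\+\-\.\d]{10}'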
# Extract sub-columns from string column
def extractColumns(TableName, SourceParameterName, ParameterFormats, ParameterNames=None, FixCol=False):
"""
INPUT PARAMETERS:
TableName: name of source table (required)
SourceParameterName: name of source column to process (required)
ParameterFormats: c formats of unpacked parameters (required)
ParameterNames: list of resulting parameter names (optional)
FixCol: column-fixed (True) format of source column (optional)
OUTPUT PARAMETERS:
none
---
DESCRIPTION:
Note, that this function is aimed to do some extra job on
interpreting string parameters which is normally supposed
to be done by the user.
---
EXAMPLE OF USAGE:
extractColumns('sampletab',SourceParameterName='p5',
ParameterFormats=('%d','%d','%d'),
ParameterNames=('p5_1','p5_2','p5_3'))
This example extracts three integer parameters from
a source column 'p5' and puts results in ('p5_1','p5_2','p5_3').
---
"""
# ParameterNames = just the names without expressions
# ParFormats contains python formats for par extraction
# Example: ParameterNames=('v1','v2','v3')
# ParameterFormats=('%1s','%1s','%1s')
# By default the format of parameters is column-fixed
if type(LOCAL_TABLE_CACHE[TableName]['header']['default'][SourceParameterName]) not in set([str, unicode]):
raise Exception('Source parameter must be a string')
i = -1
# bug when (a,) != (a)
if ParameterNames and type(ParameterNames) not in set([list, tuple]):
ParameterNames = [ParameterNames]
if ParameterFormats and type(ParameterFormats) not in set([list, tuple]):
ParameterFormats = [ParameterFormats]
# if ParameterNames is empty, fill it with #1-2-3-...
if not ParameterNames:
ParameterNames = []
        # using naming convention #i, i=0,1,2,3...
for par_format in ParameterFormats:
while True:
i += 1
par_name = '#%d' % i
fmt = LOCAL_TABLE_CACHE[TableName]['header']['format'].get(par_name, None)
if not fmt: break
ParameterNames.append(par_name)
# check if ParameterNames are valid
Intersection = set(ParameterNames).intersection(LOCAL_TABLE_CACHE[TableName]['header']['order'])
if Intersection:
raise Exception('Parameters %s already exist' % str(list(Intersection)))
# loop over ParameterNames to prepare LOCAL_TABLE_CACHE
i = 0
for par_name in ParameterNames:
par_format = ParameterFormats[i]
LOCAL_TABLE_CACHE[TableName]['header']['format'][par_name] = par_format
LOCAL_TABLE_CACHE[TableName]['data'][par_name] = []
i += 1
# append new parameters in order list
LOCAL_TABLE_CACHE[TableName]['header']['order'] += ParameterNames
# cope with default values
i = 0
format_regex = []
format_types = []
# print 'ParameterNames='+str(ParameterNames)
for par_format in ParameterFormats:
par_name = ParameterNames[i]
regex = FORMAT_PYTHON_REGEX
# print 'par_name: '+par_name
# print 'par_format: '+par_format
(lng, trail, lngpnt, ty) = re.search(regex, par_format).groups()
ty = ty.lower()
if ty == 'd':
par_type = int
if FixCol:
format_regex_part = REGEX_INTEGER_FIXCOL(lng)
else:
format_regex_part = REGEX_INTEGER
elif ty == 's':
par_type = str
if FixCol:
format_regex_part = REGEX_STRING_FIXCOL(lng)
else:
format_regex_part = REGEX_STRING
elif ty == 'f':
par_type = float
if FixCol:
format_regex_part = REGEX_FLOAT_F_FIXCOL(lng)
else:
format_regex_part = REGEX_FLOAT_F
elif ty == 'e':
par_type = float
if FixCol:
format_regex_part = REGEX_FLOAT_E_FIXCOL(lng)
else:
format_regex_part = REGEX_FLOAT_E
else:
raise Exception('Unknown data type')
format_regex.append('(' + format_regex_part + ')')
format_types.append(par_type)
def_val = getDefaultValue(par_type)
LOCAL_TABLE_CACHE[TableName]['header']['default'][par_name] = def_val
i += 1
format_regex = '\s*'.join(format_regex)
# print 'format_regex='+str(format_regex)
# return format_regex
# loop through values of SourceParameter
for SourceParameterString in LOCAL_TABLE_CACHE[TableName]['data'][SourceParameterName]:
try:
ExtractedValues = list(re.search(format_regex, SourceParameterString).groups())
except:
raise Exception('Error with line \"%s\"' % SourceParameterString)
i = 0
# loop through all parameters which are supposed to be extracted
for par_name in ParameterNames:
# print 'ExtractedValues[i]='+ExtractedValues[i]
# print 'par_name='+par_name
par_value = format_types[i](ExtractedValues[i])
LOCAL_TABLE_CACHE[TableName]['data'][par_name].append(par_value)
i += 1
# explicitly check that number of rows are equal
number_of_rows = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
number_of_rows2 = len(LOCAL_TABLE_CACHE[TableName]['data'][SourceParameterName])
number_of_rows3 = len(LOCAL_TABLE_CACHE[TableName]['data'][ParameterNames[0]])
if not (number_of_rows == number_of_rows2 == number_of_rows3):
raise Exception('Error while extracting parameters: check your regexp')
# Split string columns into sub-columns with given names
def splitColumn(TableName, SourceParameterName, ParameterNames, Splitter):
pass
# /EXTRACTING =======================================================
# ---------------------------------------------------------------
# ---------------------------------------------------------------
# /LOCAL DATABASE MANAGEMENT SYSTEM
# ---------------------------------------------------------------
# ---------------------------------------------------------------
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# GLOBAL API FUNCTIONS
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
def mergeParlist(*arg):
# Merge parlists and remove duplicates.
# Argument contains a list of lists/tuples.
container = []
for a in arg:
container += list(a)
result = []
index = set()
for par_name in container:
if par_name not in index:
index.add(par_name)
result.append(par_name)
return result
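# Illustrative sketch (not executed): mergeParlist concatenates parameter lists and
# removes duplicates while preserving the first-seen order:
#
#   mergeParlist(['nu', 'sw'], ['sw', 'gamma_air'])   # -> ['nu', 'sw', 'gamma_air']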
# Define parameter groups to simplify the usage of fetch_
PARLIST_DOTPAR = ['par_line', ]
PARLIST_ID = ['trans_id', ]
PARLIST_STANDARD = ['molec_id', 'local_iso_id', 'nu', 'sw', 'a', 'elower', 'gamma_air',
'delta_air', 'gamma_self', 'n_air', 'n_self', 'gp', 'gpp']
PARLIST_LABELS = ['statep', 'statepp']
PARLIST_LINEMIXING = ['y_air', 'y_self']
PARLIST_VOIGT_AIR = ['gamma_air', 'delta_air', 'deltap_air', 'n_air']
PARLIST_VOIGT_SELF = ['gamma_self', 'delta_self', 'deltap_self', 'n_self']
PARLIST_VOIGT_H2 = ['gamma_H2', 'delta_H2', 'deltap_H2', 'n_H2']
PARLIST_VOIGT_CO2 = ['gamma_CO2', 'delta_CO2', 'n_CO2']
PARLIST_VOIGT_HE = ['gamma_He', 'delta_He', 'n_He']
PARLIST_VOIGT_ALL = mergeParlist(PARLIST_VOIGT_AIR, PARLIST_VOIGT_SELF,
PARLIST_VOIGT_H2, PARLIST_VOIGT_CO2,
PARLIST_VOIGT_HE)
PARLIST_SDVOIGT_AIR = ['gamma_air', 'delta_air', 'deltap_air', 'n_air', 'SD_air']
PARLIST_SDVOIGT_SELF = ['gamma_self', 'delta_self', 'deltap_self', 'n_self', 'SD_self']
PARLIST_SDVOIGT_H2 = []
PARLIST_SDVOIGT_CO2 = []
PARLIST_SDVOIGT_HE = []
PARLIST_SDVOIGT_ALL = mergeParlist(PARLIST_SDVOIGT_AIR, PARLIST_SDVOIGT_SELF,
PARLIST_SDVOIGT_H2, PARLIST_SDVOIGT_CO2,
PARLIST_SDVOIGT_HE)
PARLIST_GALATRY_AIR = ['gamma_air', 'delta_air', 'deltap_air', 'n_air', 'beta_g_air']
PARLIST_GALATRY_SELF = ['gamma_self', 'delta_self', 'deltap_self', 'n_self', 'beta_g_self']
PARLIST_GALATRY_H2 = []
PARLIST_GALATRY_CO2 = []
PARLIST_GALATRY_HE = []
PARLIST_GALATRY_ALL = mergeParlist(PARLIST_GALATRY_AIR, PARLIST_GALATRY_SELF,
PARLIST_GALATRY_H2, PARLIST_GALATRY_CO2,
PARLIST_GALATRY_HE)
PARLIST_ALL = mergeParlist(PARLIST_ID, PARLIST_DOTPAR, PARLIST_STANDARD,
PARLIST_LABELS, PARLIST_LINEMIXING, PARLIST_VOIGT_ALL,
PARLIST_SDVOIGT_ALL, PARLIST_GALATRY_ALL)
PARAMETER_GROUPS = {
'par_line': PARLIST_DOTPAR,
'160-char': PARLIST_DOTPAR,
'.par': PARLIST_DOTPAR,
'id': PARLIST_ID,
'standard': PARLIST_STANDARD,
'labels': PARLIST_LABELS,
'linemixing': PARLIST_LINEMIXING,
'voigt_air': PARLIST_VOIGT_AIR,
'voigt_self': PARLIST_VOIGT_SELF,
'voigt_h2': PARLIST_VOIGT_H2,
'voigt_co2': PARLIST_VOIGT_CO2,
'voigt_he': PARLIST_VOIGT_HE,
'voigt': PARLIST_VOIGT_ALL,
'sdvoigt_air': PARLIST_SDVOIGT_AIR,
'sdvoigt_self': PARLIST_SDVOIGT_SELF,
'sdvoigt_h2': PARLIST_SDVOIGT_H2,
'sdvoigt_co2': PARLIST_SDVOIGT_CO2,
'sdvoigt_he': PARLIST_SDVOIGT_HE,
'sdvoigt': PARLIST_SDVOIGT_ALL,
'galatry_air': PARLIST_GALATRY_AIR,
'galatry_self': PARLIST_GALATRY_SELF,
'galatry_h2': PARLIST_GALATRY_H2,
'galatry_co2': PARLIST_GALATRY_CO2,
'galatry_he': PARLIST_GALATRY_HE,
'galatry': PARLIST_GALATRY_ALL,
'all': PARLIST_ALL
}
def prepareParlist(pargroups=[], params=[], dotpar=True):
# Apply defaults
parlist_default = []
if dotpar:
parlist_default += ['par_line']
# parlist_default += PARAMETER_GROUPS['id']
# Make a dictionary of "assumed" parameters.
ASSUMED_PARAMS = {}
if 'par_line' in set(parlist_default):
ASSUMED_PARAMS = HITRAN_DEFAULT_HEADER['format']
parlist = parlist_default
# Iterate over parameter groups.
for pargroup in pargroups:
pargroup = pargroup.lower()
parlist += PARAMETER_GROUPS[pargroup]
# Iterate over single parameters.
for param in params:
param = param.lower()
parlist.append(param)
# Clean up parameter list.
parlist = mergeParlist(parlist)
result = []
for param in parlist:
if param not in ASSUMED_PARAMS:
result.append(param)
return result
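# Illustrative sketch (not executed): prepareParlist expands parameter groups, lowercases
# names and drops parameters that are already part of the assumed 160-character record:
#
#   prepareParlist(pargroups=['linemixing'], params=['gamma_H2'])
#   # keeps 'y_air', 'y_self' and 'gamma_h2', since they are not in the assumed record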
def prepareHeader(parlist):
HEADER = {'table_name': '', 'number_of_rows': -1, 'format': {},
'default': {}, 'table_type': 'column-fixed',
'size_in_bytes': -1, 'order': [], 'description': {}}
# Add column-fixed 160-character part, if specified in parlist.
if 'par_line' in set(parlist):
HEADER['order'] = HITRAN_DEFAULT_HEADER['order']
HEADER['format'] = HITRAN_DEFAULT_HEADER['format']
HEADER['default'] = HITRAN_DEFAULT_HEADER['default']
HEADER['description'] = HITRAN_DEFAULT_HEADER['description']
# Insert all other parameters in the "extra" section of the header.
# while 'par_line' in parlist: parlist.remove('par_line')
plist = [v for v in parlist if v != 'par_line']
HEADER['extra'] = []
HEADER['extra_format'] = {}
HEADER['extra_separator'] = ','
for param in plist:
param = param.lower()
HEADER['extra'].append(param)
HEADER['extra_format'][param] = PARAMETER_META[param]['default_fmt']
return HEADER
def queryHITRAN(TableName, iso_id_list, numin, numax, pargroups=[], params=[], dotpar=True, head=False):
# import httplib
# conn = httplib.HTTPConnection('hitranazure.cloudapp.com')
# conn.Request('')
# r = conn.getresponse()
# print r.status, r.reason
# data1 = data1.read
# TableHeader = HITRAN_DEFAULT_HEADER # deprecated
ParameterList = prepareParlist(pargroups=pargroups, params=params, dotpar=dotpar)
TableHeader = prepareHeader(ParameterList)
TableHeader['table_name'] = TableName
DataFileName = VARIABLES['BACKEND_DATABASE_NAME'] + '/' + TableName + '.data'
HeaderFileName = VARIABLES['BACKEND_DATABASE_NAME'] + '/' + TableName + '.header'
# if TableName in LOCAL_TABLE_CACHE.keys():
# raise Exception('Table \"%s\" exists' % TableName)
# if os.path.isfile(DataFileName):
# raise Exception('File \"%s\" exists' % DataFileName)
# if os.path.isfile(HeaderFileName):
# raise Exception('!!File \"%s\" exists' % HeaderFileName)
# create URL
iso_id_list_str = [str(iso_id) for iso_id in iso_id_list]
iso_id_list_str = ','.join(iso_id_list_str)
# url = 'http://hitran.cloudapp.net' + '/lbl/5?' + \
# url = 'http://hitranazure.cloudapp.net' + '/lbl/5?' + \
# 'iso_ids_list=' + iso_id_list_str + '&' + \
# 'numin=' + str(numin) + '&' + \
# 'numax=' + str(numax) + '&' + \
# 'access=api' + '&' + \
# 'key=' + GLOBAL_HITRAN_APIKEY
if pargroups or params: # custom par search
url = GLOBAL_HOST + '/lbl/api?' + \
'iso_ids_list=' + iso_id_list_str + '&' + \
'numin=' + str(numin) + '&' + \
'numax=' + str(numax) + '&' + \
'head=' + str(head) + '&' + \
'fixwidth=0&sep=[comma]&' + \
'request_params=' + ','.join(ParameterList)
else: # old-fashioned .par search
url = GLOBAL_HOST + '/lbl/api?' + \
'iso_ids_list=' + iso_id_list_str + '&' + \
'numin=' + str(numin) + '&' + \
'numax=' + str(numax)
# raise Exception(url)
# Download data by chunks.
try:
req = urllib2.urlopen(url)
except urllib2.HTTPError:
raise Exception('Failed to retrieve data for given parameters.')
except urllib2.URLError:
raise Exception('Cannot connect to %s. Try again or edit GLOBAL_HOST variable.' % GLOBAL_HOST)
# CHUNK = 16 * 1024 # default value
CHUNK = 64 * 1024
print('BEGIN DOWNLOAD: ' + TableName)
with open(DataFileName, 'w') as fp:
while True:
chunk = req.read(CHUNK)
if not chunk: break
fp.write(chunk.decode('utf-8'))
            print(' %d bytes written to %s' % (len(chunk), DataFileName))
with open(HeaderFileName, 'w') as fp:
fp.write(json.dumps(TableHeader, indent=2))
print('Header written to %s' % HeaderFileName)
print('END DOWNLOAD')
# Set comment
# Get this table to LOCAL_TABLE_CACHE
storage2cache(TableName)
print('PROCESSED')
# NODE CODE
NODE_READY = False
# Node initialization
def nodeInit():
    global NODE_READY  # without this, the assignment below would only create a local variable
    # very suboptimal, since it loads all tables in memory!!
    # loadCache()
    databaseBegin()  # DB backend level, start transaction
    NODE_READY = True
# returns a table instance created from Query object
def globalSelectInto(NewTablePath, SourceTablePath, ParameterNames, Conditions):
# creates table from parsed data
# and store it in the database DB
dbname, tablename, nodename = NewTablePath.split('::')
dbname1, tablename1, nodename1 = SourceTablePath.split('::')
if not NODE_READY: raise Exception('Node \"%s\" is not ready. Call nodeInit()' % NODE_NAME)
# should get rid of selectLocal as planning to use network interface
# ...... selectLocal OR selectRemote
pass
# ---------------------------------------------------------------
# query_string - query written in the
# formal language of local database frontend
def makeQuery(query_string, Connection=GLOBAL_CONNECTION):
# makes a query to remote server
# using connection instance
pass
# ---------- DATABASE FRONTEND END -------------
# simple implementation of getting a line list from a remote server
def getLinelist(local_name, query, api_key):
return makeQuery(local_name)
# -------------------------------------------------------------------
# -------------------------------------------------------------------
# / GLOBAL API FUNCTIONS
# -------------------------------------------------------------------
# -------------------------------------------------------------------
# ---------------- FILTER ---------------------------------------------
def filter(TableName, Conditions):
select(TableName=TableName, Conditions=Conditions, Output=False)
# ---------------------- ISO.PY ---------------------------------------
ISO_ID_INDEX = {
'M': 0,
'I': 1,
'iso_name': 2,
'abundance': 3,
'mass': 4,
'mol_name': 5
}
# id M I iso_name abundance mass mol_name
ISO_ID = {
1: [1, 1, 'H2(16O)', 0.997317, 18.010565, 'H2O'],
2: [1, 2, 'H2(18O)', 0.00199983, 20.014811, 'H2O'],
3: [1, 3, 'H2(17O)', 0.000372, 19.01478, 'H2O'],
4: [1, 4, 'HD(16O)', 0.00031069, 19.01674, 'H2O'],
5: [1, 5, 'HD(18O)', 0.000000623, 21.020985, 'H2O'],
6: [1, 6, 'HD(17O)', 0.000000116, 20.020956, 'H2O'],
7: [2, 1, '(12C)(16O)2', 0.9842, 43.98983, 'CO2'],
8: [2, 2, '(13C)(16O)2', 0.01106, 44.993185, 'CO2'],
9: [2, 3, '(16O)(12C)(18O)', 0.0039471, 45.994076, 'CO2'],
10: [2, 4, '(16O)(12C)(17O)', 0.000734, 44.994045, 'CO2'],
11: [2, 5, '(16O)(13C)(18O)', 0.00004434, 46.997431, 'CO2'],
12: [2, 6, '(16O)(13C)(17O)', 0.00000825, 45.9974, 'CO2'],
13: [2, 7, '(12C)(18O)2', 0.0000039573, 47.998322, 'CO2'],
14: [2, 8, '(17O)(12C)(18O)', 0.00000147, 46.998291, 'CO2'],
15: [2, 0, '(13C)(18O)2', 0.000000044967, 49.001675, 'CO2'],
120: [2, 11, '(18O)(13C)(17O)', 0.00000001654, 48.00165, 'CO2'],
121: [2, 9, '(12C)(17O)2', 0.0000001368, 45.998262, 'CO2'],
16: [3, 1, '(16O)3', 0.992901, 47.984745, 'O3'],
17: [3, 2, '(16O)(16O)(18O)', 0.00398194, 49.988991, 'O3'],
18: [3, 3, '(16O)(18O)(16O)', 0.00199097, 49.988991, 'O3'],
19: [3, 4, '(16O)(16O)(17O)', 0.00074, 48.98896, 'O3'],
20: [3, 5, '(16O)(17O)(16O)', 0.00037, 48.98896, 'O3'],
21: [4, 1, '(14N)2(16O)', 0.990333, 44.001062, 'N2O'],
22: [4, 2, '(14N)(15N)(16O)', 0.0036409, 44.998096, 'N2O'],
23: [4, 3, '(15N)(14N)(16O)', 0.0036409, 44.998096, 'N2O'],
24: [4, 4, '(14N)2(18O)', 0.00198582, 46.005308, 'N2O'],
25: [4, 5, '(14N)2(17O)', 0.000369, 45.005278, 'N2O'],
26: [5, 1, '(12C)(16O)', 0.98654, 27.994915, 'CO'],
27: [5, 2, '(13C)(16O)', 0.01108, 28.99827, 'CO'],
28: [5, 3, '(12C)(18O)', 0.0019782, 29.999161, 'CO'],
29: [5, 4, '(12C)(17O)', 0.000368, 28.99913, 'CO'],
30: [5, 5, '(13C)(18O)', 0.00002222, 31.002516, 'CO'],
31: [5, 6, '(13C)(17O)', 0.00000413, 30.002485, 'CO'],
32: [6, 1, '(12C)H4', 0.98827, 16.0313, 'CH4'],
33: [6, 2, '(13C)H4', 0.0111, 17.034655, 'CH4'],
34: [6, 3, '(12C)H3D', 0.00061575, 17.037475, 'CH4'],
35: [6, 4, '(13C)H3D', 0.0000049203, 18.04083, 'CH4'],
36: [7, 1, '(16O)2', 0.995262, 31.98983, 'O2'],
37: [7, 2, '(16O)(18O)', 0.00399141, 33.994076, 'O2'],
38: [7, 3, '(16O)(17O)', 0.000742, 32.994045, 'O2'],
39: [8, 1, '(14N)(16O)', 0.993974, 29.997989, 'NO'],
40: [8, 2, '(15N)(16O)', 0.0036543, 30.995023, 'NO'],
41: [8, 3, '(14N)(18O)', 0.00199312, 32.002234, 'NO'],
42: [9, 1, '(32S)(16O)2', 0.94568, 63.961901, 'SO2'],
43: [9, 2, '(34S)(16O)2', 0.04195, 65.957695, 'SO2'],
44: [10, 1, '(14N)(16O)2', 0.991616, 45.992904, 'NO2'],
45: [11, 1, '(14N)H3', 0.9958715, 17.026549, 'NH3'],
46: [11, 2, '(15N)H3', 0.0036613, 18.023583, 'NH3'],
47: [12, 1, 'H(14N)(16O)3', 0.98911, 62.995644, 'HNO3'],
117: [12, 2, 'H(15N)(16O)3', 0.003636, 63.99268, 'HNO3'],
48: [13, 1, '(16O)H', 0.997473, 17.00274, 'OH'],
49: [13, 2, '(18O)H', 0.00200014, 19.006986, 'OH'],
50: [13, 3, '(16O)D', 0.00015537, 18.008915, 'OH'],
51: [14, 1, 'H(19F)', 0.99984425, 20.006229, 'HF'],
110: [14, 2, 'D(19F)', 0.000115, 21.0125049978, 'HF'],
52: [15, 1, 'H(35Cl)', 0.757587, 35.976678, 'HCl'],
53: [15, 2, 'H(37Cl)', 0.242257, 37.973729, 'HCl'],
107: [15, 3, 'D(35Cl)', 0.000118005, 36.9829544578, 'HCl'],
108: [15, 4, 'D(37Cl)', 0.000037735, 38.9800043678, 'HCl'],
54: [16, 1, 'H(79Br)', 0.50678, 79.92616, 'HBr'],
55: [16, 2, 'H(81Br)', 0.49306, 81.924115, 'HBr'],
111: [16, 3, 'D(79Br)', 0.0000582935, 80.9324388778, 'HBr'],
112: [16, 4, 'D(81Br)', 0.0000567065, 82.9303923778, 'HBr'],
56: [17, 1, 'H(127I)', 0.99984425, 127.912297, 'HI'],
113: [17, 2, 'D(127I)', 0.000115, 128.918574778, 'HI'],
57: [18, 1, '(35Cl)(16O)', 0.75591, 50.963768, 'ClO'],
58: [18, 2, '(37Cl)(16O)', 0.24172, 52.960819, 'ClO'],
59: [19, 1, '(16O)(12C)(32S)', 0.93739, 59.966986, 'OCS'],
60: [19, 2, '(16O)(12C)(34S)', 0.04158, 61.96278, 'OCS'],
61: [19, 3, '(16O)(13C)(32S)', 0.01053, 60.970341, 'OCS'],
62: [19, 4, '(16O)(12C)(33S)', 0.01053, 60.966371, 'OCS'],
63: [19, 5, '(18O)(12C)(32S)', 0.00188, 61.971231, 'OCS'],
64: [20, 1, 'H2(12C)(16O)', 0.98624, 30.010565, 'H2CO'],
65: [20, 2, 'H2(13C)(16O)', 0.01108, 31.01392, 'H2CO'],
66: [20, 3, 'H2(12C)(18O)', 0.0019776, 32.014811, 'H2CO'],
67: [21, 1, 'H(16O)(35Cl)', 0.75579, 51.971593, 'HOCl'],
68: [21, 2, 'H(16O)(37Cl)', 0.24168, 53.968644, 'HOCl'],
69: [22, 1, '(14N)2', 0.9926874, 28.006147, 'N2'],
118: [22, 2, '(14N)(15N)', 0.0072535, 29.997989, 'N2'],
70: [23, 1, 'H(12C)(14N)', 0.98511, 27.010899, 'HCN'],
71: [23, 2, 'H(13C)(14N)', 0.01107, 28.014254, 'HCN'],
72: [23, 3, 'H(12C)(15N)', 0.0036217, 28.007933, 'HCN'],
73: [24, 1, '(12C)H3(35Cl)', 0.74894, 49.992328, 'CH3Cl'],
74: [24, 2, '(12C)H3(37Cl)', 0.23949, 51.989379, 'CH3Cl'],
75: [25, 1, 'H2(16O)2', 0.994952, 34.00548, 'H2O2'],
76: [26, 1, '(12C)2H2', 0.9776, 26.01565, 'C2H2'],
77: [26, 2, '(12C)(13C)H2', 0.02197, 27.019005, 'C2H2'],
105: [26, 3, '(12C)2HD', 0.00030455, 27.021825, 'C2H2'],
78: [27, 1, '(12C)2H6', 0.97699, 30.04695, 'C2H6'],
106: [27, 2, '(12C)H3(13C)H3', 0.021952611, 31.050305, 'C2H6'],
79: [28, 1, '(31P)H3', 0.99953283, 33.997238, 'PH3'],
80: [29, 1, '(12C)(16O)(19F)2', 0.98654, 65.991722, 'COF2'],
119: [29, 2, '(13C)(16O)(19F)2', 0.0110834, 66.995083, 'COF2'],
81: [31, 1, 'H2(32S)', 0.94988, 33.987721, 'H2S'],
82: [31, 2, 'H2(34S)', 0.04214, 35.983515, 'H2S'],
83: [31, 3, 'H2(33S)', 0.007498, 34.987105, 'H2S'],
84: [32, 1, 'H(12C)(16O)(16O)H', 0.983898, 46.00548, 'HCOOH'],
85: [33, 1, 'H(16O)2', 0.995107, 32.997655, 'HO2'],
86: [34, 1, '(16O)', 0.997628, 15.994915, 'O'],
87: [36, 1, '(14N)(16O)+', 0.993974, 29.997989, 'NOp'],
88: [37, 1, 'H(16O)(79Br)', 0.5056, 95.921076, 'HOBr'],
89: [37, 2, 'H(16O)(81Br)', 0.4919, 97.919027, 'HOBr'],
90: [38, 1, '(12C)2H4', 0.9773, 28.0313, 'C2H4'],
91: [38, 2, '(12C)H2(13C)H2', 0.02196, 29.034655, 'C2H4'],
92: [39, 1, '(12C)H3(16O)H', 0.98593, 32.026215, 'CH3OH'],
93: [40, 1, '(12C)H3(79Br)', 0.5013, 93.941811, 'CH3Br'],
94: [40, 2, '(12C)H3(81Br)', 0.48766, 95.939764, 'CH3Br'],
95: [41, 1, '(12C)H3(12C)(14N)', 0.97482, 41.026549, 'CH3CN'],
96: [42, 1, '(12C)(19F)4', 0.9893, 87.993616, 'CF4'],
116: [43, 1, '(12C)4H2', 0.955998, 50.01565, 'C4H2'],
109: [44, 1, 'H(12C)3(14N)', 0.9646069, 51.01089903687, 'HC3N'],
103: [45, 1, 'H2', 0.999688, 2.01565, 'H2'],
115: [45, 2, 'HD', 0.00022997, 3.021825, 'H2'],
97: [46, 1, '(12C)(32S)', 0.939624, 43.971036, 'CS'],
98: [46, 2, '(12C)(34S)', 0.0416817, 45.966787, 'CS'],
99: [46, 3, '(13C)(32S)', 0.0105565, 44.974368, 'CS'],
100: [46, 4, '(12C)(33S)', 0.00741668, 44.970399, 'CS'],
114: [47, 1, '(32S)(16O)3', 0.9423964, 79.95682, 'SO3'],
101: [1001, 1, 'H', None, None, 'H'],
102: [1002, 1, 'He', None, None, 'He'],
104: [1018, 1, 'Ar', None, None, 'Ar'],
}
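# Illustrative sketch (not executed): looking up isotopologue metadata through the
# ISO_ID_INDEX accessor, e.g. for global isotopologue id 1 (H2(16O)):
#
#   ISO_ID[1][ISO_ID_INDEX['iso_name']]    # -> 'H2(16O)'
#   ISO_ID[1][ISO_ID_INDEX['abundance']]   # -> 0.997317
#   ISO_ID[1][ISO_ID_INDEX['mol_name']]    # -> 'H2O'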
ISO_INDEX = {
'id': 0,
'iso_name': 1,
'abundance': 2,
'mass': 3,
'mol_name': 4
}
# M I id iso_name abundance mass mol_name
ISO = {
(1, 1): [1, 'H2(16O)', 0.997317, 18.010565, 'H2O'],
(1, 2): [2, 'H2(18O)', 0.00199983, 20.014811, 'H2O'],
(1, 3): [3, 'H2(17O)', 0.000372, 19.01478, 'H2O'],
(1, 4): [4, 'HD(16O)', 0.00031069, 19.01674, 'H2O'],
(1, 5): [5, 'HD(18O)', 0.000000623, 21.020985, 'H2O'],
(1, 6): [6, 'HD(17O)', 0.000000116, 20.020956, 'H2O'],
(2, 1): [7, '(12C)(16O)2', 0.9842, 43.98983, 'CO2'],
(2, 2): [8, '(13C)(16O)2', 0.01106, 44.993185, 'CO2'],
(2, 3): [9, '(16O)(12C)(18O)', 0.0039471, 45.994076, 'CO2'],
(2, 4): [10, '(16O)(12C)(17O)', 0.000734, 44.994045, 'CO2'],
(2, 5): [11, '(16O)(13C)(18O)', 0.00004434, 46.997431, 'CO2'],
(2, 6): [12, '(16O)(13C)(17O)', 0.00000825, 45.9974, 'CO2'],
(2, 7): [13, '(12C)(18O)2', 0.0000039573, 47.998322, 'CO2'],
(2, 8): [14, '(17O)(12C)(18O)', 0.00000147, 46.998291, 'CO2'],
(2, 0): [15, '(13C)(18O)2', 0.000000044967, 49.001675, 'CO2'],
(2, 11): [120, '(18O)(13C)(17O)', 0.00000001654, 48.00165, 'CO2'],
(2, 9): [121, '(12C)(17O)2', 0.0000001368, 45.998262, 'CO2'],
(3, 1): [16, '(16O)3', 0.992901, 47.984745, 'O3'],
(3, 2): [17, '(16O)(16O)(18O)', 0.00398194, 49.988991, 'O3'],
(3, 3): [18, '(16O)(18O)(16O)', 0.00199097, 49.988991, 'O3'],
(3, 4): [19, '(16O)(16O)(17O)', 0.00074, 48.98896, 'O3'],
(3, 5): [20, '(16O)(17O)(16O)', 0.00037, 48.98896, 'O3'],
(4, 1): [21, '(14N)2(16O)', 0.990333, 44.001062, 'N2O'],
(4, 2): [22, '(14N)(15N)(16O)', 0.0036409, 44.998096, 'N2O'],
(4, 3): [23, '(15N)(14N)(16O)', 0.0036409, 44.998096, 'N2O'],
(4, 4): [24, '(14N)2(18O)', 0.00198582, 46.005308, 'N2O'],
(4, 5): [25, '(14N)2(17O)', 0.000369, 45.005278, 'N2O'],
(5, 1): [26, '(12C)(16O)', 0.98654, 27.994915, 'CO'],
(5, 2): [27, '(13C)(16O)', 0.01108, 28.99827, 'CO'],
(5, 3): [28, '(12C)(18O)', 0.0019782, 29.999161, 'CO'],
(5, 4): [29, '(12C)(17O)', 0.000368, 28.99913, 'CO'],
(5, 5): [30, '(13C)(18O)', 0.00002222, 31.002516, 'CO'],
(5, 6): [31, '(13C)(17O)', 0.00000413, 30.002485, 'CO'],
(6, 1): [32, '(12C)H4', 0.98827, 16.0313, 'CH4'],
(6, 2): [33, '(13C)H4', 0.0111, 17.034655, 'CH4'],
(6, 3): [34, '(12C)H3D', 0.00061575, 17.037475, 'CH4'],
(6, 4): [35, '(13C)H3D', 0.0000049203, 18.04083, 'CH4'],
(7, 1): [36, '(16O)2', 0.995262, 31.98983, 'O2'],
(7, 2): [37, '(16O)(18O)', 0.00399141, 33.994076, 'O2'],
(7, 3): [38, '(16O)(17O)', 0.000742, 32.994045, 'O2'],
(8, 1): [39, '(14N)(16O)', 0.993974, 29.997989, 'NO'],
(8, 2): [40, '(15N)(16O)', 0.0036543, 30.995023, 'NO'],
(8, 3): [41, '(14N)(18O)', 0.00199312, 32.002234, 'NO'],
(9, 1): [42, '(32S)(16O)2', 0.94568, 63.961901, 'SO2'],
(9, 2): [43, '(34S)(16O)2', 0.04195, 65.957695, 'SO2'],
(10, 1): [44, '(14N)(16O)2', 0.991616, 45.992904, 'NO2'],
(11, 1): [45, '(14N)H3', 0.9958715, 17.026549, 'NH3'],
(11, 2): [46, '(15N)H3', 0.0036613, 18.023583, 'NH3'],
(12, 1): [47, 'H(14N)(16O)3', 0.98911, 62.995644, 'HNO3'],
(12, 2): [117, 'H(15N)(16O)3', 0.003636, 63.99268, 'HNO3'],
(13, 1): [48, '(16O)H', 0.997473, 17.00274, 'OH'],
(13, 2): [49, '(18O)H', 0.00200014, 19.006986, 'OH'],
(13, 3): [50, '(16O)D', 0.00015537, 18.008915, 'OH'],
(14, 1): [51, 'H(19F)', 0.99984425, 20.006229, 'HF'],
(14, 2): [110, 'D(19F)', 0.000115, 21.0125049978, 'HF'],
(15, 1): [52, 'H(35Cl)', 0.757587, 35.976678, 'HCl'],
(15, 2): [53, 'H(37Cl)', 0.242257, 37.973729, 'HCl'],
(15, 3): [107, 'D(35Cl)', 0.000118005, 36.9829544578, 'HCl'],
(15, 4): [108, 'D(37Cl)', 0.000037735, 38.9800043678, 'HCl'],
(16, 1): [54, 'H(79Br)', 0.50678, 79.92616, 'HBr'],
(16, 2): [55, 'H(81Br)', 0.49306, 81.924115, 'HBr'],
(16, 3): [111, 'D(79Br)', 0.0000582935, 80.9324388778, 'HBr'],
(16, 4): [112, 'D(81Br)', 0.0000567065, 82.9303923778, 'HBr'],
(17, 1): [56, 'H(127I)', 0.99984425, 127.912297, 'HI'],
(17, 2): [113, 'D(127I)', 0.000115, 128.918574778, 'HI'],
(18, 1): [57, '(35Cl)(16O)', 0.75591, 50.963768, 'ClO'],
(18, 2): [58, '(37Cl)(16O)', 0.24172, 52.960819, 'ClO'],
(19, 1): [59, '(16O)(12C)(32S)', 0.93739, 59.966986, 'OCS'],
(19, 2): [60, '(16O)(12C)(34S)', 0.04158, 61.96278, 'OCS'],
(19, 3): [61, '(16O)(13C)(32S)', 0.01053, 60.970341, 'OCS'],
(19, 4): [62, '(16O)(12C)(33S)', 0.01053, 60.966371, 'OCS'],
(19, 5): [63, '(18O)(12C)(32S)', 0.00188, 61.971231, 'OCS'],
(20, 1): [64, 'H2(12C)(16O)', 0.98624, 30.010565, 'H2CO'],
(20, 2): [65, 'H2(13C)(16O)', 0.01108, 31.01392, 'H2CO'],
(20, 3): [66, 'H2(12C)(18O)', 0.0019776, 32.014811, 'H2CO'],
(21, 1): [67, 'H(16O)(35Cl)', 0.75579, 51.971593, 'HOCl'],
(21, 2): [68, 'H(16O)(37Cl)', 0.24168, 53.968644, 'HOCl'],
(22, 1): [69, '(14N)2', 0.9926874, 28.006147, 'N2'],
(22, 2): [118, '(14N)(15N)', 0.0072535, 29.997989, 'N2'],
(23, 1): [70, 'H(12C)(14N)', 0.98511, 27.010899, 'HCN'],
(23, 2): [71, 'H(13C)(14N)', 0.01107, 28.014254, 'HCN'],
(23, 3): [72, 'H(12C)(15N)', 0.0036217, 28.007933, 'HCN'],
(24, 1): [73, '(12C)H3(35Cl)', 0.74894, 49.992328, 'CH3Cl'],
(24, 2): [74, '(12C)H3(37Cl)', 0.23949, 51.989379, 'CH3Cl'],
(25, 1): [75, 'H2(16O)2', 0.994952, 34.00548, 'H2O2'],
(26, 1): [76, '(12C)2H2', 0.9776, 26.01565, 'C2H2'],
(26, 2): [77, '(12C)(13C)H2', 0.02197, 27.019005, 'C2H2'],
(26, 3): [105, '(12C)2HD', 0.00030455, 27.021825, 'C2H2'],
(27, 1): [78, '(12C)2H6', 0.97699, 30.04695, 'C2H6'],
(27, 2): [106, '(12C)H3(13C)H3', 0.021952611, 31.050305, 'C2H6'],
(28, 1): [79, '(31P)H3', 0.99953283, 33.997238, 'PH3'],
(29, 1): [80, '(12C)(16O)(19F)2', 0.98654, 65.991722, 'COF2'],
(29, 2): [119, '(13C)(16O)(19F)2', 0.0110834, 66.995083, 'COF2'],
(31, 1): [81, 'H2(32S)', 0.94988, 33.987721, 'H2S'],
(31, 2): [82, 'H2(34S)', 0.04214, 35.983515, 'H2S'],
(31, 3): [83, 'H2(33S)', 0.007498, 34.987105, 'H2S'],
(32, 1): [84, 'H(12C)(16O)(16O)H', 0.983898, 46.00548, 'HCOOH'],
(33, 1): [85, 'H(16O)2', 0.995107, 32.997655, 'HO2'],
(34, 1): [86, '(16O)', 0.997628, 15.994915, 'O'],
(36, 1): [87, '(14N)(16O)+', 0.993974, 29.997989, 'NOp'],
(37, 1): [88, 'H(16O)(79Br)', 0.5056, 95.921076, 'HOBr'],
(37, 2): [89, 'H(16O)(81Br)', 0.4919, 97.919027, 'HOBr'],
(38, 1): [90, '(12C)2H4', 0.9773, 28.0313, 'C2H4'],
(38, 2): [91, '(12C)H2(13C)H2', 0.02196, 29.034655, 'C2H4'],
(39, 1): [92, '(12C)H3(16O)H', 0.98593, 32.026215, 'CH3OH'],
(40, 1): [93, '(12C)H3(79Br)', 0.5013, 93.941811, 'CH3Br'],
(40, 2): [94, '(12C)H3(81Br)', 0.48766, 95.939764, 'CH3Br'],
(41, 1): [95, '(12C)H3(12C)(14N)', 0.97482, 41.026549, 'CH3CN'],
(42, 1): [96, '(12C)(19F)4', 0.9893, 87.993616, 'CF4'],
(43, 1): [116, '(12C)4H2', 0.955998, 50.01565, 'C4H2'],
(44, 1): [109, 'H(12C)3(14N)', 0.9646069, 51.01089903687, 'HC3N'],
(45, 1): [103, 'H2', 0.999688, 2.01565, 'H2'],
(45, 2): [115, 'HD', 0.00022997, 3.021825, 'H2'],
(46, 1): [97, '(12C)(32S)', 0.939624, 43.971036, 'CS'],
(46, 2): [98, '(12C)(34S)', 0.0416817, 45.966787, 'CS'],
(46, 3): [99, '(13C)(32S)', 0.0105565, 44.974368, 'CS'],
(46, 4): [100, '(12C)(33S)', 0.00741668, 44.970399, 'CS'],
(47, 1): [114, '(32S)(16O)3', 0.9423964, 79.95682, 'SO3'],
(1001, 1): [101, 'H', None, None, 'H'],
(1002, 1): [102, 'He', None, None, 'He'],
(1018, 1): [104, 'Ar', None, None, 'Ar'],
}
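# Illustrative usage sketch (not part of the original documentation): entries of the
# ISO and ISO_ID dictionaries can be accessed with the ISO_INDEX and ISO_ID_INDEX maps,
# e.g. for the main water isotopologue (M=1, I=1, global id=1 as listed above):
#   ISO[(1, 1)][ISO_INDEX['mass']]        # -> 18.010565
#   ISO[(1, 1)][ISO_INDEX['abundance']]   # -> 0.997317
#   ISO_ID[1][ISO_ID_INDEX['iso_name']]   # -> 'H2(16O)'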
def print_iso():
print('The dictionary \"ISO\" contains information on isotopologues in HITRAN\n')
print(' M I id iso_name abundance mass mol_name')
for i in ISO:
ab = ISO[i][ISO_INDEX['abundance']]
ma = ISO[i][ISO_INDEX['mass']]
ab = ab if ab else -1
ma = ma if ma else -1
print('%4i %4i : %5i %25s %10f %10f %15s' % (
i[0], i[1], ISO[i][ISO_INDEX['id']], ISO[i][ISO_INDEX['iso_name']], ab, ma, ISO[i][ISO_INDEX['mol_name']]))
def print_iso_id():
print('The dictionary \"ISO_ID\" contains information on \"global\" IDs of isotopologues in HITRAN\n')
print(' id M I iso_name abundance mass mol_name')
for i in ISO_ID:
ab = ISO_ID[i][ISO_ID_INDEX['abundance']]
ma = ISO_ID[i][ISO_ID_INDEX['mass']]
ab = ab if ab else -1
ma = ma if ma else -1
print('%5i : %4i %4i %25s %15.10f %10f %15s' % (
i, ISO_ID[i][ISO_ID_INDEX['M']], ISO_ID[i][ISO_ID_INDEX['I']], ISO_ID[i][ISO_ID_INDEX['iso_name']], ab, ma,
ISO_ID[i][ISO_ID_INDEX['mol_name']]))
profiles = 'profiles'
def print_profiles():
print('Profiles available:')
print(' HT : PROFILE_HT')
print(' SDRautian : PROFILE_SDRAUTIAN')
print(' Rautian : PROFILE_RAUTIAN')
print(' SDVoigt : PROFILE_SDVOIGT')
print(' Voigt : PROFILE_VOIGT')
print(' Lorentz : PROFILE_LORENTZ')
print(' Doppler : PROFILE_DOPPLER')
slit_functions = 'slit_functions'
def print_slit_functions():
print(' RECTANGULAR : SLIT_RECTANGULAR')
print(' TRIANGULAR : SLIT_TRIANGULAR')
print(' GAUSSIAN : SLIT_GAUSSIAN')
print(' DIFFRACTION : SLIT_DIFFRACTION')
print(' MICHELSON : SLIT_MICHELSON')
print(' DISPERSION/LORENTZ : SLIT_DISPERSION')
tutorial = 'tutorial'
units = 'units'
index = 'index'
data = 'data'
spectra = 'spectra'
plotting = 'plotting'
python = 'python'
python_tutorial_text = \
"""
THIS TUTORIAL IS TAKEN FROM http://www.stavros.io/tutorials/python/
AUTHOR: Stavros Korokithakis
----- LEARN PYTHON IN 10 MINUTES -----
PRELIMINARY STUFF
So, you want to learn the Python programming language but can't find a concise
and yet full-featured tutorial. This tutorial will attempt to teach you Python in 10 minutes.
It's probably not so much a tutorial as it is a cross between a tutorial and a cheatsheet,
so it will just show you some basic concepts to start you off. Obviously, if you want to
really learn a language you need to program in it for a while. I will assume that you are
already familiar with programming and will, therefore, skip most of the non-language-specific stuff.
The important keywords will be highlighted so you can easily spot them. Also, pay attention because,
due to the terseness of this tutorial, some things will be introduced directly in code and only
briefly commented on.
PROPERTIES
Python is strongly typed (i.e. types are enforced), dynamically, implicitly typed (i.e. you don't
have to declare variables), case sensitive (i.e. var and VAR are two different variables) and
object-oriented (i.e. everything is an object).
GETTING HELP
Help in Python is always available right in the interpreter. If you want to know how an object works,
all you have to do is call help(<object>)! Also useful are dir(), which shows you all the object's methods,
and <object>.__doc__, which shows you its documentation string:
>>> help(5)
Help on int object:
(etc etc)
>>> dir(5)
['__abs__', '__add__', ...]
>>> abs.__doc__
'abs(number) -> number
Return the absolute value of the argument.'
SYNTAX
Python has no mandatory statement termination characters and blocks are specified by indentation.
Indent to begin a block, dedent to end one. Statements that expect an indentation level end in a colon (:).
Comments start with the pound (#) sign and are single-line, multi-line strings are used for multi-line comments.
Values are assigned (in fact, objects are bound to names) with the _equals_ sign ("="), and equality testing is
done using two _equals_ signs ("=="). You can increment/decrement values using the += and -= operators respectively
by the right-hand amount. This works on many datatypes, strings included. You can also use multiple variables on one
line. For example:
>>> myvar = 3
>>> myvar += 2
>>> myvar
5
>>> myvar -= 1
>>> myvar
4
\"\"\"This is a multiline comment.
The following lines concatenate the two strings.\"\"\"
>>> mystring = "Hello"
>>> mystring += " world."
>>> print mystring
Hello world.
# This swaps the variables in one line(!).
# It doesn't violate strong typing because values aren't
# actually being assigned, but new objects are bound to
# the old names.
>>> myvar, mystring = mystring, myvar
DATA TYPES
The data structures available in python are lists, tuples and dictionaries.
Sets are available in the sets library (but are built-in in Python 2.5 and later).
Lists are like one-dimensional arrays (but you can also have lists of other lists),
dictionaries are associative arrays (a.k.a. hash tables) and tuples are immutable
one-dimensional arrays (Python "arrays" can be of any type, so you can mix e.g. integers,
strings, etc in lists/dictionaries/tuples). The index of the first item in all array types is 0.
Negative numbers count from the end towards the beginning, -1 is the last item. Variables
can point to functions. The usage is as follows:
>>> sample = [1, ["another", "list"], ("a", "tuple")]
>>> mylist = ["List item 1", 2, 3.14]
>>> mylist[0] = "List item 1 again" # We're changing the item.
>>> mylist[-1] = 3.21 # Here, we refer to the last item.
>>> mydict = {"Key 1": "Value 1", 2: 3, "pi": 3.14}
>>> mydict["pi"] = 3.15 # This is how you change dictionary values.
>>> mytuple = (1, 2, 3)
>>> myfunction = len
>>> print myfunction(mylist)
3
You can access array ranges using a colon (:). Leaving the start index empty assumes the first item,
leaving the end index assumes the last item. Negative indexes count from the last item backwards
(thus -1 is the last item) like so:
>>> mylist = ["List item 1", 2, 3.14]
>>> print mylist[:]
['List item 1', 2, 3.1400000000000001]
>>> print mylist[0:2]
['List item 1', 2]
>>> print mylist[-3:-1]
['List item 1', 2]
>>> print mylist[1:]
[2, 3.14]
# Adding a third parameter, "step" will have Python step in
# N item increments, rather than 1.
# E.g., this will return the first item, then go to the third and
# return that (so, items 0 and 2 in 0-indexing).
>>> print mylist[::2]
['List item 1', 3.14]
STRINGS
Its strings can use either single or double quotation marks, and you can have quotation
marks of one kind inside a string that uses the other kind (i.e. "He said 'hello'." is valid).
Multiline strings are enclosed in _triple double (or single) quotes_ (\"\"\").
Python supports Unicode out of the box, using the syntax u"This is a unicode string".
To fill a string with values, you use the % (modulo) operator and a tuple.
Each %s gets replaced with an item from the tuple, left to right, and you can also use
dictionary substitutions, like so:
>>>print "Name: %s\
Number: %s\
String: %s" % (myclass.name, 3, 3 * "-")
Name: Poromenos
Number: 3
String: ---
strString = \"\"\"This is
a multiline
string.\"\"\"
# WARNING: Watch out for the trailing s in "%(key)s".
>>> print "This %(verb)s a %(noun)s." % {"noun": "test", "verb": "is"}
This is a test.
FLOW CONTROL STATEMENTS
Flow control statements are if, for, and while. There is no select; instead, use if.
Use for to enumerate through members of a list. To obtain a list of numbers,
use range(<number>). These statements' syntax is thus:
rangelist = range(10)
>>> print rangelist
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> for number in rangelist:
# Check if number is one of
# the numbers in the tuple.
if number in (3, 4, 7, 9):
# "Break" terminates a for without
# executing the "else" clause.
break
else:
# "Continue" starts the next iteration
# of the loop. It's rather useless here,
# as it's the last statement of the loop.
continue
else:
# The "else" clause is optional and is
# executed only if the loop didn't "break".
pass # Do nothing
>>> if rangelist[1] == 2:
print "The second item (lists are 0-based) is 2"
elif rangelist[1] == 3:
print "The second item (lists are 0-based) is 3"
else:
print "Dunno"
>>> while rangelist[1] == 1:
pass
FUNCTIONS
Functions are declared with the "def" keyword. Optional arguments are set in
the function declaration after the mandatory arguments by being assigned a default
value. For named arguments, the name of the argument is assigned a value.
Functions can return a tuple (and using tuple unpacking you can effectively return
multiple values). Lambda functions are ad hoc functions that are comprised of
a single statement. Parameters are passed by reference, but immutable types (tuples,
ints, strings, etc) *cannot be changed*. This is because only the memory location of
the item is passed, and binding another object to a variable discards the old one,
so immutable types are replaced. For example:
# Same as def funcvar(x): return x + 1
>>> funcvar = lambda x: x + 1
>>> print funcvar(1)
2
# an_int and a_string are optional, they have default values
# if one is not passed (2 and "A default string", respectively).
>>> def passing_example(a_list, an_int=2, a_string="A default string"):
a_list.append("A new item")
an_int = 4
return a_list, an_int, a_string
>>> my_list = [1, 2, 3]
>>> my_int = 10
>>> print passing_example(my_list, my_int)
([1, 2, 3, 'A new item'], 4, "A default string")
>>> my_list
[1, 2, 3, 'A new item']
>>> my_int
10
CLASSES
Python supports a limited form of multiple inheritance in classes.
Private variables and methods can be declared (by convention, this is not enforced
by the language) by adding at least two leading underscores and at most one trailing
one (e.g. "__spam"). We can also bind arbitrary names to class instances.
An example follows:
>>> class MyClass(object):
common = 10
def __init__(self):
self.myvariable = 3
def myfunction(self, arg1, arg2):
return self.myvariable
# This is the class instantiation
>>> classinstance = MyClass()
>>> classinstance.myfunction(1, 2)
3
# This variable is shared by all classes.
>>> classinstance2 = MyClass()
>>> classinstance.common
10
>>> classinstance2.common
10
# Note how we use the class name
# instead of the instance.
>>> MyClass.common = 30
>>> classinstance.common
30
>>> classinstance2.common
30
# This will not update the variable on the class,
# instead it will bind a new object to the old
# variable name.
>>> classinstance.common = 10
>>> classinstance.common
10
>>> classinstance2.common
30
>>> MyClass.common = 50
# This has not changed, because "common" is
# now an instance variable.
>>> classinstance.common
10
>>> classinstance2.common
50
# This class inherits from MyClass. The example
# class above inherits from "object", which makes
# it what's called a "new-style class".
# Multiple inheritance is declared as:
# class OtherClass(MyClass1, MyClass2, MyClassN)
>>> class OtherClass(MyClass):
# The "self" argument is passed automatically
# and refers to the class instance, so you can set
# instance variables as above, but from inside the class.
def __init__(self, arg1):
self.myvariable = 3
print arg1
>>> classinstance = OtherClass("hello")
hello
>>> classinstance.myfunction(1, 2)
3
# This class doesn't have a .test member, but
# we can add one to the instance anyway. Note
# that this will only be a member of classinstance.
>>> classinstance.test = 10
>>> classinstance.test
10
EXCEPTIONS
Exceptions in Python are handled with try-except [exceptionname] blocks:
>>> def some_function():
try:
# Division by zero raises an exception
10 / 0
except ZeroDivisionError:
print "Oops, invalid."
else:
# Exception didn't occur, we're good.
pass
finally:
# This is executed after the code block is run
# and all exceptions have been handled, even
# if a new exception is raised while handling.
print "We're done with that."
>>> some_function()
Oops, invalid.
We're done with that.
IMPORTING:
External libraries are used with the import [libname] keyword.
You can also use from [libname] import [funcname] for individual functions.
Here is an example:
>>> import random
>>> from time import clock
>>> randomint = random.randint(1, 100)
>>> print randomint
64
FILE I/O
Python has a wide array of libraries built in. As an example, here is how serializing
(converting data structures to strings using the pickle library) with file I/O is used:
>>> import pickle
>>> mylist = ["This", "is", 4, 13327]
# Open the file C:\\binary.dat for writing. The letter r before the
# filename string is used to prevent backslash escaping.
>>> myfile = open(r"C:\\binary.dat", "w")
>>> pickle.dump(mylist, myfile)
>>> myfile.close()
>>> myfile = open(r"C:\\text.txt", "w")
>>> myfile.write("This is a sample string")
>>> myfile.close()
>>> myfile = open(r"C:\\text.txt")
>>> print myfile.read()
'This is a sample string'
>>> myfile.close()
# Open the file for reading.
>>> myfile = open(r"C:\\binary.dat")
>>> loadedlist = pickle.load(myfile)
>>> myfile.close()
>>> print loadedlist
['This', 'is', 4, 13327]
MISCELLANEOUS
-> Conditions can be chained. 1 < a < 3 checks
that a is both less than 3 and greater than 1.
-> You can use del to delete variables or items in arrays.
-> List comprehensions provide a powerful way to create
and manipulate lists. They consist of an expression
followed by a for clause followed by zero or more
if or for clauses, like so:
>>> lst1 = [1, 2, 3]
>>> lst2 = [3, 4, 5]
>>> print [x * y for x in lst1 for y in lst2]
[3, 4, 5, 6, 8, 10, 9, 12, 15]
>>> print [x for x in lst1 if 4 > x > 1]
[2, 3]
# Check if a condition is true for any items.
# "any" returns true if any item in the list is true.
>>> any([i % 3 for i in [3, 3, 4, 4, 3]])
True
# This is because 4 % 3 = 1, and 1 is true, so any()
# returns True.
# Check for how many items a condition is true.
>>> sum(1 for i in [3, 3, 4, 4, 3] if i == 4)
2
>>> del lst1[0]
>>> print lst1
[2, 3]
>>> del lst1
-> Global variables are declared outside of functions
and can be read without any special declarations,
but if you want to write to them you must declare them
at the beginning of the function with the "global" keyword,
otherwise Python will bind that object to a new local
variable (be careful of that, it's a small catch that can
get you if you don't know it). For example:
>>> number = 5
>>> def myfunc():
# This will print 5.
print number
>>> def anotherfunc():
# This raises an exception because the variable has not
# been bound before printing. Python knows that an
# object will be bound to it later and creates a new, local
# object instead of accessing the global one.
print number
number = 3
>>> def yetanotherfunc():
global number
# This will correctly change the global.
number = 3
EPILOGUE
This tutorial is not meant to be an exhaustive list of all (or even a subset) of Python.
Python has a vast array of libraries and much much more functionality which you will
have to discover through other means, such as the excellent book Dive into Python.
I hope I have made your transition in Python easier. Please leave comments if you believe
there is something that could be improved or added or if there is anything else
you would like to see (classes, error handling, anything).
"""
def print_python_tutorial():
pydoc.pager(python_tutorial_text)
data_tutorial_text = \
"""
ACCESS YOUR DATA!
Welcome to tutorial on retrieving and processing the data from HITRANonline.
///////////////
/// PREFACE ///
///////////////
HITRANonline API is a set of Python routines aimed at providing remote
access to the functionality and data offered by the new project
HITRANonline (http://hitranazure.cloudapp.net).
At the present moment the API can download, filter and process data on
molecular and atomic line-by-line spectra provided by the HITRANonline portal.
One of the major purposes of introducing the API is to extend the functionality
of the main site, in particular by providing the possibility to calculate several
types of high- and low-resolution spectra based on the flexible HT lineshape.
Each feature of the API is represented by a Python function with a set of parameters
providing a flexible approach to the task.
///////////////////////
/// FEATURE SUMMARY ///
///////////////////////
1) Downloading line-by-line data from the HITRANonline site to local database.
2) Filtering and processing the data in SQL-like fashion.
3) Conventional Python structures (lists, tuples, dictionaries) for representing
spectroscopic data.
4) Possibility to use a large set of third-party Python libraries to work with the data.
5) Python implementation of an HT (Hartmann-Tran [1]) lineshape which is used in spectra
simulations. This lineshape can also be reduced to a number of conventional
line profiles such as Gaussian (Doppler), Lorentzian, Voigt, Rautian,
Speed-dependent Voigt and Rautian.
6) Python implementation of total internal partition sums (TIPS-2011 [2])
which is used in spectra simulations.
7) High-resolution spectra simulation accounting for pressure,
temperature and optical path length. The following spectral functions
can be calculated:
a) absorption coefficient
b) absorption spectrum
c) transmittance spectrum
d) radiance spectrum
8) Low-resolution spectra simulation using a number of apparatus functions.
9) Possibility to extend the API with the user's own functionality by adding custom lineshapes,
partition sums and apparatus functions.
References:
[1] N.H. Ngo, D. Lisak, H. Tran, J.-M. Hartmann.
An isolated line-shape model to go beyond the Voigt profile in
spectroscopic databases and radiative transfer codes.
JQSRT, Volume 129, November 2013, Pages 89–100
http://dx.doi.org/10.1016/j.jqsrt.2013.05.034
[2] A. L. Laraia, R. R. Gamache, J. Lamouroux, I. E. Gordon, L. S. Rothman.
Total internal partition sums to support planetary remote sensing.
Icarus, Volume 215, Issue 1, September 2011, Pages 391–400
http://dx.doi.org/10.1016/j.icarus.2011.06.004
_______________________________________________________________________
This tutorial will give you an insight of how to use HAPI for Python.
First, let's choose a folder for our local database. Every time you start
your Python project, you have to specify explicitly the name of the
database folder.
>>> db_begin('data')
So, let's download some data from the server and do some processing on it.
Suppose that we want to get line-by-line data on the main isotopologue of H2O.
For retrieving the data to the local database, the user has to specify the following parameters:
1) Name of the local table which will store the downloaded data.
2) Either a pair of molecule and isotopologue HITRAN numbers (M and I),
or a "global" isotopologue ID (iso_id).
3) Wavenumber range (nu_min and nu_max)
N.B. If you specify a name which already exists in the database,
the existing table with that name will be overridden.
To get additional information on function fetch,
call getHelp:
>>> getHelp(fetch)
...
To download the data, simply call the function "fetch".
This will establish a connection with the main server and get the data using
the parameters listed above:
>>> fetch('H2O',1,1,3400,4100)
BEGIN DOWNLOAD: H2O
65536 bytes written to data/H2O.data
65536 bytes written to data/H2O.data
65536 bytes written to data/H2O.data
...
65536 bytes written to data/H2O.data
65536 bytes written to data/H2O.data
65536 bytes written to data/H2O.data
Header written to data/H2O.header
END DOWNLOAD
Lines parsed: 7524
PROCESSED
The output is shown right after the console line ">>>".
To check the file that you've just downloaded you can open the database
folder. The new plain text file should have a name "H2O.data" and
it should contain line-by-line data in HITRAN format.
N.B. If we want several isotopologues in one table, we should
use fetch_by_ids instead of just fetch. Fetch_by_ids takes "global"
isotopologue ID numbers as an input instead of HITRAN's "local" identification.
See getHelp(fetch_by_ids) to get more information on this.
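For example (an illustrative sketch; the global IDs 1-4 correspond to the
water isotopologues listed in the ISO_ID dictionary, and the table name is arbitrary):
>>> fetch_by_ids('H2O_iso',[1,2,3,4],3400,4100)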
To get a list of tables which are already in the database,
use tableList() function (it takes no arguments):
>>> tableList()
To learn about the table we just downloaded, let's use a function "describeTable".
>>> describeTable('H2O')
-----------------------------------------
H2O summary:
-----------------------------------------
Comment:
Contains lines for H2(16O)
in 3400.000-4100.000 wavenumber range
Number of rows: 7524
Table type: column-fixed
-----------------------------------------
PAR_NAME PAR_FORMAT
molec_id %2d
local_iso_id %1d
nu %12.6f
sw %10.3E
a %10.3E
gamma_air %5.4f
gamma_self %5.3f
elower %10.4f
n_air %4.2f
delta_air %8.6f
global_upper_quanta %15s
global_lower_quanta %15s
local_upper_quanta %15s
local_lower_quanta %15s
ierr %6s
iref %12s
line_mixing_flag %1s
gp %7.1f
gpp %7.1f
-----------------------------------------
This output tells how many rows are currently in the table H2O and which
wavenumber range was used by fetch(). It also gives basic information
about the parameters stored in the table.
So, having the table downloaded, one can perform different operations on it
using API.
Here is a list of operations currently available with API:
1) FILTERING
2) OUTPUTTING
3) SORTING
4) GROUPING
////////////////////////////////
/// FILTERING AND OUTPUTTING ///
////////////////////////////////
The table data can be filtered with the help of select() function.
Use simple select() call to output the table content:
>>> select('H2O')
MI nu S A gair gsel E_nair dair ...
11 1000.288940 1.957E-24 2.335E-02.07100.350 1813.22270.680.008260 ...
11 1000.532321 2.190E-28 1.305E-05.04630.281 2144.04590.39-.011030 ...
...
This will display the list of line parameters contained in the table "H2O".
That's the simplest way of using the function select(). Full information
on control parameters can be obtained via getHelp(select) statement.
Suppose that we need lines from a table within some wavenumber range.
That's what filtering is for. Let's apply a simple range filter on a table.
>>> select('H2O',Conditions=('between','nu',4000,4100))
MI nu S A gair gsel E_nair dair
11 4000.188800 1.513E-25 1.105E-02.03340.298 1581.33570.51-.013910 ...
11 4000.204070 3.482E-24 8.479E-03.08600.454 586.47920.61-.007000 ...
11 4000.469910 3.268E-23 1.627E+00.05410.375 1255.91150.56-.013050 ...
......
As a result of this operation, we see a list of lines of the H2O table,
whose wavenumbers lie between 4000 cm-1 and 4100 cm-1.
The condition is taken as an input parameter to API function "select".
To specify a subset of columns to display, use another control parameter -
ParameterNames:
>>> select('H2O',ParameterNames=('nu','sw'),Conditions=('between','nu',4000,4100))
The usage of ParameterNames is outlined below in the section "Specifying a list
of parameters". For now it is worth mentioning that this parameter is a part
of a powerful tool for displaying and processing tables from the database.
In the next section we will show how to create queries
with more complex conditions.
////////////////////////////
/// FILTERING CONDITIONS ///
////////////////////////////
Let's analyze the last example of filtering. Condition input variable is
as follows:
('between','nu',4000,4100)
Thus, this is a python list (or tuple), containing logical expressions
defined under column names of the table. For example, 'nu' is a name of
the column in 'H2O' table, and this column contains a transition wavenumber.
The structure of a simple condition is as follows:
(OPERATION,ARG1,ARG2,...)
Where OPERATION must be in a set of predefined operations (see below),
and ARG1,ARG2 etc. are the arguments for this operation.
Conditions can be nested, i.e. ARG can itself be a condition (see examples).
The following operations are available in select (case insensitive):
DESCRIPTION LITERAL EXAMPLE
---------------------------------------------------------------------------------
Range: 'RANGE','BETWEEN': ('BETWEEN','nu',0,1000)
Subset: 'IN','SUBSET': ('IN','local_iso_id',[1,2,3,4])
And: '&','&&','AND': ('AND',('<','nu',1000),('>','nu',10))
Or: '|','||','OR': ('OR',('>','nu',1000),('<','nu',10))
Not: '!','NOT': ('NOT',('IN','local_iso_id',[1,2,3]))
Less than: '<','LESS','LT': ('<','nu',1000)
More than: '>','MORE','MT': ('>','sw',1.0e-20)
Less or equal than: '<=','LESSOREQUAL','LTE': ('<=','local_iso_id',10)
More or equal than '>=','MOREOREQUAL','MTE': ('>=','sw',1e-20)
Equal:              '=','==','EQ','EQUAL','EQUALS':  ('==','local_iso_id',1)
Not equal: '!=','<>','~=','NE','NOTEQUAL': ('!=','local_iso_id',1)
Summation: '+','SUM': ('+','v1','v2','v3')
Difference: '-','DIFF': ('-','nu','elow')
Multiplication: '*','MUL': ('*','sw',0.98)
Division: '/','DIV': ('/','A',2)
Cast to string: 'STR','STRING': ('STR','some_string')
Cast to Python list 'LIST': ('LIST',[1,2,3,4,5])
Match regexp 'MATCH','LIKE': ('MATCH','\w+','some string')
Search single match: 'SEARCH': ('SEARCH','\d \d \d','1 2 3 4')
Search all matches: 'FINDALL': ('FINDALL','\d','1 2 3 4 5')
Count within group: 'COUNT' : ('COUNT','local_iso_id')
---------------------------------------------------------------------------------
Let's create a query with a more complex condition. Suppose that we are
interested in all lines between 3500 and 4000 cm-1 with a 1e-19 intensity cutoff.
The query will look like this:
>>> Cond = ('AND',('BETWEEN','nu',3500,4000),('>=','Sw',1e-19))
>>> select('H2O',Conditions=Cond,DestinationTableName='tmp')
Here, apart from other parameters, we have used a new parameter
DestinationTableName. This parameter contains the name of the table
where we want to put the result of the query. Thus we have chosen
a name 'tmp' for a new table.
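The newly created table can then be inspected just like any other table
in the database, for instance (a short illustrative sketch):
>>> describeTable('tmp')
>>> select('tmp')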
////////////////////////////////////
/// ACCESSING COLUMNS IN A TABLE ///
////////////////////////////////////
To get access to a particular table column (or columns), all we need
is to get a column from a table and put it into a Python variable.
For this purpose, there exist two functions:
getColumn(...)
getColumns(...)
The first one returns just one column at a time. The second one returns
a list of columns.
So, here are some examples of how to use both:
>>> nu1 = getColumn('H2O','nu')
>>> nu2,sw2 = getColumns('H2O',['nu','sw'])
N.B. If you don't remember exact names of columns in a particular table,
use describeTable to get info on its structure!
///////////////////////////////////////
/// SPECIFYING A LIST OF PARAMETERS ///
///////////////////////////////////////
Suppose that we want not only to select a set of parameters/columns
from a table, but also to do certain transformations with them (for example,
multiply a column by a coefficient, add one column to another, etc.).
We can do it in two ways. First, we can extract a column from the table
using one of the functions (getColumn or getColumns) and do the rest
in Python. The second way is to do it on the level of select.
The select function has a control parameter "ParameterNames", which
makes it possible to specify parameters we want to be selected,
and evaluate some simple arithmetic expressions with them.
Assume that we need only the wavenumber and intensity from the H2O table.
Also we need to scale the intensity to unitary abundance. To do so,
we must divide the 'sw' parameter by its natural abundance (0.99731 for the
principal isotopologue of water).
Thus, we have to select two columns:
wavenumber (nu) and scaled intensity (sw/0.99731). The expression for the
scaled intensity uses the same operation syntax as in filtering conditions:
>>> select('H2O',ParameterNames=('nu',('/','sw',0.99731)))
////////////////////////////
/// SAVING QUERY TO DISK ///
////////////////////////////
To quickly save a result of a query to disk, the user can take
advantage of an additional parameter "File".
If this parameter is present in the function call, then the query is
saved to a file with the name specified in "File".
For example, select all lines from H2O and save the result in file 'H2O.txt':
>>> select('H2O',File='H2O.txt')
////////////////////////////////////////////
/// GETTING INFORMATION ON ISOTOPOLOGUES ///
////////////////////////////////////////////
API provides the following auxiliary information about isotopologues
present in HITRAN. Corresponding functions use the standard HITRAN
molecule-isotopologue notation:
1) Natural abundances
>>> abundance(mol_id,iso_id)
2) Molecular masses
>>> molecularMass(mol_id,iso_id)
3) Molecule names
>>> moleculeName(mol_id)
4) Isotopologue names
>>> isotopologueName(mol_id,iso_id)
5) ISO_ID
>>> getHelp(ISO_ID)
The latter is a dictionary which contains all information about
isotopologues concentrated in one place.
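For example (a short illustrative sketch; the values correspond to the main
water isotopologue as listed in the ISO dictionary):
>>> abundance(1,1)         # 0.997317
>>> molecularMass(1,1)     # 18.010565
>>> moleculeName(1)        # 'H2O'
>>> isotopologueName(1,1)  # 'H2(16O)'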
"""
def print_data_tutorial():
pydoc.pager(data_tutorial_text)
spectra_tutorial_text = \
"""
CALCULATE YOUR SPECTRA!
Welcome to tutorial on calculating a spectra from line-by-line data.
///////////////
/// PREFACE ///
///////////////
This tutorial will demonstrate how to use different lineshapes and partition
functions, and how to calculate synthetic spectra with respect to different
instruments. It will be shown how to combine different parameters of spectral
calculation to achieve better precision and performance for cross sections.
API provides a powerful tool to calculate cross-sections based on line-by-line
data contained in HITRAN. The features are:
*) Python implementation of an HT (Hartmann-Tran [1]) lineshape which is used in
spectra simulations. This lineshape can also be reduced to a number of
conventional line profiles such as Gaussian (Doppler), Lorentzian, Voigt,
Rautian, Speed-dependent Voigt and Rautian.
*) Python implementation of total internal partition sums (TIPS-2011 [2])
which is used in spectra simulations.
*) High-resolution spectra simulation accounting for pressure,
temperature and optical path length. The following spectral functions
can be calculated:
a) absorption coefficient
b) absorption spectrum
c) transmittance spectrum
d) radiance spectrum
*) Low-resolution spectra simulation using a number of apparatus functions.
*) Possibility to extend the API with the user's own functionality by adding custom lineshapes,
partition sums and apparatus functions.
*) An approach to the function interface that aims to be flexible enough yet hopefully
intuitive.
References:
[1] N.H. Ngo, D. Lisak, H. Tran, J.-M. Hartmann.
An isolated line-shape model to go beyond the Voigt profile in
spectroscopic databases and radiative transfer codes.
JQSRT, Volume 129, November 2013, Pages 89–100
http://dx.doi.org/10.1016/j.jqsrt.2013.05.034
[2] A. L. Laraia, R. R. Gamache, J. Lamouroux, I. E. Gordon, L. S. Rothman.
Total internal partition sums to support planetary remote sensing.
Icarus, Volume 215, Issue 1, September 2011, Pages 391–400
http://dx.doi.org/10.1016/j.icarus.2011.06.004
///////////////////////////
/// USING LINE PROFILES ///
///////////////////////////
Several lineshape (line profile) families are currently available:
1) Gaussian (Doppler) profile
2) Lorentzian profile
3) Voigt profile
4) Speed-dependent Voigt profile
5) Rautian profile
6) Speed-dependent Rautian profile
7) HT profile (Hartmann-Tran)
Each profile has its own unique set of parameters. Normally one should
use profile parameters only in conjunction with their "native" profiles.
So, let's start exploring the available profiles using getHelp:
>>> getHelp(profiles)
Profiles available:
HT : PROFILE_HT
SDRautian : PROFILE_SDRAUTIAN
Rautian : PROFILE_RAUTIAN
SDVoigt : PROFILE_SDVOIGT
Voigt : PROFILE_VOIGT
Lorentz : PROFILE_LORENTZ
Doppler : PROFILE_DOPPLER
Output gives all available profiles. We can get additional info on each
of them just by calling getHelp(ProfileName):
>>> getHelp(PROFILE_HT)
Line profiles, adapted for use with HAPI, are written in Python and
make heavy use of the numerical library "Numpy". This means that the user
can calculate multiple values of a particular profile at once simply by
passing a numpy array as a wavenumber grid. Let's give a short
example of how to calculate the HT profile on a numpy array.
>>> from numpy import arange
w0 = 1000.
GammaD = 0.005
Gamma0 = 0.2
Gamma2 = 0.01 * Gamma0
Delta0 = 0.002
Delta2 = 0.001 * Delta0
nuVC = 0.2
eta = 0.5
Dw = 1.
ww = arange(w0-Dw, w0+Dw, 0.01) # GRID WITH THE STEP 0.01
l1 = PROFILE_HT(w0,GammaD,Gamma0,Gamma2,Delta0,Delta2,nuVC,eta,ww)[0]
# now l1 contains values of the HT profile calculated on the grid ww
For additional information about the parameters see getHelp(PROFILE_HT).
It is worth noting that PROFILE_HT returns 2 entities: the real and imaginary parts
of the lineshape (as described in the article given in the preface). Apart from
HT, all other profiles return just one entity (the real part).
////////////////////////////
/// USING PARTITION SUMS ///
////////////////////////////
As it was mentioned in the preface to this tutorial, the partition sums
are taken from the TIPS-2011 (the link is given above). Partition sums
are taken for those isotopologues, which are present in HITRAN and in
TIPS-2011 simultaneously.
N.B. Partition sums are omitted for the following isotopologues which
are in HITRAN at the moment:
ID M I ISO MOL
--------------------------------------------------
117 12 2 H(15N)(16O)3 HNO3
110 14 2 D(19F) HF
107 15 3 D(35Cl) HCl
108 15 4 D(37Cl) HCl
111 16 3 D(79Br) HBr
112 16 4 D(81Br) HBr
113 17 2 D(127I) HI
118 22 2 (14N)(15N) N2
119 29 2 (13C)(16O)(19F)2 COF2
86 34 1 (16O) O
92 39 1 (12C)H3(16O)H CH3OH
114 47 1 (32S)(16O)3 SO3
--------------------------------------------------
The data on these isotopologues is not present in TIPS-2011 but is
present in HITRAN. We're planning to add these molecules after TIPS-2013
is released.
To calculate a partition sum for most of the isotopologues in HITRAN,
we will use a function partitionSum (use getHelp for detailed info).
Let's just mention that
the syntax is as follows: partitionSum(M,I,T), where M,I is the standard
HITRAN molecule-isotopologue notation and T is the definition of the temperature
range.
Use case 1: temperature is defined by a list:
>>> Q = partitionSum(1,1,[70,80,90])
Use case 2: temperature is defined by bounds and a step:
>>> T,Q = partitionSum(1,1,[70,3000],step=1.0)
In the latter example we calculate a partition sum on a range of
temperatures from 70 K to 3000 K using a step of 1.0 K, obtaining arrays
of temperature (T) and partition sum (Q) as the output.
///////////////////////////////////////////
/// CALCULATING ABSORPTION COEFFICIENTS ///
///////////////////////////////////////////
Currently the API can calculate the following spectral functions at arbitrary
thermodynamic parameters:
1) Absorption coefficient
2) Absorption spectrum
3) Transmittance spectrum
4) Radiance spectrum
All these functions can be calculated with or without accounting for
instrument properties (apparatus function, resolution, path length, etc.).
As is well known, spectral functions such as absorption,
transmittance, and radiance spectra are calculated on the basis
of the absorption coefficient. For that reason, the absorption coefficient
is the most important part of simulating a cross section. This part of the
tutorial demonstrates how to calculate the absorption
coefficient from the HITRAN line-by-line data. Here we give a brief
insight into the basic parameters of the calculation procedure and discuss some
useful practices and precautions.
To calculate an absorption coefficient, we can use one of the following
functions:
-> absorptionCoefficient_HT
-> absorptionCoefficient_Voigt
-> absorptionCoefficient_Lorentz
-> absorptionCoefficient_Doppler
Each of these functions calculates cross sections using a different
lineshape (the names are quite self-explanatory).
You can get detailed information on using each of these functions
by calling getHelp(function_name).
Let's look more closely at the cross sections based on the Lorentz profile.
To do that, let's have a table downloaded from HITRANonline.
# get data on CO2 main isotopologue in the range 2000-2100 cm-1
>>> fetch('CO2',2,1,2000,2100)
OK, now we're ready to run a fast example of how to calculate an
absorption coefficient cross section:
>>> nu,coef = absorptionCoefficient_Lorentz(SourceTables='CO2')
This example calculates a Lorentz cross section using the whole set of
lines in the "CO2" table. This is the simplest possible way to use these
functions, because most of the parameters are bound to their default values.
If we have matplotlib installed, then we can visualize it using a plotter:
>>> from pylab import plot
>>> plot(nu,coef)
The API provides flexible control over the calculation procedure. This control
is achieved by using a number of input parameters. So, let's dig
into the details of the settings.
The input parameters of absorptionCoefficient_Lorentz are as follows:
Name Default value
-------------------------------------------------------------------
SourceTables '__BUFFER__'
Components All isotopologues in SourceTables
partitionFunction PYTIPS
Environment {'T':296.,'p':1.}
WavenumberRange depends on Components
WavenumberStep 0.01 cm-1
WavenumberWing 10 cm-1
WavenumberWingHW 50 HWHMs
IntensityThreshold 0 cm/molec
GammaL 'gamma_air'
HITRAN_units True
File None
Format '%e %e'
-------------------------------------------------------------------
Next we'll give a brief explanation of each parameter. After each description
we'll make some notes about the usage of the corresponding parameter.
SourceTables: (required parameter)
List of source tables to take line-by-line data from.
NOTE: User must provide at least one table in the list.
Components: (optional parameter)
List of tuples (M,I,D) to consider in cross section calculation.
M here is a molecule number, I is an isotopologue number,
D is an abundance of the component.
NOTE: If this input contains more than one tuple, then the output
is an absorption coefficient for mixture of corresponding gases.
NOTE2: If omitted, then all data from the source tables is involved.
partitionFunction: (optional parameter)
Instance of partition function of the following format:
Func(M,I,T), where Func is the name of the function, (M,I) are the HITRAN numbers
for molecule and isotopologue, and T is the temperature.
Function must return only one output - the value of the partition sum.
NOTE: Default value is PYTIPS - python version of TIPS-2011
Environment: (optional parameter)
Python dictionary containing value of pressure and temperature.
The format is as follows: Environment = {'p':pval,'T':tval},
where "pval" and "tval" are corresponding values in atm and K
respectively.
NOTE: Default value is {'p':1.0,'T':296.0}
WavenumberRange: (optional parameter)
List containing minimum and maximum value of wavenumber to consider
in cross-section calculation. All lines that are out of these bounds
will be skipped. The format is as follows: WavenumberRange=[wn_low,wn_high]
NOTE: If this parameter is skipped, then min and max are taken
from the data from SourceTables. Deprecated name is OmegaRange.
WavenumberStep: (optional parameter)
Value for the wavenumber step.
NOTE: Default value is 0.01 cm-1.
NOTE2: Normally the user would want to take a step under 0.001 when
calculating the absorption coefficient with the Doppler profile
because of very narrow spectral lines. Deprecated name is OmegaStep.
WavenumberWing: (optional parameter)
Absolute value of the line wing in cm-1, i.e. the distance from the center
of each line to the farthest point where the profile is considered
to be non-zero. Deprecated name is OmegaWing.
NOTE: if omitted, then only OmegaWingHW is taken into account.
WavenumberWingHW: (optional parameter)
Relative value of the line wing in halfwidths. Deprecated name is OmegaWingHW.
NOTE: The resulting wing is a maximum value from both OmegaWing and
OmegaWingHW.
IntensityThreshold: (optional parameter)
Absolute value of minimum intensity in cm/molec to consider.
NOTE: default value is 0.
GammaL: (optional parameter)
This is the name of the broadening parameter to use for the "Lorentzian"
part in the Voigt profile. In the current 160-char format there is
a choice between "gamma_air" and "gamma_self".
NOTE: If the table has custom columns with broadening coefficients,
the user can specify the name of such a column in GammaL. This
would let the function calculate the absorption with a custom
broadening parameter.
HITRAN_units: (optional parameter)
Logical flag for the units in which the absorption coefficient should be
calculated. Currently, the choices are: cm^2/molec (if True) and
cm-1 (if False).
NOTE: to calculate other spectral functions like transmittance,
radiance and absorption spectra, the user should set HITRAN_units to False.
File: (optional parameter)
The name of the file to save the calculated absorption coefficient.
The file is saved only if this parameter is specified.
Format: (optional parameter)
C-style format for the text data to be saved. Default value is "%e %e".
NOTE: C-style output format specification (which are mostly valid for Python)
can be found, for instance, by the link:
http://www.gnu.org/software/libc/manual/html_node/Formatted-Output.html
N.B. Other functions such as absorptionCoefficient_Voigt(_HT,_Doppler) have
identical parameter sets so the description is the same for each function.
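As an illustration, here is a more explicit call which sets several of the
parameters described above (a sketch; the numerical values and the output
file name are arbitrary):
>>> nu,coef = absorptionCoefficient_Lorentz(SourceTables='CO2',
        WavenumberRange=[2000,2100],WavenumberStep=0.01,WavenumberWingHW=50,
        IntensityThreshold=1e-27,GammaL='gamma_air',
        Environment={'p':1.0,'T':296.0},HITRAN_units=False,File='CO2_coef.txt')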
///////////////////////////////////////////////////////////////////
/// CALCULATING ABSORPTION, TRANSMITTANCE, AND RADIANCE SPECTRA ///
///////////////////////////////////////////////////////////////////
Let's calculate absorption, transmittance, and radiance
spectra on the basis of the absorption coefficient. In order to be consistent
with the API's internal units, we need to have the absorption coefficient in cm-1:
>>> nu,coef = absorptionCoefficient_Lorentz(SourceTables='CO2',HITRAN_units=False)
To calculate absorption spectrum, use the function absorptionSpectrum():
>>> nu,absorp = absorptionSpectrum(nu,coef)
To calculate transmittance spectrum, use function transmittanceSpectrum():
>>> nu,trans = transmittanceSpectrum(nu,coef)
To calculate radiance spectrum, use function radianceSpectrum():
>>> nu,radi = radianceSpectrum(nu,coef)
The last three commands used a default path length (1 m).
To see complete info on all three functions, look for section
"calculating spectra" in getHelp()
Generally, all three functions use a similar set of parameters:
Wavenumber: (required parameter)
Wavenumber grid for the spectrum. Deprecated name is Omegas.
AbsorptionCoefficient (optional parameter)
Absorption coefficient as input.
Environment={'T': 296.0, 'l': 100.0} (optional parameter)
Environmental parameters for calculating spectrum.
This parameter is slightly different for each of the functions:
For absorptionSpectrum() and transmittanceSpectrum() the default
value is as follows: Environment={'l': 100.0}
For radianceSpectrum() the default value, besides path length,
contains a temperature: Environment={'T': 296.0, 'l': 100.0}
NOTE: temperature must be equal to that which was used in
absorptionCoefficient_ routine!
File (optional parameter)
Filename of output file for calculated spectrum.
If omitted, then the file is not created.
Format (optional parameter)
C-style format for spectra output file.
NOTE: Default value is as follows: Format='%e %e'
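For instance (an illustrative sketch; the path length value is arbitrary and
given in cm, and the file name is arbitrary), a transmittance spectrum for a
5 m path saved to a file could be obtained as:
>>> nu,trans = transmittanceSpectrum(nu,coef,Environment={'l':500.},File='trans.txt')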
///////////////////////////////////////
/// APPLYING INSTRUMENTAL FUNCTIONS ///
///////////////////////////////////////
To compare the theoretical spectra with real-world
instrument output, it's necessary to take the instrumental resolution into account.
For this purpose HAPI has a function convolveSpectrum() which can emulate
spectra with lower resolution using custom instrumental functions.
The following instrumental functions are available:
1) Rectangular
2) Triangular
3) Gaussian
4) Diffraction
5) Michelson
6) Dispersion
7) Lorentz
To get a description of each instrumental function we can use getHelp():
>>> getHelp(slit_functions)
RECTANGULAR : SLIT_RECTANGULAR
TRIANGULAR : SLIT_TRIANGULAR
GAUSSIAN : SLIT_GAUSSIAN
DIFFRACTION : SLIT_DIFFRACTION
MICHELSON : SLIT_MICHELSON
DISPERSION/LORENTZ : SLIT_DISPERSION
For instance,
>>> getHelp(SLIT_MICHELSON)
... will give detailed info about Michelson's instrumental function.
The function convolveSpectrum() convolves a high-resolution spectrum
with one of the supplied instrumental (slit) functions. The following
parameters of this function are provided:
Wavenumber (required parameter)
Array of wavenumbers in high-resolution input spectrum.
Deprecated name is Omega.
CrossSection (required parameter)
Values of high-resolution input spectrum.
Resolution (optional parameter)
This parameter is passed to the slit function. It represents
the resolution of corresponding instrument.
NOTE: default value is 0.1 cm-1
AF_wing (optional parameter)
Width of an instrument function where it is considered non-zero.
NOTE: default value is 10.0 cm-1
SlitFunction (optional parameter)
Custom instrumental function to convolve with spectrum.
Format of the instrumental function must be as follows:
Func(x,g), where Func - function name, x - wavenumber,
g - resolution.
NOTE: if omitted, then the default value is SLIT_RECTANGULAR
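For example, a custom slit function matching this format can be passed in
(an illustrative sketch which simply reuses one of the built-in slit functions):
>>> def SLIT_CUSTOM(x,g): return SLIT_TRIANGULAR(x,g)
>>> nu_,trans_,i1,i2,slit = convolveSpectrum(nu,trans,SlitFunction=SLIT_CUSTOM)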
Before using the convolution procedure it is worth giving some practical
advice and remarks:
1) The quality of a convolution depends on many things: the quality of the calculated
spectra, the width of AF_wing and WavenumberRange, Resolution, WavenumberStep, etc.
Most of these factors are taken from previous stages of the spectral calculation.
The right choice of all these factors is crucial for correct computation.
2) Dispersion, Diffraction and Michelson AF's don't work well in narrow
wavenumber range because of their broad wings.
3) Generally one must consider WavenumberRange and AF_wing as wide as possible.
4) After applying a convolution, the resulting spectral range for
the lower-resolution spectra is reduced by the doubled value of AF_wing.
For this reason, try to make an initial spectral range for high-resolution
spectrum (absorption, transmittance, radiance) sufficiently broad.
The following command will calculate a lower-resolution spectrum from
the CO2 transmittance which was calculated in a previous section.
The spectral resolution is 1 cm-1.
>>> nu_,trans_,i1,i2,slit = convolveSpectrum(nu,trans)
The outputs are:
nu_, trans_ - wavenumbers and transmittance for the resulting
low-resolution spectrum.
i1,i2 - indexes for initial nu,trans spectrum denoting the part of
wavenumber range which was taken for lower resolution spectrum.
=> Low-res spectrum is calculated on nu[i1:i2]
Note that to achieve more flexibility, one has to specify most of
the optional parameters. For instance, a more complete call is as follows:
>>> nu_,trans_,i1,i2,slit = convolveSpectrum(nu,trans,SlitFunction=SLIT_MICHELSON,Resolution=1.0,AF_wing=20.0)
"""
def print_spectra_tutorial():
pydoc.pager(spectra_tutorial_text)
plotting_tutorial_text = \
"""
PLOTTING THE SPECTRA WITH MATPLOTLIB
This tutorial briefly explains how to make plots using
Matplotlib - a Python library for plotting.
Prerequisites:
To run through this tutorial, the user must have the following
Python libraries installed:
1) Matplotlib
Matplotlib can be obtained by the link http://matplotlib.org/
2) Numpy (required by HAPI itself)
Numpy can be obtained via pip:
sudo pip install numpy (under Linux and Mac)
pip install numpy (under Windows)
Or by the link http://www.numpy.org/
As an option, user can download one of the many scientific Python
distributions, such as Anaconda, Canopy etc...
So, let's calculate and plot the basic entities which are provided by HAPI.
To do so, we will do all the necessary steps to download, filter and
calculate cross sections "from scratch". To demonstrate the different
possibilities of matplotlib, we will mostly use Pylab - a part of
Matplotlib with an interface similar to Matlab. Please note that it's
not the only way to use Matplotlib. More information can be found on its site.
The next part is a step-by-step guide, demonstrating the basic possibilities
of HITRANonline API in conjunction with Matplotlib.
First, do some preliminary imports:
>>> from hapi import *
>>> from pylab import show,plot,subplot,xlim,ylim,title,legend,xlabel,ylabel,hold
Start the database 'data':
>>> db_begin('data')
Download lines for main isotopologue of ozone in [3900,4050] range:
>>> fetch('O3',3,1,3900,4050)
Plot a stick spectrum using the function getStickXY():
>>> x,y = getStickXY('O3')
>>> plot(x,y); show()
Zoom in spectral region [4020,4035] cm-1:
>>> plot(x,y); xlim([4020,4035]); show()
Calculate and plot difference between Voigt and Lorentzian lineshape:
>>> wn = arange(3002,3008,0.01) # get wavenumber range of interest
>>> voi = PROFILE_VOIGT(3005,0.1,0.3,wn)[0] # calc Voigt
>>> lor = PROFILE_LORENTZ(3005,0.3,wn) # calc Lorentz
>>> diff = voi-lor # calc difference
>>> subplot(2,1,1) # upper panel
>>> plot(wn,voi,'red',wn,lor,'blue') # plot both profiles
>>> legend(['Voigt','Lorentz']) # show legend
>>> title('Voigt and Lorentz profiles') # show title
>>> subplot(2,1,2) # lower panel
>>> plot(wn,diff) # plot difference
>>> title('Voigt-Lorentz residual') # show title
>>> show() # show all figures
Calculate and plot absorption coefficients for ozone using Voigt
profile. Spectra are calculated for 4 cases of thermodynamic parameters:
(1 atm, 296 K), (5 atm, 296 K), (1 atm, 500 K), and (5 atm, 500 K)
>>> nu1,coef1 = absorptionCoefficient_Voigt(((3,1),),'O3',
WavenumberStep=0.01,HITRAN_units=False,GammaL='gamma_self',
Environment={'p':1,'T':296.})
>>> nu2,coef2 = absorptionCoefficient_Voigt(((3,1),),'O3',
WavenumberStep=0.01,HITRAN_units=False,GammaL='gamma_self',
Environment={'p':5,'T':296.})
>>> nu3,coef3 = absorptionCoefficient_Voigt(((3,1),),'O3',
WavenumberStep=0.01,HITRAN_units=False,GammaL='gamma_self',
Environment={'p':1,'T':500.})
>>> nu4,coef4 = absorptionCoefficient_Voigt(((3,1),),'O3',
WavenumberStep=0.01,HITRAN_units=False,GammaL='gamma_self',
Environment={'p':5,'T':500.})
>>> subplot(2,2,1); plot(nu1,coef1); title('O3 k(w): p=1 atm, T=296K')
>>> subplot(2,2,2); plot(nu2,coef2); title('O3 k(w): p=5 atm, T=296K')
>>> subplot(2,2,3); plot(nu3,coef3); title('O3 k(w): p=1 atm, T=500K')
>>> subplot(2,2,4); plot(nu4,coef4); title('O3 k(w): p=5 atm, T=500K')
>>> show()
Calculate and plot absorption, transmittance and radiance spectra for 1 atm
and 296K. Path length is set to 10 m.
>>> nu,absorp = absorptionSpectrum(nu1,coef1,Environment={'l':1000.})
>>> nu,transm = transmittanceSpectrum(nu1,coef1,Environment={'l':1000.})
>>> nu,radian = radianceSpectrum(nu1,coef1,Environment={'l':1000.,'T':296.})
>>> subplot(2,2,1); plot(nu1,coef1,'r'); title('O3 k(w): p=1 atm, T=296K')
>>> subplot(2,2,2); plot(nu,absorp,'g'); title('O3 absorption: p=1 atm, T=296K')
>>> subplot(2,2,3); plot(nu,transm,'b'); title('O3 transmittance: p=1 atm, T=296K')
>>> subplot(2,2,4); plot(nu,radian,'y'); title('O3 radiance: p=1 atm, T=296K')
>>> show()
Calculate and compare the high-resolution spectrum for O3 with a lower-resolution
spectrum convolved with the instrumental function of an ideal Michelson interferometer.
>>> nu_,trans_,i1,i2,slit = convolveSpectrum(nu,transm,SlitFunction=SLIT_MICHELSON,Resolution=1.0,AF_wing=20.0)
>>> plot(nu,transm,'red',nu_,trans_,'blue'); legend(['HI-RES','Michelson']); show()
"""
def print_plotting_tutorial():
pydoc.pager(plotting_tutorial_text)
def getHelp(arg=None):
"""
This function provides interactive manuals and tutorials.
"""
    if arg is None:
print('--------------------------------------------------------------')
print('Hello, this is an interactive help system of HITRANonline API.')
print('--------------------------------------------------------------')
print('Run getHelp(.) with one of the following arguments:')
print(' tutorial - interactive tutorials on HAPI')
print(' units - units used in calculations')
print(' index - index of available HAPI functions')
elif arg == 'tutorial':
print('-----------------------------------')
print('This is a tutorial section of help.')
print('-----------------------------------')
print('Please choose the subject of tutorial:')
print(' data - downloading the data and working with it')
print(' spectra - calculating spectral functions')
print(' plotting - visualizing data with matplotlib')
print(' python - Python quick start guide')
elif arg == 'python':
print_python_tutorial()
elif arg == 'data':
print_data_tutorial()
elif arg == 'spectra':
print_spectra_tutorial()
elif arg == 'plotting':
print_plotting_tutorial()
elif arg == 'index':
print('------------------------------')
print('FETCHING DATA:')
print('------------------------------')
print(' fetch')
print(' fetch_by_ids')
print('')
print('------------------------------')
print('WORKING WITH DATA:')
print('------------------------------')
print(' db_begin')
print(' db_commit')
print(' tableList')
print(' describe')
print(' select')
print(' sort')
print(' extractColumns')
print(' getColumn')
print(' getColumns')
print(' dropTable')
print('')
print('------------------------------')
print('CALCULATING SPECTRA:')
print('------------------------------')
print(' profiles')
print(' partitionSum')
print(' absorptionCoefficient_HT')
print(' absorptionCoefficient_Voigt')
print(' absorptionCoefficient_SDVoigt')
print(' absorptionCoefficient_Lorentz')
print(' absorptionCoefficient_Doppler')
print(' transmittanceSpectrum')
print(' absorptionSpectrum')
print(' radianceSpectrum')
print('')
print('------------------------------')
print('CONVOLVING SPECTRA:')
print('------------------------------')
print(' convolveSpectrum')
print(' slit_functions')
print('')
print('------------------------------')
print('INFO ON ISOTOPOLOGUES:')
print('------------------------------')
print(' ISO_ID')
print(' abundance')
print(' molecularMass')
print(' moleculeName')
print(' isotopologueName')
print('')
print('------------------------------')
print('MISCELLANEOUS:')
print('------------------------------')
print(' getStickXY')
print(' read_hotw')
elif arg == ISO:
print_iso()
elif arg == ISO_ID:
print_iso_id()
elif arg == profiles:
print_profiles()
elif arg == slit_functions:
print_slit_functions()
else:
help(arg)
# Get atmospheric (natural) abundance
# for a specified isotopologue
# M - molecule number
# I - isotopologue number
def abundance(M, I):
"""
INPUT PARAMETERS:
M: HITRAN molecule number
I: HITRAN isotopologue number
OUTPUT PARAMETERS:
        Abundance: natural abundance
---
DESCRIPTION:
        Return the natural (Earth) abundance of a HITRAN isotopologue.
---
EXAMPLE OF USAGE:
ab = abundance(1,1) # H2O
---
"""
return ISO[(M, I)][ISO_INDEX['abundance']]
# Get molecular mass
# for a specified isotopologue
# M - molecule number
# I - isotopologue number
def molecularMass(M, I):
"""
INPUT PARAMETERS:
M: HITRAN molecule number
I: HITRAN isotopologue number
OUTPUT PARAMETERS:
MolMass: molecular mass
---
DESCRIPTION:
        Return the molecular mass of a HITRAN isotopologue.
---
EXAMPLE OF USAGE:
mass = molecularMass(1,1) # H2O
---
"""
return ISO[(M, I)][ISO_INDEX['mass']]
# Get molecule name
# for a specified HITRAN molecule number
# M - molecule number
def moleculeName(M):
"""
INPUT PARAMETERS:
M: HITRAN molecule number
OUTPUT PARAMETERS:
MolName: molecular name
---
DESCRIPTION:
Return name of HITRAN molecule.
---
EXAMPLE OF USAGE:
molname = moleculeName(1) # H2O
---
"""
return ISO[(M, 1)][ISO_INDEX['mol_name']]
# Get isotopologue name
# for a specified isotopologue
# M - molecule number
# I - isotopologue number
def isotopologueName(M, I):
"""
INPUT PARAMETERS:
M: HITRAN molecule number
I: HITRAN isotopologue number
OUTPUT PARAMETERS:
        IsoName: isotopologue name
---
DESCRIPTION:
        Return the name of a HITRAN isotopologue.
---
EXAMPLE OF USAGE:
isoname = isotopologueName(1,1) # H2O
---
"""
return ISO[(M, I)][ISO_INDEX['iso_name']]
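# A minimal combined sketch (not part of the original API) showing how the
# isotopologue lookup helpers above fit together; the molecule/isotopologue
# numbers (1,1) refer to the main water isotopologue and are used purely
# for illustration.
def _isotopologue_info_example(M=1, I=1):
    # Collect name, abundance and mass for a single HITRAN isotopologue.
    return {'molecule': moleculeName(M),
            'isotopologue': isotopologueName(M, I),
            'abundance': abundance(M, I),
            'mass': molecularMass(M, I)}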
# ----------------------- table list ----------------------------------
def tableList():
"""
INPUT PARAMETERS:
none
OUTPUT PARAMETERS:
TableList: a list of available tables
---
DESCRIPTION:
        Return a list of tables present in the database.
---
EXAMPLE OF USAGE:
lst = tableList()
---
"""
return getTableList()
# ----------------------- describe ----------------------------------
def describe(TableName):
"""
INPUT PARAMETERS:
TableName: name of the table to describe
OUTPUT PARAMETERS:
none
---
DESCRIPTION:
        Print information about a table, including its
        parameter names, formats and wavenumber range.
---
EXAMPLE OF USAGE:
describe('sampletab')
---
"""
describeTable(TableName)
# ---------------------- /ISO.PY ---------------------------------------
def db_begin(db=None):
"""
INPUT PARAMETERS:
db: database name (optional)
OUTPUT PARAMETERS:
none
---
DESCRIPTION:
        Open a database connection. The database is stored
        in the folder given by the db input parameter
        (default: 'data').
---
EXAMPLE OF USAGE:
db_begin('bar')
---
"""
databaseBegin(db)
def db_commit():
"""
INPUT PARAMETERS:
none
OUTPUT PARAMETERS:
none
---
DESCRIPTION:
        Commit all changes made to the opened database.
        All tables will be saved to their corresponding files.
---
EXAMPLE OF USAGE:
db_commit()
---
"""
databaseCommit()
# ------------------ QUERY HITRAN ---------------------------------------
# Attach a comment string to the header of a local table.
def comment(TableName, Comment):
    LOCAL_TABLE_CACHE[TableName]['header']['comment'] = Comment
def fetch_by_ids(TableName, iso_id_list, numin, numax, ParameterGroups=[], Parameters=[]):
"""
INPUT PARAMETERS:
TableName: local table name to fetch in (required)
iso_id_list: list of isotopologue id's (required)
numin: lower wavenumber bound (required)
numax: upper wavenumber bound (required)
OUTPUT PARAMETERS:
none
---
DESCRIPTION:
Download line-by-line data from HITRANonline server
and save it to local table. The input parameter iso_id_list
contains list of "global" isotopologue Ids (see help on ISO_ID).
        Note: use this function if you want to download
        multiple species into a single table.
---
EXAMPLE OF USAGE:
fetch_by_ids('water',[1,2,3,4],4000,4100)
---
"""
if type(iso_id_list) not in set([list, tuple]):
iso_id_list = [iso_id_list]
queryHITRAN(TableName, iso_id_list, numin, numax,
pargroups=ParameterGroups, params=Parameters)
iso_names = [ISO_ID[i][ISO_ID_INDEX['iso_name']] for i in iso_id_list]
Comment = 'Contains lines for ' + ','.join(iso_names)
Comment += ('\n in %.3f-%.3f wavenumber range' % (numin, numax))
comment(TableName, Comment)
# def queryHITRAN(TableName,iso_id_list,numin,numax):
def fetch(TableName, M, I, numin, numax, ParameterGroups=[], Parameters=[]):
"""
INPUT PARAMETERS:
TableName: local table name to fetch in (required)
M: HITRAN molecule number (required)
I: HITRAN isotopologue number (required)
numin: lower wavenumber bound (required)
numax: upper wavenumber bound (required)
OUTPUT PARAMETERS:
none
---
DESCRIPTION:
Download line-by-line data from HITRANonline server
and save it to local table. The input parameters M and I
are the HITRAN molecule and isotopologue numbers.
        This function results in a table containing a single
        isotopologue species. To have multiple species in a
        single table, use fetch_by_ids instead.
---
EXAMPLE OF USAGE:
fetch('HOH',1,1,4000,4100)
---
"""
queryHITRAN(TableName, [ISO[(M, I)][ISO_INDEX['id']]], numin, numax,
pargroups=ParameterGroups, params=Parameters)
iso_name = ISO[(M, I)][ISO_INDEX['iso_name']]
Comment = 'Contains lines for ' + iso_name
Comment += ('\n in %.3f-%.3f wavenumber range' % (numin, numax))
comment(TableName, Comment)
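# A minimal end-to-end sketch (assumptions: network access to HITRANonline is
# available; the folder name 'example_data' and table name 'H2O' are arbitrary)
# showing how the functions above are typically combined: open a local database
# folder, download a wavenumber range for one isotopologue, then inspect the
# resulting table.
def _fetch_workflow_example():
    db_begin('example_data')          # open/create the local database folder
    fetch('H2O', 1, 1, 4000., 4100.)  # download H2O (M=1, I=1) lines
    print(tableList())                # list tables now present in the database
    describe('H2O')                   # print parameter names, formats and range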
# ------------------ partition sum --------------------------------------
# ------------------- LAGRANGE INTERPOLATION ----------------------
# def AtoB(aa,bb,A,B,npt) -- in this Python version bb is the return value
def AtoB(aa, A, B, npt):
    # ***************************
    # ...Lagrange 3- and 4-point interpolation.
    # ...Arrays A and B hold the npt data points; given aa, a value of the
    # ...A variable, the routine finds the corresponding bb value.
    #
    # ...input:  aa, A, B, npt
    # ...output: bb (returned)
for I in range(2, npt + 1):
if A[I - 1] >= aa:
if I < 3 or I == npt:
J = I
if I < 3: J = 3
if I == npt: J = npt
J = J - 1 # zero index correction
A0D1 = A[J - 2] - A[J - 1]
if A0D1 == 0.0: A0D1 = 0.0001
A0D2 = A[J - 2] - A[J]
                if A0D2 == 0.0: A0D2 = 0.0001
A1D1 = A[J - 1] - A[J - 2]
if A1D1 == 0.0: A1D1 = 0.0001
A1D2 = A[J - 1] - A[J]
if A1D2 == 0.0: A1D2 = 0.0001
A2D1 = A[J] - A[J - 2]
if A2D1 == 0.0: A2D1 = 0.0001
A2D2 = A[J] - A[J - 1]
if A2D2 == 0.0: A2D2 = 0.0001
A0 = (aa - A[J - 1]) * (aa - A[J]) / (A0D1 * A0D2)
A1 = (aa - A[J - 2]) * (aa - A[J]) / (A1D1 * A1D2)
A2 = (aa - A[J - 2]) * (aa - A[J - 1]) / (A2D1 * A2D2)
bb = A0 * B[J - 2] + A1 * B[J - 1] + A2 * B[J]
else:
J = I
J = J - 1 # zero index correction
A0D1 = A[J - 2] - A[J - 1]
if A0D1 == 0.0: A0D1 = 0.0001
A0D2 = A[J - 2] - A[J]
if A0D2 == 0.0: A0D2 = 0.0001
A0D3 = (A[J - 2] - A[J + 1])
if A0D3 == 0.0: A0D3 = 0.0001
A1D1 = A[J - 1] - A[J - 2]
if A1D1 == 0.0: A1D1 = 0.0001
A1D2 = A[J - 1] - A[J]
if A1D2 == 0.0: A1D2 = 0.0001
A1D3 = A[J - 1] - A[J + 1]
if A1D3 == 0.0: A1D3 = 0.0001
A2D1 = A[J] - A[J - 2]
if A2D1 == 0.0: A2D1 = 0.0001
A2D2 = A[J] - A[J - 1]
if A2D2 == 0.0: A2D2 = 0.0001
A2D3 = A[J] - A[J + 1]
if A2D3 == 0.0: A2D3 = 0.0001
A3D1 = A[J + 1] - A[J - 2]
if A3D1 == 0.0: A3D1 = 0.0001
A3D2 = A[J + 1] - A[J - 1]
if A3D2 == 0.0: A3D2 = 0.0001
A3D3 = A[J + 1] - A[J]
if A3D3 == 0.0: A3D3 = 0.0001
A0 = (aa - A[J - 1]) * (aa - A[J]) * (aa - A[J + 1])
A0 = A0 / (A0D1 * A0D2 * A0D3)
A1 = (aa - A[J - 2]) * (aa - A[J]) * (aa - A[J + 1])
A1 = A1 / (A1D1 * A1D2 * A1D3)
A2 = (aa - A[J - 2]) * (aa - A[J - 1]) * (aa - A[J + 1])
A2 = A2 / (A2D1 * A2D2 * A2D3)
A3 = (aa - A[J - 2]) * (aa - A[J - 1]) * (aa - A[J])
A3 = A3 / (A3D1 * A3D2 * A3D3)
bb = A0 * B[J - 2] + A1 * B[J - 1] + A2 * B[J] + A3 * B[J + 1]
break
return bb
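# A minimal usage sketch for AtoB; the node and value arrays below are
# hypothetical and serve only to illustrate the call signature.
def _atob_usage_example():
    T_nodes = [100., 200., 300., 400.]   # hypothetical grid of the A variable
    Q_nodes = [10., 25., 45., 70.]       # hypothetical tabulated B values
    # 3-/4-point Lagrange interpolation of B(A) at A = 250.
    return AtoB(250., T_nodes, Q_nodes, len(T_nodes))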
# --------------- ISOTOPOLOGUE HASH ----------------------
TIPS_ISO_HASH = {}
# --------------- STATISTICAL WEIGHT HASH ----------------------
TIPS_GSI_HASH = {}
# --------------- INTERPOLATION NODES ----------------------
Tdat = __FloatType__([60., 85., 110., 135., 160., 185., 210., 235.,
260., 285., 310., 335., 360., 385., 410., 435., 460., 485.,
510., 535., 560., 585., 610., 635., 660., 685., 710., 735.,
760., 785., 810., 835., 860., 885., 910., 935., 960., 985.,
1010., 1035., 1060., 1085., 1110., 1135., 1160., 1185., 1210., 1235.,
1260., 1285., 1310., 1335., 1360., 1385., 1410., 1435., 1460., 1485.,
1510., 1535., 1560., 1585., 1610., 1635., 1660., 1685., 1710., 1735.,
1760., 1785., 1810., 1835., 1860., 1885., 1910., 1935., 1960., 1985.,
2010., 2035., 2060., 2085., 2110., 2135., 2160., 2185., 2210., 2235.,
2260., 2285., 2310., 2335., 2360., 2385., 2410., 2435., 2460., 2485.,
2510., 2535., 2560., 2585., 2610., 2635., 2660., 2685., 2710., 2735.,
2760., 2785., 2810., 2835., 2860., 2885., 2910., 2935., 2960., 2985.,
3010.])
TIPS_NPT = len(Tdat)
# REMARK
# float32 gives exactly the same results as the Fortran TIPS code, because
# all constants in the Fortran code are given as xx.xxE+-XX, i.e.
# in single precision. As a consequence, all figures beyond single
# precision carry no information (they are numerical noise anyway).
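# Sketch (an assumption, not shown in this part of the code): the per-isotopologue
# tables defined below are presumably evaluated at an arbitrary temperature by
# interpolating over the Tdat nodes, e.g. with the AtoB routine defined above.
def _tips_lookup_sketch(M, I, T):
    # Interpolate the tabulated total internal partition sum for
    # isotopologue (M, I) at temperature T (in K).
    return AtoB(T, Tdat, TIPS_ISO_HASH[(M, I)], TIPS_NPT)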
# --------------- H2O 161: M = 1, I = 1 ---------------------
M = 1
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.16824E+02, 0.27771E+02, 0.40408E+02,
0.54549E+02, 0.70054E+02, 0.86817E+02, 0.10475E+03, 0.12380E+03,
0.14391E+03, 0.16503E+03, 0.18714E+03, 0.21021E+03, 0.23425E+03,
0.25924E+03, 0.28518E+03, 0.31209E+03, 0.33997E+03, 0.36883E+03,
0.39870E+03, 0.42959E+03, 0.46152E+03, 0.49452E+03, 0.52860E+03,
0.56380E+03, 0.60015E+03, 0.63766E+03, 0.67637E+03, 0.71631E+03,
0.75750E+03, 0.79999E+03, 0.84380E+03, 0.88897E+03, 0.93553E+03,
0.98353E+03, 0.10330E+04, 0.10840E+04, 0.11365E+04, 0.11906E+04,
0.12463E+04, 0.13037E+04, 0.13628E+04, 0.14237E+04, 0.14863E+04,
0.15509E+04, 0.16173E+04, 0.16856E+04, 0.17559E+04, 0.18283E+04,
0.19028E+04, 0.19793E+04, 0.20581E+04, 0.21391E+04, 0.22224E+04,
0.23080E+04, 0.24067E+04, 0.24975E+04, 0.25908E+04, 0.26867E+04,
0.27853E+04, 0.28865E+04, 0.29904E+04, 0.30972E+04, 0.32068E+04,
0.33194E+04, 0.34349E+04, 0.35535E+04, 0.36752E+04, 0.38001E+04,
0.39282E+04, 0.40597E+04, 0.41945E+04, 0.43327E+04, 0.44745E+04,
0.46199E+04, 0.47688E+04, 0.49215E+04, 0.50780E+04, 0.52384E+04,
0.54027E+04, 0.55710E+04, 0.57434E+04, 0.59200E+04, 0.61008E+04,
0.62859E+04, 0.64754E+04, 0.66693E+04, 0.68679E+04, 0.70710E+04,
0.72788E+04, 0.74915E+04, 0.77090E+04, 0.79315E+04, 0.81590E+04,
0.83917E+04, 0.86296E+04, 0.88728E+04, 0.91214E+04, 0.93755E+04,
0.96351E+04, 0.99005E+04, 0.10171E+05, 0.10448E+05, 0.10731E+05,
0.11020E+05, 0.11315E+05, 0.11617E+05, 0.11924E+05, 0.12238E+05,
0.12559E+05, 0.12886E+05, 0.13220E+05, 0.13561E+05, 0.13909E+05,
0.14263E+05, 0.14625E+05, 0.14995E+05, 0.15371E+05, 0.15755E+05,
0.16147E+05])
# --------------- H2O 181: M = 1, I = 2 ---------------------
M = 1
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.15960E+02, 0.26999E+02, 0.39743E+02,
0.54003E+02, 0.69639E+02, 0.86543E+02, 0.10463E+03, 0.12384E+03,
0.14412E+03, 0.16542E+03, 0.18773E+03, 0.21103E+03, 0.23531E+03,
0.26057E+03, 0.28681E+03, 0.31406E+03, 0.34226E+03, 0.37130E+03,
0.40135E+03, 0.43243E+03, 0.46456E+03, 0.49777E+03, 0.53206E+03,
0.56748E+03, 0.60405E+03, 0.64179E+03, 0.68074E+03, 0.72093E+03,
0.76238E+03, 0.80513E+03, 0.84922E+03, 0.89467E+03, 0.94152E+03,
0.98982E+03, 0.10396E+04, 0.10909E+04, 0.11437E+04, 0.11982E+04,
0.12543E+04, 0.13120E+04, 0.13715E+04, 0.14328E+04, 0.14959E+04,
0.15608E+04, 0.16276E+04, 0.16964E+04, 0.17672E+04, 0.18401E+04,
0.19151E+04, 0.19922E+04, 0.20715E+04, 0.21531E+04, 0.22370E+04,
0.23232E+04, 0.24118E+04, 0.25030E+04, 0.25967E+04, 0.26929E+04,
0.27918E+04, 0.28934E+04, 0.29978E+04, 0.31050E+04, 0.32151E+04,
0.33281E+04, 0.34441E+04, 0.35632E+04, 0.36854E+04, 0.38108E+04,
0.39395E+04, 0.40715E+04, 0.42070E+04, 0.43459E+04, 0.44883E+04,
0.46343E+04, 0.47840E+04, 0.49374E+04, 0.50946E+04, 0.52558E+04,
0.54209E+04, 0.55900E+04, 0.57632E+04, 0.59407E+04, 0.61224E+04,
0.63084E+04, 0.64988E+04, 0.66938E+04, 0.68933E+04, 0.70975E+04,
0.73064E+04, 0.75202E+04, 0.77389E+04, 0.79625E+04, 0.81913E+04,
0.84252E+04, 0.86644E+04, 0.89089E+04, 0.91588E+04, 0.94143E+04,
0.96754E+04, 0.99422E+04, 0.10215E+05, 0.10493E+05, 0.10778E+05,
0.11068E+05, 0.11365E+05, 0.11668E+05, 0.11977E+05, 0.12293E+05,
0.12616E+05, 0.12945E+05, 0.13281E+05, 0.13624E+05, 0.13973E+05,
0.14330E+05, 0.14694E+05, 0.15066E+05, 0.15445E+05, 0.15831E+05,
0.16225E+05])
# --------------- H2O 171: M = 1, I = 3 ---------------------
M = 1
I = 3
TIPS_GSI_HASH[(M, I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M, I)] = float32([0.95371E+02, 0.16134E+03, 0.23750E+03,
0.32273E+03, 0.41617E+03, 0.51722E+03, 0.62540E+03, 0.74036E+03,
0.86185E+03, 0.98970E+03, 0.11238E+04, 0.12642E+04, 0.14097E+04,
0.15599E+04, 0.17159E+04, 0.18777E+04, 0.20453E+04, 0.22188E+04,
0.23983E+04, 0.25840E+04, 0.27760E+04, 0.29743E+04, 0.31792E+04,
0.33907E+04, 0.36091E+04, 0.38346E+04, 0.40672E+04, 0.43072E+04,
0.45547E+04, 0.48100E+04, 0.50732E+04, 0.53446E+04, 0.56244E+04,
0.59128E+04, 0.62100E+04, 0.65162E+04, 0.68317E+04, 0.71567E+04,
0.74915E+04, 0.78363E+04, 0.81914E+04, 0.85571E+04, 0.89335E+04,
0.93211E+04, 0.97200E+04, 0.10131E+05, 0.10553E+05, 0.10988E+05,
0.11435E+05, 0.11895E+05, 0.12368E+05, 0.12855E+05, 0.13356E+05,
0.13870E+05, 0.14399E+05, 0.14943E+05, 0.15502E+05, 0.16076E+05,
0.16666E+05, 0.17272E+05, 0.17895E+05, 0.18534E+05, 0.19191E+05,
0.19865E+05, 0.20557E+05, 0.21267E+05, 0.21996E+05, 0.22744E+05,
0.23512E+05, 0.24299E+05, 0.25106E+05, 0.25935E+05, 0.26784E+05,
0.27655E+05, 0.28547E+05, 0.29462E+05, 0.30400E+05, 0.31361E+05,
0.32345E+05, 0.33353E+05, 0.34386E+05, 0.35444E+05, 0.36527E+05,
0.37637E+05, 0.38772E+05, 0.39934E+05, 0.41124E+05, 0.42341E+05,
0.43587E+05, 0.44861E+05, 0.46165E+05, 0.47498E+05, 0.48862E+05,
0.50256E+05, 0.51682E+05, 0.53139E+05, 0.54629E+05, 0.56152E+05,
0.57708E+05, 0.59299E+05, 0.60923E+05, 0.62583E+05, 0.64279E+05,
0.66011E+05, 0.67779E+05, 0.69585E+05, 0.71429E+05, 0.73312E+05,
0.75234E+05, 0.77195E+05, 0.79197E+05, 0.81240E+05, 0.83325E+05,
0.85452E+05, 0.87622E+05, 0.89835E+05, 0.92093E+05, 0.94395E+05,
0.96743E+05])
# --------------- H2O 162: M = 1, I = 4 ---------------------
M = 1
I = 4
TIPS_GSI_HASH[(M, I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M, I)] = float32([0.75792E+02, 0.12986E+03, 0.19244E+03,
0.26253E+03, 0.33942E+03, 0.42259E+03, 0.51161E+03, 0.60619E+03,
0.70609E+03, 0.81117E+03, 0.92132E+03, 0.10365E+04, 0.11567E+04,
0.12820E+04, 0.14124E+04, 0.15481E+04, 0.16891E+04, 0.18355E+04,
0.19876E+04, 0.21455E+04, 0.23092E+04, 0.24791E+04, 0.26551E+04,
0.28376E+04, 0.30268E+04, 0.32258E+04, 0.34288E+04, 0.36392E+04,
0.38571E+04, 0.40828E+04, 0.43165E+04, 0.45584E+04, 0.48089E+04,
0.50681E+04, 0.53363E+04, 0.56139E+04, 0.59009E+04, 0.61979E+04,
0.65049E+04, 0.68224E+04, 0.71506E+04, 0.74898E+04, 0.78403E+04,
0.82024E+04, 0.85765E+04, 0.89628E+04, 0.93618E+04, 0.97736E+04,
0.10199E+05, 0.10637E+05, 0.11090E+05, 0.11557E+05, 0.12039E+05,
0.12535E+05, 0.13047E+05, 0.13575E+05, 0.14119E+05, 0.14679E+05,
0.15257E+05, 0.15851E+05, 0.16464E+05, 0.17094E+05, 0.17743E+05,
0.18411E+05, 0.19098E+05, 0.19805E+05, 0.20532E+05, 0.21280E+05,
0.22049E+05, 0.22840E+05, 0.23652E+05, 0.24487E+05, 0.25345E+05,
0.26227E+05, 0.27132E+05, 0.28062E+05, 0.29016E+05, 0.29997E+05,
0.31002E+05, 0.32035E+05, 0.33094E+05, 0.34180E+05, 0.35295E+05,
0.36438E+05, 0.37610E+05, 0.38812E+05, 0.40044E+05, 0.41306E+05,
0.42600E+05, 0.43926E+05, 0.45284E+05, 0.46675E+05, 0.48100E+05,
0.49559E+05, 0.51053E+05, 0.52583E+05, 0.54148E+05, 0.55750E+05,
0.57390E+05, 0.59067E+05, 0.60783E+05, 0.62539E+05, 0.64334E+05,
0.66170E+05, 0.68047E+05, 0.69967E+05, 0.71929E+05, 0.73934E+05,
0.75983E+05, 0.78078E+05, 0.80217E+05, 0.82403E+05, 0.84636E+05,
0.86917E+05, 0.89246E+05, 0.91625E+05, 0.94053E+05, 0.96533E+05,
0.99064E+05])
# --------------- H2O 182: M = 1, I = 5 ---------------------
M = 1
I = 5
TIPS_GSI_HASH[(M, I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M, I)] = float32([0.82770E+02, 0.13749E+03, 0.20083E+03,
0.27176E+03, 0.34955E+03, 0.43370E+03, 0.52376E+03, 0.61944E+03,
0.72050E+03, 0.82679E+03, 0.93821E+03, 0.10547E+04, 0.11763E+04,
0.13031E+04, 0.14350E+04, 0.15723E+04, 0.17150E+04, 0.18633E+04,
0.20172E+04, 0.21770E+04, 0.23429E+04, 0.25149E+04, 0.26934E+04,
0.28784E+04, 0.30702E+04, 0.32690E+04, 0.34750E+04, 0.36885E+04,
0.39096E+04, 0.41386E+04, 0.43758E+04, 0.46213E+04, 0.48755E+04,
0.51386E+04, 0.54109E+04, 0.56927E+04, 0.59841E+04, 0.62856E+04,
0.65973E+04, 0.69197E+04, 0.72529E+04, 0.75973E+04, 0.79533E+04,
0.83210E+04, 0.87009E+04, 0.90933E+04, 0.94985E+04, 0.99168E+04,
0.10348E+05, 0.10794E+05, 0.11254E+05, 0.11728E+05, 0.12217E+05,
0.12722E+05, 0.13242E+05, 0.13778E+05, 0.14331E+05, 0.14900E+05,
0.15486E+05, 0.16091E+05, 0.16713E+05, 0.17353E+05, 0.18012E+05,
0.18691E+05, 0.19389E+05, 0.20108E+05, 0.20847E+05, 0.21607E+05,
0.22388E+05, 0.23191E+05, 0.24017E+05, 0.24866E+05, 0.25738E+05,
0.26633E+05, 0.27553E+05, 0.28498E+05, 0.29468E+05, 0.30464E+05,
0.31486E+05, 0.32536E+05, 0.33612E+05, 0.34716E+05, 0.35849E+05,
0.37011E+05, 0.38202E+05, 0.39424E+05, 0.40676E+05, 0.41959E+05,
0.43274E+05, 0.44622E+05, 0.46002E+05, 0.47416E+05, 0.48864E+05,
0.50348E+05, 0.51866E+05, 0.53421E+05, 0.55012E+05, 0.56640E+05,
0.58307E+05, 0.60012E+05, 0.61757E+05, 0.63541E+05, 0.65366E+05,
0.67233E+05, 0.69141E+05, 0.71092E+05, 0.73087E+05, 0.75125E+05,
0.77209E+05, 0.79338E+05, 0.81513E+05, 0.83736E+05, 0.86006E+05,
0.88324E+05, 0.90693E+05, 0.93111E+05, 0.95580E+05, 0.98100E+05,
0.10067E+06])
# --------------- H2O 172: M = 1, I = 6 ---------------------
M = 1
I = 6
TIPS_GSI_HASH[(M, I)] = __FloatType__(36.)
TIPS_ISO_HASH[(M, I)] = float32([0.49379E+03, 0.82021E+03, 0.11980E+04,
0.16211E+04, 0.20851E+04, 0.25870E+04, 0.31242E+04, 0.36949E+04,
0.42977E+04, 0.49317E+04, 0.55963E+04, 0.62911E+04, 0.70164E+04,
0.77722E+04, 0.85591E+04, 0.93777E+04, 0.10228E+05, 0.11112E+05,
0.12030E+05, 0.12983E+05, 0.13971E+05, 0.14997E+05, 0.16061E+05,
0.17163E+05, 0.18306E+05, 0.19491E+05, 0.20719E+05, 0.21991E+05,
0.23309E+05, 0.24673E+05, 0.26086E+05, 0.27549E+05, 0.29064E+05,
0.30631E+05, 0.32254E+05, 0.33932E+05, 0.35669E+05, 0.37464E+05,
0.39321E+05, 0.41242E+05, 0.43227E+05, 0.45279E+05, 0.47399E+05,
0.49589E+05, 0.51852E+05, 0.54189E+05, 0.56602E+05, 0.59094E+05,
0.61666E+05, 0.64320E+05, 0.67058E+05, 0.69883E+05, 0.72796E+05,
0.75801E+05, 0.78899E+05, 0.82092E+05, 0.85382E+05, 0.88773E+05,
0.92266E+05, 0.95863E+05, 0.99568E+05, 0.10338E+06, 0.10731E+06,
0.11135E+06, 0.11551E+06, 0.11979E+06, 0.12419E+06, 0.12871E+06,
0.13337E+06, 0.13815E+06, 0.14307E+06, 0.14812E+06, 0.15331E+06,
0.15865E+06, 0.16412E+06, 0.16975E+06, 0.17553E+06, 0.18146E+06,
0.18754E+06, 0.19379E+06, 0.20020E+06, 0.20678E+06, 0.21352E+06,
0.22044E+06, 0.22753E+06, 0.23480E+06, 0.24226E+06, 0.24990E+06,
0.25773E+06, 0.26575E+06, 0.27397E+06, 0.28239E+06, 0.29102E+06,
0.29985E+06, 0.30889E+06, 0.31814E+06, 0.32762E+06, 0.33731E+06,
0.34724E+06, 0.35739E+06, 0.36777E+06, 0.37840E+06, 0.38926E+06,
0.40038E+06, 0.41174E+06, 0.42335E+06, 0.43523E+06, 0.44737E+06,
0.45977E+06, 0.47245E+06, 0.48540E+06, 0.49863E+06, 0.51214E+06,
0.52595E+06, 0.54005E+06, 0.55444E+06, 0.56914E+06, 0.58415E+06,
0.59947E+06])
# --------------- CO2 626: M = 2, I = 1 ---------------------
M = 2
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.53642E+02, 0.75947E+02, 0.98292E+02,
0.12078E+03, 0.14364E+03, 0.16714E+03, 0.19160E+03, 0.21731E+03,
0.24454E+03, 0.27355E+03, 0.30456E+03, 0.33778E+03, 0.37343E+03,
0.41170E+03, 0.45280E+03, 0.49692E+03, 0.54427E+03, 0.59505E+03,
0.64948E+03, 0.70779E+03, 0.77019E+03, 0.83693E+03, 0.90825E+03,
0.98440E+03, 0.10656E+04, 0.11522E+04, 0.12445E+04, 0.13427E+04,
0.14471E+04, 0.15580E+04, 0.16759E+04, 0.18009E+04, 0.19334E+04,
0.20739E+04, 0.22225E+04, 0.23798E+04, 0.25462E+04, 0.27219E+04,
0.29074E+04, 0.31032E+04, 0.33097E+04, 0.35272E+04, 0.37564E+04,
0.39976E+04, 0.42514E+04, 0.45181E+04, 0.47985E+04, 0.50929E+04,
0.54019E+04, 0.57260E+04, 0.60659E+04, 0.64221E+04, 0.67952E+04,
0.71859E+04, 0.75946E+04, 0.80222E+04, 0.84691E+04, 0.89362E+04,
0.94241E+04, 0.99335E+04, 0.10465E+05, 0.11020E+05, 0.11598E+05,
0.12201E+05, 0.12828E+05, 0.13482E+05, 0.14163E+05, 0.14872E+05,
0.15609E+05, 0.16376E+05, 0.17173E+05, 0.18001E+05, 0.18861E+05,
0.19754E+05, 0.20682E+05, 0.21644E+05, 0.22643E+05, 0.23678E+05,
0.24752E+05, 0.25865E+05, 0.27018E+05, 0.28212E+05, 0.29449E+05,
0.30730E+05, 0.32055E+05, 0.33426E+05, 0.34845E+05, 0.36312E+05,
0.37828E+05, 0.39395E+05, 0.41015E+05, 0.42688E+05, 0.44416E+05,
0.46199E+05, 0.48041E+05, 0.49942E+05, 0.51902E+05, 0.53925E+05,
0.56011E+05, 0.58162E+05, 0.60379E+05, 0.62664E+05, 0.65019E+05,
0.67444E+05, 0.69942E+05, 0.72515E+05, 0.75163E+05, 0.77890E+05,
0.80695E+05, 0.83582E+05, 0.86551E+05, 0.89605E+05, 0.92746E+05,
0.95975E+05, 0.99294E+05, 0.10271E+06, 0.10621E+06, 0.10981E+06,
0.11351E+06])
# --------------- CO2 636: M = 2, I = 2 ---------------------
M = 2
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M, I)] = float32([0.10728E+03, 0.15189E+03, 0.19659E+03,
0.24164E+03, 0.28753E+03, 0.33486E+03, 0.38429E+03, 0.43643E+03,
0.49184E+03, 0.55104E+03, 0.61449E+03, 0.68263E+03, 0.75589E+03,
0.83468E+03, 0.91943E+03, 0.10106E+04, 0.11085E+04, 0.12137E+04,
0.13266E+04, 0.14477E+04, 0.15774E+04, 0.17163E+04, 0.18649E+04,
0.20237E+04, 0.21933E+04, 0.23743E+04, 0.25673E+04, 0.27729E+04,
0.29917E+04, 0.32245E+04, 0.34718E+04, 0.37345E+04, 0.40132E+04,
0.43087E+04, 0.46218E+04, 0.49533E+04, 0.53041E+04, 0.56749E+04,
0.60668E+04, 0.64805E+04, 0.69171E+04, 0.73774E+04, 0.78626E+04,
0.83736E+04, 0.89114E+04, 0.94772E+04, 0.10072E+05, 0.10697E+05,
0.11353E+05, 0.12042E+05, 0.12765E+05, 0.13523E+05, 0.14317E+05,
0.15148E+05, 0.16019E+05, 0.16930E+05, 0.17883E+05, 0.18879E+05,
0.19920E+05, 0.21008E+05, 0.22143E+05, 0.23328E+05, 0.24563E+05,
0.25852E+05, 0.27195E+05, 0.28594E+05, 0.30051E+05, 0.31568E+05,
0.33146E+05, 0.34788E+05, 0.36496E+05, 0.38271E+05, 0.40115E+05,
0.42031E+05, 0.44021E+05, 0.46086E+05, 0.48230E+05, 0.50453E+05,
0.52759E+05, 0.55150E+05, 0.57628E+05, 0.60195E+05, 0.62854E+05,
0.65608E+05, 0.68459E+05, 0.71409E+05, 0.74461E+05, 0.77618E+05,
0.80883E+05, 0.84258E+05, 0.87746E+05, 0.91350E+05, 0.95073E+05,
0.98918E+05, 0.10289E+06, 0.10698E+06, 0.11121E+06, 0.11558E+06,
0.12008E+06, 0.12472E+06, 0.12950E+06, 0.13443E+06, 0.13952E+06,
0.14475E+06, 0.15015E+06, 0.15571E+06, 0.16143E+06, 0.16732E+06,
0.17338E+06, 0.17962E+06, 0.18604E+06, 0.19264E+06, 0.19943E+06,
0.20642E+06, 0.21360E+06, 0.22098E+06, 0.22856E+06, 0.23636E+06,
0.24436E+06])
# --------------- CO2 628: M = 2, I = 3 ---------------------
M = 2
I = 3
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.11368E+03, 0.16096E+03, 0.20833E+03,
0.25603E+03, 0.30452E+03, 0.35442E+03, 0.40640E+03, 0.46110E+03,
0.51910E+03, 0.58093E+03, 0.64709E+03, 0.71804E+03, 0.79422E+03,
0.87607E+03, 0.96402E+03, 0.10585E+04, 0.11600E+04, 0.12689E+04,
0.13857E+04, 0.15108E+04, 0.16449E+04, 0.17883E+04, 0.19416E+04,
0.21054E+04, 0.22803E+04, 0.24668E+04, 0.26655E+04, 0.28770E+04,
0.31021E+04, 0.33414E+04, 0.35956E+04, 0.38654E+04, 0.41516E+04,
0.44549E+04, 0.47761E+04, 0.51160E+04, 0.54755E+04, 0.58555E+04,
0.62568E+04, 0.66804E+04, 0.71273E+04, 0.75982E+04, 0.80944E+04,
0.86169E+04, 0.91666E+04, 0.97446E+04, 0.10352E+05, 0.10990E+05,
0.11660E+05, 0.12363E+05, 0.13101E+05, 0.13874E+05, 0.14683E+05,
0.15531E+05, 0.16418E+05, 0.17347E+05, 0.18317E+05, 0.19332E+05,
0.20392E+05, 0.21499E+05, 0.22654E+05, 0.23859E+05, 0.25116E+05,
0.26426E+05, 0.27792E+05, 0.29214E+05, 0.30695E+05, 0.32236E+05,
0.33840E+05, 0.35508E+05, 0.37242E+05, 0.39045E+05, 0.40917E+05,
0.42862E+05, 0.44881E+05, 0.46977E+05, 0.49152E+05, 0.51407E+05,
0.53746E+05, 0.56171E+05, 0.58683E+05, 0.61286E+05, 0.63981E+05,
0.66772E+05, 0.69661E+05, 0.72650E+05, 0.75742E+05, 0.78940E+05,
0.82246E+05, 0.85664E+05, 0.89196E+05, 0.92845E+05, 0.96613E+05,
0.10050E+06, 0.10452E+06, 0.10867E+06, 0.11295E+06, 0.11736E+06,
0.12191E+06, 0.12661E+06, 0.13145E+06, 0.13643E+06, 0.14157E+06,
0.14687E+06, 0.15232E+06, 0.15794E+06, 0.16372E+06, 0.16968E+06,
0.17580E+06, 0.18211E+06, 0.18859E+06, 0.19526E+06, 0.20213E+06,
0.20918E+06, 0.21643E+06, 0.22388E+06, 0.23154E+06, 0.23941E+06,
0.24750E+06])
# --------------- CO2 627: M = 2, I = 4 ---------------------
M = 2
I = 4
TIPS_GSI_HASH[(M, I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M, I)] = float32([0.66338E+03, 0.93923E+03, 0.12156E+04,
0.14938E+04, 0.17766E+04, 0.20676E+04, 0.23705E+04, 0.26891E+04,
0.30267E+04, 0.33866E+04, 0.37714E+04, 0.41839E+04, 0.46267E+04,
0.51023E+04, 0.56132E+04, 0.61618E+04, 0.67508E+04, 0.73827E+04,
0.80603E+04, 0.87863E+04, 0.95636E+04, 0.10395E+05, 0.11284E+05,
0.12233E+05, 0.13246E+05, 0.14326E+05, 0.15477E+05, 0.16702E+05,
0.18005E+05, 0.19390E+05, 0.20861E+05, 0.22422E+05, 0.24077E+05,
0.25832E+05, 0.27689E+05, 0.29655E+05, 0.31734E+05, 0.33931E+05,
0.36250E+05, 0.38698E+05, 0.41280E+05, 0.44002E+05, 0.46869E+05,
0.49886E+05, 0.53062E+05, 0.56400E+05, 0.59909E+05, 0.63594E+05,
0.67462E+05, 0.71521E+05, 0.75777E+05, 0.80238E+05, 0.84911E+05,
0.89804E+05, 0.94925E+05, 0.10028E+06, 0.10588E+06, 0.11173E+06,
0.11785E+06, 0.12423E+06, 0.13090E+06, 0.13785E+06, 0.14510E+06,
0.15265E+06, 0.16053E+06, 0.16873E+06, 0.17727E+06, 0.18615E+06,
0.19540E+06, 0.20501E+06, 0.21501E+06, 0.22540E+06, 0.23619E+06,
0.24740E+06, 0.25904E+06, 0.27112E+06, 0.28365E+06, 0.29664E+06,
0.31012E+06, 0.32409E+06, 0.33856E+06, 0.35356E+06, 0.36908E+06,
0.38516E+06, 0.40180E+06, 0.41902E+06, 0.43683E+06, 0.45525E+06,
0.47429E+06, 0.49397E+06, 0.51431E+06, 0.53532E+06, 0.55702E+06,
0.57943E+06, 0.60256E+06, 0.62644E+06, 0.65107E+06, 0.67648E+06,
0.70269E+06, 0.72972E+06, 0.75758E+06, 0.78629E+06, 0.81588E+06,
0.84636E+06, 0.87775E+06, 0.91008E+06, 0.94337E+06, 0.97763E+06,
0.10129E+07, 0.10492E+07, 0.10865E+07, 0.11249E+07, 0.11644E+07,
0.12050E+07, 0.12467E+07, 0.12896E+07, 0.13337E+07, 0.13789E+07,
0.14255E+07])
# --------------- CO2 638: M = 2, I = 5 ---------------------
M = 2
I = 5
TIPS_GSI_HASH[(M, I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M, I)] = float32([0.22737E+03, 0.32194E+03, 0.41671E+03,
0.51226E+03, 0.60963E+03, 0.71017E+03, 0.81528E+03, 0.92628E+03,
0.10444E+04, 0.11707E+04, 0.13061E+04, 0.14518E+04, 0.16085E+04,
0.17772E+04, 0.19588E+04, 0.21542E+04, 0.23644E+04, 0.25903E+04,
0.28330E+04, 0.30934E+04, 0.33726E+04, 0.36717E+04, 0.39918E+04,
0.43342E+04, 0.47001E+04, 0.50907E+04, 0.55074E+04, 0.59515E+04,
0.64244E+04, 0.69276E+04, 0.74626E+04, 0.80310E+04, 0.86344E+04,
0.92744E+04, 0.99528E+04, 0.10671E+05, 0.11432E+05, 0.12236E+05,
0.13086E+05, 0.13984E+05, 0.14932E+05, 0.15932E+05, 0.16985E+05,
0.18096E+05, 0.19265E+05, 0.20495E+05, 0.21788E+05, 0.23148E+05,
0.24576E+05, 0.26075E+05, 0.27648E+05, 0.29298E+05, 0.31027E+05,
0.32839E+05, 0.34736E+05, 0.36721E+05, 0.38798E+05, 0.40970E+05,
0.43240E+05, 0.45611E+05, 0.48087E+05, 0.50671E+05, 0.53368E+05,
0.56180E+05, 0.59111E+05, 0.62165E+05, 0.65347E+05, 0.68659E+05,
0.72107E+05, 0.75694E+05, 0.79425E+05, 0.83303E+05, 0.87334E+05,
0.91522E+05, 0.95872E+05, 0.10039E+06, 0.10507E+06, 0.10994E+06,
0.11498E+06, 0.12021E+06, 0.12563E+06, 0.13125E+06, 0.13707E+06,
0.14309E+06, 0.14933E+06, 0.15579E+06, 0.16247E+06, 0.16938E+06,
0.17653E+06, 0.18392E+06, 0.19156E+06, 0.19946E+06, 0.20761E+06,
0.21604E+06, 0.22473E+06, 0.23371E+06, 0.24298E+06, 0.25254E+06,
0.26240E+06, 0.27258E+06, 0.28307E+06, 0.29388E+06, 0.30502E+06,
0.31651E+06, 0.32834E+06, 0.34052E+06, 0.35307E+06, 0.36599E+06,
0.37929E+06, 0.39298E+06, 0.40706E+06, 0.42155E+06, 0.43645E+06,
0.45178E+06, 0.46753E+06, 0.48373E+06, 0.50038E+06, 0.51748E+06,
0.53506E+06])
# --------------- CO2 637: M = 2, I = 6 ---------------------
M = 2
I = 6
TIPS_GSI_HASH[(M, I)] = __FloatType__(12.)
TIPS_ISO_HASH[(M, I)] = float32([0.13267E+04, 0.18785E+04, 0.24314E+04,
0.29888E+04, 0.35566E+04, 0.41426E+04, 0.47550E+04, 0.54013E+04,
0.60886E+04, 0.68232E+04, 0.76109E+04, 0.84574E+04, 0.93678E+04,
0.10348E+05, 0.11402E+05, 0.12536E+05, 0.13755E+05, 0.15065E+05,
0.16471E+05, 0.17980E+05, 0.19598E+05, 0.21330E+05, 0.23184E+05,
0.25166E+05, 0.27283E+05, 0.29543E+05, 0.31953E+05, 0.34521E+05,
0.37256E+05, 0.40164E+05, 0.43256E+05, 0.46541E+05, 0.50026E+05,
0.53723E+05, 0.57641E+05, 0.61790E+05, 0.66180E+05, 0.70823E+05,
0.75729E+05, 0.80910E+05, 0.86378E+05, 0.92145E+05, 0.98224E+05,
0.10463E+06, 0.11137E+06, 0.11846E+06, 0.12592E+06, 0.13375E+06,
0.14198E+06, 0.15062E+06, 0.15969E+06, 0.16920E+06, 0.17916E+06,
0.18959E+06, 0.20052E+06, 0.21196E+06, 0.22392E+06, 0.23642E+06,
0.24949E+06, 0.26314E+06, 0.27740E+06, 0.29227E+06, 0.30779E+06,
0.32398E+06, 0.34085E+06, 0.35842E+06, 0.37673E+06, 0.39579E+06,
0.41563E+06, 0.43626E+06, 0.45772E+06, 0.48003E+06, 0.50322E+06,
0.52730E+06, 0.55232E+06, 0.57829E+06, 0.60524E+06, 0.63320E+06,
0.66219E+06, 0.69226E+06, 0.72342E+06, 0.75571E+06, 0.78916E+06,
0.82380E+06, 0.85966E+06, 0.89678E+06, 0.93518E+06, 0.97490E+06,
0.10160E+07, 0.10585E+07, 0.11023E+07, 0.11477E+07, 0.11946E+07,
0.12430E+07, 0.12929E+07, 0.13445E+07, 0.13977E+07, 0.14526E+07,
0.15093E+07, 0.15677E+07, 0.16280E+07, 0.16901E+07, 0.17541E+07,
0.18200E+07, 0.18880E+07, 0.19579E+07, 0.20300E+07, 0.21042E+07,
0.21805E+07, 0.22591E+07, 0.23400E+07, 0.24232E+07, 0.25087E+07,
0.25967E+07, 0.26871E+07, 0.27801E+07, 0.28757E+07, 0.29739E+07,
0.30747E+07])
# --------------- CO2 828: M = 2, I = 7 ---------------------
M = 2
I = 7
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.60334E+02, 0.85430E+02, 0.11058E+03,
0.13590E+03, 0.16167E+03, 0.18821E+03, 0.21588E+03, 0.24502E+03,
0.27595E+03, 0.30896E+03, 0.34431E+03, 0.38225E+03, 0.42301E+03,
0.46684E+03, 0.51397E+03, 0.56464E+03, 0.61907E+03, 0.67753E+03,
0.74027E+03, 0.80753E+03, 0.87961E+03, 0.95676E+03, 0.10393E+04,
0.11275E+04, 0.12217E+04, 0.13222E+04, 0.14293E+04, 0.15434E+04,
0.16648E+04, 0.17940E+04, 0.19312E+04, 0.20769E+04, 0.22315E+04,
0.23954E+04, 0.25691E+04, 0.27529E+04, 0.29474E+04, 0.31530E+04,
0.33702E+04, 0.35995E+04, 0.38414E+04, 0.40965E+04, 0.43654E+04,
0.46484E+04, 0.49464E+04, 0.52598E+04, 0.55892E+04, 0.59353E+04,
0.62988E+04, 0.66803E+04, 0.70804E+04, 0.74998E+04, 0.79394E+04,
0.83998E+04, 0.88817E+04, 0.93859E+04, 0.99132E+04, 0.10464E+05,
0.11040E+05, 0.11642E+05, 0.12270E+05, 0.12925E+05, 0.13609E+05,
0.14321E+05, 0.15064E+05, 0.15838E+05, 0.16643E+05, 0.17482E+05,
0.18355E+05, 0.19263E+05, 0.20207E+05, 0.21188E+05, 0.22208E+05,
0.23267E+05, 0.24366E+05, 0.25508E+05, 0.26692E+05, 0.27921E+05,
0.29195E+05, 0.30516E+05, 0.31886E+05, 0.33304E+05, 0.34773E+05,
0.36294E+05, 0.37869E+05, 0.39499E+05, 0.41185E+05, 0.42929E+05,
0.44732E+05, 0.46596E+05, 0.48522E+05, 0.50513E+05, 0.52569E+05,
0.54692E+05, 0.56884E+05, 0.59146E+05, 0.61481E+05, 0.63890E+05,
0.66375E+05, 0.68937E+05, 0.71578E+05, 0.74301E+05, 0.77107E+05,
0.79998E+05, 0.82976E+05, 0.86043E+05, 0.89201E+05, 0.92452E+05,
0.95799E+05, 0.99242E+05, 0.10278E+06, 0.10643E+06, 0.11018E+06,
0.11403E+06, 0.11799E+06, 0.12206E+06, 0.12625E+06, 0.13055E+06,
0.13497E+06])
# --------------- CO2 728: M = 2, I = 8 ---------------------
M = 2
I = 8
TIPS_GSI_HASH[(M, I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M, I)] = float32([0.70354E+03, 0.99615E+03, 0.12893E+04,
0.15846E+04, 0.18848E+04, 0.21940E+04, 0.25162E+04, 0.28554E+04,
0.32152E+04, 0.35991E+04, 0.40099E+04, 0.44507E+04, 0.49242E+04,
0.54332E+04, 0.59802E+04, 0.65681E+04, 0.71996E+04, 0.78776E+04,
0.86050E+04, 0.93847E+04, 0.10220E+05, 0.11114E+05, 0.12070E+05,
0.13091E+05, 0.14182E+05, 0.15345E+05, 0.16585E+05, 0.17906E+05,
0.19311E+05, 0.20805E+05, 0.22393E+05, 0.24078E+05, 0.25865E+05,
0.27760E+05, 0.29768E+05, 0.31893E+05, 0.34140E+05, 0.36516E+05,
0.39025E+05, 0.41674E+05, 0.44469E+05, 0.47416E+05, 0.50520E+05,
0.53789E+05, 0.57229E+05, 0.60847E+05, 0.64650E+05, 0.68645E+05,
0.72840E+05, 0.77242E+05, 0.81859E+05, 0.86699E+05, 0.91770E+05,
0.97081E+05, 0.10264E+06, 0.10846E+06, 0.11454E+06, 0.12090E+06,
0.12754E+06, 0.13447E+06, 0.14171E+06, 0.14927E+06, 0.15715E+06,
0.16536E+06, 0.17392E+06, 0.18284E+06, 0.19213E+06, 0.20179E+06,
0.21185E+06, 0.22231E+06, 0.23319E+06, 0.24450E+06, 0.25625E+06,
0.26845E+06, 0.28112E+06, 0.29427E+06, 0.30791E+06, 0.32206E+06,
0.33674E+06, 0.35196E+06, 0.36772E+06, 0.38406E+06, 0.40098E+06,
0.41850E+06, 0.43663E+06, 0.45539E+06, 0.47480E+06, 0.49488E+06,
0.51564E+06, 0.53710E+06, 0.55928E+06, 0.58219E+06, 0.60586E+06,
0.63029E+06, 0.65553E+06, 0.68157E+06, 0.70844E+06, 0.73616E+06,
0.76476E+06, 0.79424E+06, 0.82464E+06, 0.85597E+06, 0.88826E+06,
0.92153E+06, 0.95580E+06, 0.99108E+06, 0.10274E+07, 0.10648E+07,
0.11033E+07, 0.11429E+07, 0.11837E+07, 0.12256E+07, 0.12687E+07,
0.13131E+07, 0.13586E+07, 0.14055E+07, 0.14536E+07, 0.15031E+07,
0.15539E+07])
# --------------- CO2 727: M = 2, I = 9 ---------------------
M = 2
I = 9
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.20518E+04, 0.29051E+04, 0.37601E+04,
0.46209E+04, 0.54961E+04, 0.63969E+04, 0.73353E+04, 0.83227E+04,
0.93698E+04, 0.10486E+05, 0.11681E+05, 0.12962E+05, 0.14337E+05,
0.15815E+05, 0.17403E+05, 0.19110E+05, 0.20942E+05, 0.22909E+05,
0.25018E+05, 0.27278E+05, 0.29699E+05, 0.32290E+05, 0.35060E+05,
0.38019E+05, 0.41177E+05, 0.44545E+05, 0.48135E+05, 0.51957E+05,
0.56023E+05, 0.60346E+05, 0.64938E+05, 0.69812E+05, 0.74981E+05,
0.80461E+05, 0.86264E+05, 0.92406E+05, 0.98902E+05, 0.10577E+06,
0.11302E+06, 0.12067E+06, 0.12875E+06, 0.13726E+06, 0.14622E+06,
0.15566E+06, 0.16559E+06, 0.17604E+06, 0.18702E+06, 0.19855E+06,
0.21066E+06, 0.22336E+06, 0.23669E+06, 0.25065E+06, 0.26528E+06,
0.28061E+06, 0.29664E+06, 0.31342E+06, 0.33096E+06, 0.34930E+06,
0.36845E+06, 0.38845E+06, 0.40933E+06, 0.43111E+06, 0.45383E+06,
0.47751E+06, 0.50219E+06, 0.52790E+06, 0.55466E+06, 0.58252E+06,
0.61151E+06, 0.64166E+06, 0.67300E+06, 0.70558E+06, 0.73943E+06,
0.77458E+06, 0.81108E+06, 0.84896E+06, 0.88827E+06, 0.92904E+06,
0.97131E+06, 0.10151E+07, 0.10605E+07, 0.11076E+07, 0.11563E+07,
0.12068E+07, 0.12590E+07, 0.13130E+07, 0.13689E+07, 0.14267E+07,
0.14865E+07, 0.15483E+07, 0.16121E+07, 0.16781E+07, 0.17462E+07,
0.18165E+07, 0.18892E+07, 0.19641E+07, 0.20415E+07, 0.21213E+07,
0.22036E+07, 0.22884E+07, 0.23759E+07, 0.24661E+07, 0.25590E+07,
0.26547E+07, 0.27533E+07, 0.28549E+07, 0.29594E+07, 0.30670E+07,
0.31778E+07, 0.32918E+07, 0.34090E+07, 0.35296E+07, 0.36536E+07,
0.37812E+07, 0.39123E+07, 0.40470E+07, 0.41855E+07, 0.43278E+07,
0.44739E+07])
# --------------- CO2 838: M = 2, I = 10 ---------------------
M = 2
I = 10
TIPS_GSI_HASH[(M, I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M, I)] = float32([0.12066E+03, 0.17085E+03, 0.22116E+03,
0.27190E+03, 0.32364E+03, 0.37711E+03, 0.43305E+03, 0.49219E+03,
0.55516E+03, 0.62256E+03, 0.69492E+03, 0.77276E+03, 0.85657E+03,
0.94685E+03, 0.10441E+04, 0.11488E+04, 0.12614E+04, 0.13826E+04,
0.15127E+04, 0.16525E+04, 0.18024E+04, 0.19630E+04, 0.21351E+04,
0.23191E+04, 0.25158E+04, 0.27260E+04, 0.29502E+04, 0.31892E+04,
0.34438E+04, 0.37148E+04, 0.40031E+04, 0.43094E+04, 0.46346E+04,
0.49797E+04, 0.53455E+04, 0.57331E+04, 0.61434E+04, 0.65775E+04,
0.70364E+04, 0.75212E+04, 0.80330E+04, 0.85730E+04, 0.91424E+04,
0.97423E+04, 0.10374E+05, 0.11039E+05, 0.11738E+05, 0.12474E+05,
0.13246E+05, 0.14057E+05, 0.14908E+05, 0.15801E+05, 0.16737E+05,
0.17717E+05, 0.18744E+05, 0.19819E+05, 0.20944E+05, 0.22120E+05,
0.23349E+05, 0.24634E+05, 0.25975E+05, 0.27376E+05, 0.28837E+05,
0.30361E+05, 0.31950E+05, 0.33605E+05, 0.35330E+05, 0.37126E+05,
0.38996E+05, 0.40942E+05, 0.42965E+05, 0.45069E+05, 0.47256E+05,
0.49528E+05, 0.51888E+05, 0.54338E+05, 0.56882E+05, 0.59521E+05,
0.62259E+05, 0.65097E+05, 0.68040E+05, 0.71090E+05, 0.74249E+05,
0.77522E+05, 0.80910E+05, 0.84417E+05, 0.88046E+05, 0.91801E+05,
0.95684E+05, 0.99699E+05, 0.10385E+06, 0.10814E+06, 0.11257E+06,
0.11715E+06, 0.12187E+06, 0.12675E+06, 0.13179E+06, 0.13699E+06,
0.14235E+06, 0.14788E+06, 0.15358E+06, 0.15946E+06, 0.16552E+06,
0.17176E+06, 0.17819E+06, 0.18482E+06, 0.19164E+06, 0.19867E+06,
0.20590E+06, 0.21335E+06, 0.22101E+06, 0.22889E+06, 0.23699E+06,
0.24533E+06, 0.25390E+06, 0.26271E+06, 0.27177E+06, 0.28108E+06,
0.29064E+06])
# --------------- CO2 838: M = 2, I = 0 ALIAS-----------------
TIPS_GSI_HASH[(M, 0)] = __FloatType__(2.)
TIPS_ISO_HASH[(M, 0)] = TIPS_ISO_HASH[(M, I)]
# --------------- CO2 837: M = 2, I = 11 ---------------------
M = 2
I = 11
TIPS_GSI_HASH[(M, I)] = __FloatType__(12.)
TIPS_ISO_HASH[(M, I)] = float32([0.14071E+04, 0.19923E+04, 0.25789E+04,
0.31704E+04, 0.37733E+04, 0.43962E+04, 0.50477E+04, 0.57360E+04,
0.64687E+04, 0.72525E+04, 0.80938E+04, 0.89984E+04, 0.99723E+04,
0.11021E+05, 0.12150E+05, 0.13366E+05, 0.14673E+05, 0.16079E+05,
0.17589E+05, 0.19211E+05, 0.20949E+05, 0.22812E+05, 0.24807E+05,
0.26940E+05, 0.29221E+05, 0.31656E+05, 0.34254E+05, 0.37023E+05,
0.39972E+05, 0.43111E+05, 0.46449E+05, 0.49996E+05, 0.53762E+05,
0.57756E+05, 0.61991E+05, 0.66477E+05, 0.71226E+05, 0.76249E+05,
0.81558E+05, 0.87167E+05, 0.93088E+05, 0.99334E+05, 0.10592E+06,
0.11286E+06, 0.12016E+06, 0.12785E+06, 0.13594E+06, 0.14444E+06,
0.15337E+06, 0.16274E+06, 0.17258E+06, 0.18290E+06, 0.19371E+06,
0.20504E+06, 0.21691E+06, 0.22933E+06, 0.24233E+06, 0.25592E+06,
0.27012E+06, 0.28496E+06, 0.30046E+06, 0.31663E+06, 0.33351E+06,
0.35111E+06, 0.36946E+06, 0.38858E+06, 0.40850E+06, 0.42924E+06,
0.45083E+06, 0.47329E+06, 0.49666E+06, 0.52095E+06, 0.54620E+06,
0.57243E+06, 0.59967E+06, 0.62796E+06, 0.65732E+06, 0.68778E+06,
0.71938E+06, 0.75214E+06, 0.78611E+06, 0.82131E+06, 0.85777E+06,
0.89553E+06, 0.93463E+06, 0.97511E+06, 0.10170E+07, 0.10603E+07,
0.11051E+07, 0.11514E+07, 0.11993E+07, 0.12488E+07, 0.12999E+07,
0.13527E+07, 0.14073E+07, 0.14636E+07, 0.15217E+07, 0.15816E+07,
0.16435E+07, 0.17072E+07, 0.17730E+07, 0.18408E+07, 0.19107E+07,
0.19827E+07, 0.20569E+07, 0.21334E+07, 0.22121E+07, 0.22931E+07,
0.23765E+07, 0.24624E+07, 0.25507E+07, 0.26416E+07, 0.27351E+07,
0.28312E+07, 0.29301E+07, 0.30317E+07, 0.31361E+07, 0.32434E+07,
0.33537E+07])
# --------------- O3 666: M = 3, I = 1 ---------------------
M = 3
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.30333E+03, 0.51126E+03, 0.75274E+03,
0.10241E+04, 0.13236E+04, 0.16508E+04, 0.20068E+04, 0.23935E+04,
0.28136E+04, 0.32703E+04, 0.37672E+04, 0.43082E+04, 0.48975E+04,
0.55395E+04, 0.62386E+04, 0.69996E+04, 0.78272E+04, 0.87264E+04,
0.97026E+04, 0.10761E+05, 0.11907E+05, 0.13146E+05, 0.14485E+05,
0.15929E+05, 0.17484E+05, 0.19158E+05, 0.20957E+05, 0.22887E+05,
0.24956E+05, 0.27172E+05, 0.29541E+05, 0.32072E+05, 0.34773E+05,
0.37652E+05, 0.40718E+05, 0.43979E+05, 0.47444E+05, 0.51123E+05,
0.55026E+05, 0.59161E+05, 0.63540E+05, 0.68172E+05, 0.73069E+05,
0.78240E+05, 0.83698E+05, 0.89453E+05, 0.95517E+05, 0.10190E+06,
0.10862E+06, 0.11569E+06, 0.12311E+06, 0.13091E+06, 0.13909E+06,
0.14767E+06, 0.15666E+06, 0.16608E+06, 0.17594E+06, 0.18626E+06,
0.19706E+06, 0.20834E+06, 0.22012E+06, 0.23242E+06, 0.24526E+06,
0.25866E+06, 0.27262E+06, 0.28717E+06, 0.30233E+06, 0.31811E+06,
0.33453E+06, 0.35161E+06, 0.36937E+06, 0.38784E+06, 0.40702E+06,
0.42694E+06, 0.44762E+06, 0.46909E+06, 0.49135E+06, 0.51444E+06,
0.53838E+06, 0.56318E+06, 0.58887E+06, 0.61548E+06, 0.64303E+06,
0.67153E+06, 0.70102E+06, 0.73153E+06, 0.76306E+06, 0.79566E+06,
0.82934E+06, 0.86413E+06, 0.90006E+06, 0.93716E+06, 0.97545E+06,
0.10150E+07, 0.10557E+07, 0.10977E+07, 0.11411E+07, 0.11858E+07,
0.12318E+07, 0.12792E+07, 0.13281E+07, 0.13784E+07, 0.14302E+07,
0.14835E+07, 0.15384E+07, 0.15948E+07, 0.16529E+07, 0.17126E+07,
0.17740E+07, 0.18371E+07, 0.19020E+07, 0.19686E+07, 0.20371E+07,
0.21074E+07, 0.21797E+07, 0.22538E+07, 0.23300E+07, 0.24081E+07,
0.24883E+07])
# --------------- O3 668: M = 3, I = 2 ---------------------
M = 3
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.64763E+03, 0.10916E+04, 0.16073E+04,
0.21870E+04, 0.28271E+04, 0.35272E+04, 0.42900E+04, 0.51197E+04,
0.60225E+04, 0.70057E+04, 0.80771E+04, 0.92455E+04, 0.10520E+05,
0.11911E+05, 0.13427E+05, 0.15079E+05, 0.16878E+05, 0.18834E+05,
0.20960E+05, 0.23267E+05, 0.25767E+05, 0.28472E+05, 0.31397E+05,
0.34553E+05, 0.37957E+05, 0.41620E+05, 0.45559E+05, 0.49790E+05,
0.54327E+05, 0.59187E+05, 0.64387E+05, 0.69944E+05, 0.75877E+05,
0.82203E+05, 0.88943E+05, 0.96114E+05, 0.10374E+06, 0.11184E+06,
0.12043E+06, 0.12954E+06, 0.13918E+06, 0.14939E+06, 0.16018E+06,
0.17159E+06, 0.18362E+06, 0.19632E+06, 0.20970E+06, 0.22380E+06,
0.23863E+06, 0.25423E+06, 0.27063E+06, 0.28786E+06, 0.30594E+06,
0.32490E+06, 0.34478E+06, 0.36561E+06, 0.38743E+06, 0.41026E+06,
0.43413E+06, 0.45909E+06, 0.48517E+06, 0.51241E+06, 0.54084E+06,
0.57049E+06, 0.60141E+06, 0.63365E+06, 0.66722E+06, 0.70219E+06,
0.73858E+06, 0.77644E+06, 0.81581E+06, 0.85674E+06, 0.89927E+06,
0.94345E+06, 0.98932E+06, 0.10369E+07, 0.10863E+07, 0.11375E+07,
0.11906E+07, 0.12457E+07, 0.13027E+07, 0.13618E+07, 0.14229E+07,
0.14862E+07, 0.15517E+07, 0.16194E+07, 0.16894E+07, 0.17618E+07,
0.18366E+07, 0.19139E+07, 0.19937E+07, 0.20761E+07, 0.21612E+07,
0.22490E+07, 0.23395E+07, 0.24330E+07, 0.25293E+07, 0.26286E+07,
0.27309E+07, 0.28363E+07, 0.29449E+07, 0.30568E+07, 0.31720E+07,
0.32905E+07, 0.34125E+07, 0.35381E+07, 0.36672E+07, 0.38000E+07,
0.39366E+07, 0.40770E+07, 0.42213E+07, 0.43696E+07, 0.45220E+07,
0.46785E+07, 0.48392E+07, 0.50043E+07, 0.51737E+07, 0.53476E+07,
0.55261E+07])
# --------------- O3 686: M = 3, I = 3 ---------------------
M = 3
I = 3
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.31656E+03, 0.53355E+03, 0.78557E+03,
0.10688E+04, 0.13815E+04, 0.17235E+04, 0.20960E+04, 0.25011E+04,
0.29420E+04, 0.34223E+04, 0.39459E+04, 0.45172E+04, 0.51408E+04,
0.58213E+04, 0.65639E+04, 0.73735E+04, 0.82555E+04, 0.92152E+04,
0.10259E+05, 0.11391E+05, 0.12619E+05, 0.13949E+05, 0.15387E+05,
0.16940E+05, 0.18614E+05, 0.20417E+05, 0.22357E+05, 0.24440E+05,
0.26675E+05, 0.29070E+05, 0.31633E+05, 0.34374E+05, 0.37299E+05,
0.40420E+05, 0.43746E+05, 0.47285E+05, 0.51049E+05, 0.55047E+05,
0.59289E+05, 0.63788E+05, 0.68554E+05, 0.73598E+05, 0.78932E+05,
0.84568E+05, 0.90519E+05, 0.96796E+05, 0.10341E+06, 0.11039E+06,
0.11772E+06, 0.12544E+06, 0.13356E+06, 0.14208E+06, 0.15103E+06,
0.16041E+06, 0.17026E+06, 0.18057E+06, 0.19137E+06, 0.20268E+06,
0.21450E+06, 0.22687E+06, 0.23979E+06, 0.25328E+06, 0.26736E+06,
0.28206E+06, 0.29738E+06, 0.31336E+06, 0.33000E+06, 0.34733E+06,
0.36537E+06, 0.38414E+06, 0.40366E+06, 0.42396E+06, 0.44505E+06,
0.46696E+06, 0.48971E+06, 0.51332E+06, 0.53782E+06, 0.56323E+06,
0.58958E+06, 0.61689E+06, 0.64518E+06, 0.67448E+06, 0.70482E+06,
0.73623E+06, 0.76872E+06, 0.80234E+06, 0.83710E+06, 0.87303E+06,
0.91017E+06, 0.94853E+06, 0.98816E+06, 0.10291E+07, 0.10713E+07,
0.11149E+07, 0.11599E+07, 0.12063E+07, 0.12541E+07, 0.13034E+07,
0.13542E+07, 0.14066E+07, 0.14606E+07, 0.15161E+07, 0.15733E+07,
0.16322E+07, 0.16928E+07, 0.17552E+07, 0.18194E+07, 0.18854E+07,
0.19532E+07, 0.20230E+07, 0.20947E+07, 0.21684E+07, 0.22441E+07,
0.23219E+07, 0.24018E+07, 0.24838E+07, 0.25680E+07, 0.26545E+07,
0.27432E+07])
# --------------- O3 667: M = 3, I = 4 ---------------------
M = 3
I = 4
TIPS_GSI_HASH[(M, I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M, I)] = float32([0.37657E+04, 0.63472E+04, 0.93454E+04,
0.12715E+05, 0.16435E+05, 0.20502E+05, 0.24929E+05, 0.29742E+05,
0.34975E+05, 0.40668E+05, 0.46868E+05, 0.53624E+05, 0.60990E+05,
0.69018E+05, 0.77768E+05, 0.87296E+05, 0.97666E+05, 0.10894E+06,
0.12118E+06, 0.13446E+06, 0.14885E+06, 0.16441E+06, 0.18123E+06,
0.19938E+06, 0.21894E+06, 0.23998E+06, 0.26261E+06, 0.28690E+06,
0.31295E+06, 0.34084E+06, 0.37068E+06, 0.40256E+06, 0.43659E+06,
0.47287E+06, 0.51151E+06, 0.55262E+06, 0.59632E+06, 0.64272E+06,
0.69194E+06, 0.74412E+06, 0.79937E+06, 0.85783E+06, 0.91963E+06,
0.98492E+06, 0.10538E+07, 0.11265E+07, 0.12031E+07, 0.12837E+07,
0.13686E+07, 0.14579E+07, 0.15517E+07, 0.16502E+07, 0.17536E+07,
0.18621E+07, 0.19758E+07, 0.20949E+07, 0.22196E+07, 0.23501E+07,
0.24866E+07, 0.26292E+07, 0.27783E+07, 0.29339E+07, 0.30963E+07,
0.32658E+07, 0.34425E+07, 0.36266E+07, 0.38184E+07, 0.40181E+07,
0.42260E+07, 0.44422E+07, 0.46671E+07, 0.49008E+07, 0.51437E+07,
0.53959E+07, 0.56578E+07, 0.59296E+07, 0.62116E+07, 0.65040E+07,
0.68071E+07, 0.71213E+07, 0.74468E+07, 0.77838E+07, 0.81328E+07,
0.84939E+07, 0.88676E+07, 0.92541E+07, 0.96536E+07, 0.10067E+08,
0.10493E+08, 0.10934E+08, 0.11390E+08, 0.11860E+08, 0.12345E+08,
0.12846E+08, 0.13363E+08, 0.13895E+08, 0.14445E+08, 0.15011E+08,
0.15595E+08, 0.16196E+08, 0.16815E+08, 0.17453E+08, 0.18110E+08,
0.18786E+08, 0.19482E+08, 0.20198E+08, 0.20934E+08, 0.21691E+08,
0.22470E+08, 0.23270E+08, 0.24093E+08, 0.24939E+08, 0.25807E+08,
0.26699E+08, 0.27616E+08, 0.28556E+08, 0.29522E+08, 0.30514E+08,
0.31531E+08])
# --------------- O3 676: M = 3, I = 5 ---------------------
M = 3
I = 5
TIPS_GSI_HASH[(M, I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M, I)] = float32([0.18608E+04, 0.31363E+04, 0.46177E+04,
0.62826E+04, 0.81202E+04, 0.10129E+05, 0.12316E+05, 0.14693E+05,
0.17277E+05, 0.20089E+05, 0.23153E+05, 0.26492E+05, 0.30133E+05,
0.34103E+05, 0.38430E+05, 0.43145E+05, 0.48277E+05, 0.53858E+05,
0.59920E+05, 0.66497E+05, 0.73624E+05, 0.81336E+05, 0.89671E+05,
0.98668E+05, 0.10836E+06, 0.11880E+06, 0.13002E+06, 0.14207E+06,
0.15500E+06, 0.16884E+06, 0.18365E+06, 0.19947E+06, 0.21636E+06,
0.23438E+06, 0.25356E+06, 0.27398E+06, 0.29568E+06, 0.31873E+06,
0.34318E+06, 0.36911E+06, 0.39656E+06, 0.42561E+06, 0.45632E+06,
0.48877E+06, 0.52302E+06, 0.55914E+06, 0.59722E+06, 0.63732E+06,
0.67952E+06, 0.72390E+06, 0.77055E+06, 0.81954E+06, 0.87097E+06,
0.92491E+06, 0.98146E+06, 0.10407E+07, 0.11027E+07, 0.11677E+07,
0.12356E+07, 0.13066E+07, 0.13807E+07, 0.14582E+07, 0.15390E+07,
0.16233E+07, 0.17113E+07, 0.18029E+07, 0.18984E+07, 0.19978E+07,
0.21012E+07, 0.22089E+07, 0.23208E+07, 0.24372E+07, 0.25581E+07,
0.26837E+07, 0.28141E+07, 0.29494E+07, 0.30898E+07, 0.32354E+07,
0.33864E+07, 0.35428E+07, 0.37049E+07, 0.38728E+07, 0.40466E+07,
0.42264E+07, 0.44125E+07, 0.46050E+07, 0.48040E+07, 0.50098E+07,
0.52224E+07, 0.54420E+07, 0.56689E+07, 0.59031E+07, 0.61449E+07,
0.63943E+07, 0.66517E+07, 0.69172E+07, 0.71909E+07, 0.74731E+07,
0.77639E+07, 0.80635E+07, 0.83721E+07, 0.86900E+07, 0.90172E+07,
0.93541E+07, 0.97008E+07, 0.10058E+08, 0.10424E+08, 0.10802E+08,
0.11190E+08, 0.11589E+08, 0.11999E+08, 0.12420E+08, 0.12853E+08,
0.13298E+08, 0.13755E+08, 0.14223E+08, 0.14705E+08, 0.15199E+08,
0.15706E+08])
# --------------- O3 886: M = 3, I = 6 ---------------------
M = 3
I = 6
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.67639E+03, 0.11401E+04, 0.16787E+04,
0.22843E+04, 0.29532E+04, 0.36856E+04, 0.44842E+04, 0.53545E+04,
0.63030E+04, 0.73381E+04, 0.84686E+04, 0.97040E+04, 0.11054E+05,
0.12530E+05, 0.14143E+05, 0.15903E+05, 0.17823E+05, 0.19915E+05,
0.22190E+05, 0.24663E+05, 0.27346E+05, 0.30254E+05, 0.33400E+05,
0.36800E+05, 0.40469E+05, 0.44423E+05, 0.48678E+05, 0.53251E+05,
0.58160E+05, 0.63423E+05, 0.69058E+05, 0.75085E+05, 0.81524E+05,
0.88395E+05, 0.95719E+05, 0.10352E+06, 0.11181E+06, 0.12063E+06,
0.12999E+06, 0.13991E+06, 0.15043E+06, 0.16157E+06, 0.17335E+06,
0.18580E+06, 0.19895E+06, 0.21283E+06, 0.22746E+06, 0.24288E+06,
0.25911E+06, 0.27619E+06, 0.29415E+06, 0.31301E+06, 0.33283E+06,
0.35362E+06, 0.37542E+06, 0.39827E+06, 0.42221E+06, 0.44726E+06,
0.47348E+06, 0.50089E+06, 0.52954E+06, 0.55947E+06, 0.59072E+06,
0.62332E+06, 0.65733E+06, 0.69279E+06, 0.72973E+06, 0.76821E+06,
0.80827E+06, 0.84996E+06, 0.89332E+06, 0.93840E+06, 0.98526E+06,
0.10339E+07, 0.10845E+07, 0.11370E+07, 0.11914E+07, 0.12479E+07,
0.13065E+07, 0.13672E+07, 0.14302E+07, 0.14953E+07, 0.15628E+07,
0.16327E+07, 0.17050E+07, 0.17798E+07, 0.18571E+07, 0.19371E+07,
0.20197E+07, 0.21051E+07, 0.21933E+07, 0.22844E+07, 0.23785E+07,
0.24755E+07, 0.25757E+07, 0.26790E+07, 0.27855E+07, 0.28954E+07,
0.30086E+07, 0.31253E+07, 0.32455E+07, 0.33693E+07, 0.34967E+07,
0.36280E+07, 0.37631E+07, 0.39021E+07, 0.40451E+07, 0.41922E+07,
0.43435E+07, 0.44990E+07, 0.46589E+07, 0.48232E+07, 0.49920E+07,
0.51654E+07, 0.53436E+07, 0.55265E+07, 0.57143E+07, 0.59071E+07,
0.61050E+07])
# --------------- O3 868: M = 3, I = 7 ---------------------
M = 3
I = 7
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.34615E+03, 0.58348E+03, 0.85915E+03,
0.11692E+04, 0.15117E+04, 0.18868E+04, 0.22960E+04, 0.27419E+04,
0.32278E+04, 0.37579E+04, 0.43366E+04, 0.49686E+04, 0.56591E+04,
0.64134E+04, 0.72369E+04, 0.81354E+04, 0.91148E+04, 0.10181E+05,
0.11341E+05, 0.12600E+05, 0.13966E+05, 0.15446E+05, 0.17046E+05,
0.18775E+05, 0.20640E+05, 0.22649E+05, 0.24810E+05, 0.27132E+05,
0.29624E+05, 0.32295E+05, 0.35154E+05, 0.38211E+05, 0.41475E+05,
0.44958E+05, 0.48670E+05, 0.52621E+05, 0.56823E+05, 0.61288E+05,
0.66026E+05, 0.71052E+05, 0.76376E+05, 0.82011E+05, 0.87972E+05,
0.94271E+05, 0.10092E+06, 0.10794E+06, 0.11534E+06, 0.12313E+06,
0.13134E+06, 0.13997E+06, 0.14905E+06, 0.15858E+06, 0.16859E+06,
0.17909E+06, 0.19010E+06, 0.20164E+06, 0.21373E+06, 0.22638E+06,
0.23962E+06, 0.25346E+06, 0.26792E+06, 0.28302E+06, 0.29879E+06,
0.31524E+06, 0.33240E+06, 0.35029E+06, 0.36892E+06, 0.38833E+06,
0.40853E+06, 0.42956E+06, 0.45142E+06, 0.47416E+06, 0.49778E+06,
0.52233E+06, 0.54781E+06, 0.57427E+06, 0.60172E+06, 0.63019E+06,
0.65971E+06, 0.69031E+06, 0.72201E+06, 0.75485E+06, 0.78886E+06,
0.82405E+06, 0.86048E+06, 0.89815E+06, 0.93711E+06, 0.97739E+06,
0.10190E+07, 0.10620E+07, 0.11065E+07, 0.11523E+07, 0.11997E+07,
0.12485E+07, 0.12990E+07, 0.13510E+07, 0.14046E+07, 0.14599E+07,
0.15169E+07, 0.15756E+07, 0.16361E+07, 0.16984E+07, 0.17626E+07,
0.18287E+07, 0.18966E+07, 0.19666E+07, 0.20386E+07, 0.21126E+07,
0.21887E+07, 0.22669E+07, 0.23474E+07, 0.24300E+07, 0.25150E+07,
0.26022E+07, 0.26919E+07, 0.27839E+07, 0.28784E+07, 0.29753E+07,
0.30749E+07])
# --------------- O3 678: M = 3, I = 8 ---------------------
M = 3
I = 8
TIPS_GSI_HASH[(M, I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M, I)] = float32([0.39745E+04, 0.66993E+04, 0.98642E+04,
0.13422E+05, 0.17352E+05, 0.21652E+05, 0.26339E+05, 0.31442E+05,
0.37000E+05, 0.43058E+05, 0.49669E+05, 0.56885E+05, 0.64766E+05,
0.73372E+05, 0.82765E+05, 0.93011E+05, 0.10418E+06, 0.11633E+06,
0.12955E+06, 0.14390E+06, 0.15946E+06, 0.17632E+06, 0.19455E+06,
0.21424E+06, 0.23547E+06, 0.25835E+06, 0.28296E+06, 0.30939E+06,
0.33776E+06, 0.36816E+06, 0.40070E+06, 0.43549E+06, 0.47264E+06,
0.51228E+06, 0.55451E+06, 0.59947E+06, 0.64728E+06, 0.69807E+06,
0.75198E+06, 0.80915E+06, 0.86971E+06, 0.93381E+06, 0.10016E+07,
0.10733E+07, 0.11489E+07, 0.12287E+07, 0.13128E+07, 0.14015E+07,
0.14948E+07, 0.15930E+07, 0.16961E+07, 0.18045E+07, 0.19183E+07,
0.20378E+07, 0.21629E+07, 0.22942E+07, 0.24316E+07, 0.25754E+07,
0.27258E+07, 0.28831E+07, 0.30475E+07, 0.32192E+07, 0.33984E+07,
0.35855E+07, 0.37805E+07, 0.39838E+07, 0.41956E+07, 0.44162E+07,
0.46458E+07, 0.48847E+07, 0.51332E+07, 0.53916E+07, 0.56601E+07,
0.59390E+07, 0.62286E+07, 0.65292E+07, 0.68412E+07, 0.71647E+07,
0.75002E+07, 0.78479E+07, 0.82081E+07, 0.85813E+07, 0.89676E+07,
0.93676E+07, 0.97814E+07, 0.10209E+08, 0.10652E+08, 0.11110E+08,
0.11583E+08, 0.12071E+08, 0.12576E+08, 0.13097E+08, 0.13635E+08,
0.14190E+08, 0.14763E+08, 0.15354E+08, 0.15963E+08, 0.16592E+08,
0.17239E+08, 0.17906E+08, 0.18593E+08, 0.19301E+08, 0.20030E+08,
0.20780E+08, 0.21553E+08, 0.22347E+08, 0.23165E+08, 0.24006E+08,
0.24870E+08, 0.25759E+08, 0.26673E+08, 0.27612E+08, 0.28577E+08,
0.29568E+08, 0.30585E+08, 0.31631E+08, 0.32704E+08, 0.33805E+08,
0.34936E+08])
# --------------- O3 768: M = 3, I = 9 ---------------------
M = 3
I = 9
TIPS_GSI_HASH[(M, I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M, I)] = float32([0.40228E+04, 0.67808E+04, 0.99842E+04,
0.13586E+05, 0.17564E+05, 0.21919E+05, 0.26665E+05, 0.31833E+05,
0.37461E+05, 0.43596E+05, 0.50286E+05, 0.57589E+05, 0.65562E+05,
0.74264E+05, 0.83761E+05, 0.94115E+05, 0.10540E+06, 0.11767E+06,
0.13102E+06, 0.14550E+06, 0.16121E+06, 0.17822E+06, 0.19661E+06,
0.21646E+06, 0.23788E+06, 0.26094E+06, 0.28574E+06, 0.31239E+06,
0.34097E+06, 0.37160E+06, 0.40437E+06, 0.43941E+06, 0.47683E+06,
0.51673E+06, 0.55925E+06, 0.60451E+06, 0.65262E+06, 0.70374E+06,
0.75799E+06, 0.81550E+06, 0.87643E+06, 0.94092E+06, 0.10091E+07,
0.10812E+07, 0.11572E+07, 0.12375E+07, 0.13221E+07, 0.14112E+07,
0.15050E+07, 0.16037E+07, 0.17074E+07, 0.18164E+07, 0.19307E+07,
0.20507E+07, 0.21765E+07, 0.23084E+07, 0.24464E+07, 0.25909E+07,
0.27421E+07, 0.29001E+07, 0.30652E+07, 0.32377E+07, 0.34177E+07,
0.36055E+07, 0.38014E+07, 0.40055E+07, 0.42182E+07, 0.44397E+07,
0.46703E+07, 0.49102E+07, 0.51597E+07, 0.54191E+07, 0.56886E+07,
0.59686E+07, 0.62593E+07, 0.65611E+07, 0.68742E+07, 0.71989E+07,
0.75356E+07, 0.78846E+07, 0.82461E+07, 0.86206E+07, 0.90083E+07,
0.94097E+07, 0.98249E+07, 0.10254E+08, 0.10699E+08, 0.11158E+08,
0.11632E+08, 0.12123E+08, 0.12629E+08, 0.13152E+08, 0.13691E+08,
0.14248E+08, 0.14823E+08, 0.15416E+08, 0.16027E+08, 0.16657E+08,
0.17307E+08, 0.17976E+08, 0.18665E+08, 0.19375E+08, 0.20106E+08,
0.20858E+08, 0.21633E+08, 0.22430E+08, 0.23250E+08, 0.24093E+08,
0.24960E+08, 0.25851E+08, 0.26767E+08, 0.27709E+08, 0.28676E+08,
0.29670E+08, 0.30691E+08, 0.31739E+08, 0.32815E+08, 0.33919E+08,
0.35053E+08])
# --------------- O3 786: M = 3, I = 10 ---------------------
M = 3
I = 10
TIPS_GSI_HASH[(M, I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M, I)] = float32([0.39315E+04, 0.66267E+04, 0.97569E+04,
0.13276E+05, 0.17162E+05, 0.21414E+05, 0.26048E+05, 0.31094E+05,
0.36590E+05, 0.42581E+05, 0.49120E+05, 0.56260E+05, 0.64061E+05,
0.72580E+05, 0.81882E+05, 0.92031E+05, 0.10309E+06, 0.11514E+06,
0.12824E+06, 0.14247E+06, 0.15791E+06, 0.17463E+06, 0.19272E+06,
0.21226E+06, 0.23333E+06, 0.25604E+06, 0.28047E+06, 0.30673E+06,
0.33490E+06, 0.36510E+06, 0.39743E+06, 0.43200E+06, 0.46892E+06,
0.50831E+06, 0.55029E+06, 0.59498E+06, 0.64251E+06, 0.69301E+06,
0.74662E+06, 0.80347E+06, 0.86370E+06, 0.92747E+06, 0.99491E+06,
0.10662E+07, 0.11414E+07, 0.12208E+07, 0.13046E+07, 0.13928E+07,
0.14856E+07, 0.15833E+07, 0.16860E+07, 0.17939E+07, 0.19072E+07,
0.20261E+07, 0.21508E+07, 0.22814E+07, 0.24182E+07, 0.25614E+07,
0.27112E+07, 0.28679E+07, 0.30316E+07, 0.32026E+07, 0.33811E+07,
0.35674E+07, 0.37617E+07, 0.39642E+07, 0.41752E+07, 0.43950E+07,
0.46237E+07, 0.48618E+07, 0.51094E+07, 0.53668E+07, 0.56343E+07,
0.59123E+07, 0.62009E+07, 0.65005E+07, 0.68113E+07, 0.71338E+07,
0.74681E+07, 0.78147E+07, 0.81737E+07, 0.85457E+07, 0.89308E+07,
0.93295E+07, 0.97420E+07, 0.10169E+08, 0.10610E+08, 0.11066E+08,
0.11538E+08, 0.12025E+08, 0.12528E+08, 0.13048E+08, 0.13584E+08,
0.14138E+08, 0.14709E+08, 0.15298E+08, 0.15906E+08, 0.16532E+08,
0.17178E+08, 0.17843E+08, 0.18528E+08, 0.19234E+08, 0.19961E+08,
0.20710E+08, 0.21480E+08, 0.22272E+08, 0.23088E+08, 0.23926E+08,
0.24789E+08, 0.25675E+08, 0.26587E+08, 0.27523E+08, 0.28485E+08,
0.29474E+08, 0.30489E+08, 0.31532E+08, 0.32603E+08, 0.33701E+08,
0.34829E+08])
# --------------- O3 776: M = 3, I = 11 ---------------------
M = 3
I = 11
TIPS_GSI_HASH[(M, I)] = __FloatType__(36.)
TIPS_ISO_HASH[(M, I)] = float32([0.23106E+05, 0.38945E+05, 0.57342E+05,
0.78021E+05, 0.10085E+06, 0.12582E+06, 0.15302E+06, 0.18262E+06,
0.21482E+06, 0.24989E+06, 0.28812E+06, 0.32983E+06, 0.37535E+06,
0.42501E+06, 0.47919E+06, 0.53825E+06, 0.60258E+06, 0.67256E+06,
0.74862E+06, 0.83118E+06, 0.92069E+06, 0.10176E+07, 0.11223E+07,
0.12354E+07, 0.13574E+07, 0.14887E+07, 0.16299E+07, 0.17816E+07,
0.19443E+07, 0.21187E+07, 0.23052E+07, 0.25047E+07, 0.27176E+07,
0.29447E+07, 0.31866E+07, 0.34441E+07, 0.37179E+07, 0.40087E+07,
0.43173E+07, 0.46444E+07, 0.49910E+07, 0.53578E+07, 0.57456E+07,
0.61554E+07, 0.65880E+07, 0.70444E+07, 0.75255E+07, 0.80322E+07,
0.85656E+07, 0.91266E+07, 0.97163E+07, 0.10336E+08, 0.10986E+08,
0.11668E+08, 0.12383E+08, 0.13133E+08, 0.13918E+08, 0.14739E+08,
0.15598E+08, 0.16496E+08, 0.17435E+08, 0.18415E+08, 0.19438E+08,
0.20505E+08, 0.21619E+08, 0.22779E+08, 0.23987E+08, 0.25246E+08,
0.26556E+08, 0.27920E+08, 0.29337E+08, 0.30811E+08, 0.32343E+08,
0.33934E+08, 0.35585E+08, 0.37300E+08, 0.39079E+08, 0.40924E+08,
0.42837E+08, 0.44819E+08, 0.46873E+08, 0.49001E+08, 0.51203E+08,
0.53483E+08, 0.55842E+08, 0.58282E+08, 0.60805E+08, 0.63414E+08,
0.66109E+08, 0.68894E+08, 0.71770E+08, 0.74740E+08, 0.77806E+08,
0.80970E+08, 0.84234E+08, 0.87600E+08, 0.91072E+08, 0.94651E+08,
0.98339E+08, 0.10214E+09, 0.10605E+09, 0.11009E+09, 0.11424E+09,
0.11851E+09, 0.12291E+09, 0.12744E+09, 0.13209E+09, 0.13688E+09,
0.14180E+09, 0.14687E+09, 0.15207E+09, 0.15742E+09, 0.16291E+09,
0.16855E+09, 0.17435E+09, 0.18030E+09, 0.18641E+09, 0.19268E+09,
0.19912E+09])
# --------------- O3 767: M = 3, I = 12 ---------------------
M = 3
I = 12
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.11692E+05, 0.19707E+05, 0.29017E+05,
0.39482E+05, 0.51038E+05, 0.63680E+05, 0.77450E+05, 0.92432E+05,
0.10873E+06, 0.12649E+06, 0.14584E+06, 0.16694E+06, 0.18996E+06,
0.21507E+06, 0.24245E+06, 0.27229E+06, 0.30478E+06, 0.34013E+06,
0.37853E+06, 0.42020E+06, 0.46536E+06, 0.51424E+06, 0.56708E+06,
0.62411E+06, 0.68559E+06, 0.75178E+06, 0.82296E+06, 0.89939E+06,
0.98137E+06, 0.10692E+07, 0.11631E+07, 0.12636E+07, 0.13708E+07,
0.14851E+07, 0.16069E+07, 0.17365E+07, 0.18742E+07, 0.20206E+07,
0.21758E+07, 0.23404E+07, 0.25148E+07, 0.26992E+07, 0.28943E+07,
0.31004E+07, 0.33179E+07, 0.35474E+07, 0.37892E+07, 0.40440E+07,
0.43121E+07, 0.45940E+07, 0.48904E+07, 0.52017E+07, 0.55285E+07,
0.58713E+07, 0.62306E+07, 0.66071E+07, 0.70014E+07, 0.74140E+07,
0.78456E+07, 0.82967E+07, 0.87681E+07, 0.92604E+07, 0.97742E+07,
0.10310E+08, 0.10869E+08, 0.11452E+08, 0.12059E+08, 0.12691E+08,
0.13348E+08, 0.14033E+08, 0.14745E+08, 0.15484E+08, 0.16253E+08,
0.17052E+08, 0.17881E+08, 0.18741E+08, 0.19634E+08, 0.20560E+08,
0.21520E+08, 0.22515E+08, 0.23546E+08, 0.24613E+08, 0.25718E+08,
0.26862E+08, 0.28046E+08, 0.29270E+08, 0.30536E+08, 0.31845E+08,
0.33197E+08, 0.34594E+08, 0.36037E+08, 0.37527E+08, 0.39065E+08,
0.40652E+08, 0.42289E+08, 0.43977E+08, 0.45719E+08, 0.47514E+08,
0.49363E+08, 0.51270E+08, 0.53233E+08, 0.55255E+08, 0.57337E+08,
0.59480E+08, 0.61686E+08, 0.63956E+08, 0.66290E+08, 0.68691E+08,
0.71160E+08, 0.73699E+08, 0.76307E+08, 0.78988E+08, 0.81743E+08,
0.84572E+08, 0.87478E+08, 0.90462E+08, 0.93525E+08, 0.96669E+08,
0.99896E+08])
# --------------- O3 888: M = 3, I = 13 ---------------------
M = 3
I = 13
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.36175E+03, 0.60978E+03, 0.89790E+03,
0.12219E+04, 0.15802E+04, 0.19728E+04, 0.24016E+04, 0.28696E+04,
0.33807E+04, 0.39394E+04, 0.45506E+04, 0.52196E+04, 0.59521E+04,
0.67538E+04, 0.76308E+04, 0.85894E+04, 0.96361E+04, 0.10777E+05,
0.12021E+05, 0.13373E+05, 0.14841E+05, 0.16434E+05, 0.18158E+05,
0.20023E+05, 0.22037E+05, 0.24208E+05, 0.26547E+05, 0.29061E+05,
0.31762E+05, 0.34659E+05, 0.37762E+05, 0.41083E+05, 0.44632E+05,
0.48421E+05, 0.52462E+05, 0.56766E+05, 0.61346E+05, 0.66215E+05,
0.71386E+05, 0.76873E+05, 0.82688E+05, 0.88848E+05, 0.95365E+05,
0.10226E+06, 0.10954E+06, 0.11722E+06, 0.12532E+06, 0.13387E+06,
0.14286E+06, 0.15233E+06, 0.16229E+06, 0.17275E+06, 0.18374E+06,
0.19528E+06, 0.20737E+06, 0.22006E+06, 0.23335E+06, 0.24726E+06,
0.26182E+06, 0.27705E+06, 0.29297E+06, 0.30960E+06, 0.32696E+06,
0.34509E+06, 0.36399E+06, 0.38371E+06, 0.40425E+06, 0.42566E+06,
0.44794E+06, 0.47114E+06, 0.49527E+06, 0.52036E+06, 0.54644E+06,
0.57354E+06, 0.60169E+06, 0.63091E+06, 0.66124E+06, 0.69270E+06,
0.72533E+06, 0.75916E+06, 0.79421E+06, 0.83053E+06, 0.86814E+06,
0.90708E+06, 0.94737E+06, 0.98907E+06, 0.10322E+07, 0.10768E+07,
0.11229E+07, 0.11705E+07, 0.12197E+07, 0.12705E+07, 0.13230E+07,
0.13771E+07, 0.14330E+07, 0.14906E+07, 0.15501E+07, 0.16114E+07,
0.16745E+07, 0.17397E+07, 0.18067E+07, 0.18759E+07, 0.19470E+07,
0.20203E+07, 0.20957E+07, 0.21733E+07, 0.22532E+07, 0.23353E+07,
0.24198E+07, 0.25067E+07, 0.25960E+07, 0.26878E+07, 0.27821E+07,
0.28790E+07, 0.29785E+07, 0.30807E+07, 0.31857E+07, 0.32934E+07,
0.34040E+07])
# --------------- O3 887: M = 3, I = 14 ---------------------
M = 3
I = 14
TIPS_GSI_HASH[(M, I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M, I)] = float32([0.42000E+04, 0.70796E+04, 0.10424E+05,
0.14186E+05, 0.18342E+05, 0.22896E+05, 0.27866E+05, 0.33285E+05,
0.39199E+05, 0.45659E+05, 0.52720E+05, 0.60444E+05, 0.68895E+05,
0.78139E+05, 0.88246E+05, 0.99288E+05, 0.11134E+06, 0.12447E+06,
0.13877E+06, 0.15431E+06, 0.17119E+06, 0.18949E+06, 0.20930E+06,
0.23071E+06, 0.25383E+06, 0.27875E+06, 0.30558E+06, 0.33442E+06,
0.36539E+06, 0.39861E+06, 0.43418E+06, 0.47224E+06, 0.51291E+06,
0.55632E+06, 0.60260E+06, 0.65189E+06, 0.70434E+06, 0.76008E+06,
0.81927E+06, 0.88206E+06, 0.94862E+06, 0.10191E+07, 0.10937E+07,
0.11725E+07, 0.12558E+07, 0.13436E+07, 0.14363E+07, 0.15340E+07,
0.16368E+07, 0.17450E+07, 0.18588E+07, 0.19784E+07, 0.21040E+07,
0.22358E+07, 0.23741E+07, 0.25190E+07, 0.26708E+07, 0.28297E+07,
0.29961E+07, 0.31700E+07, 0.33518E+07, 0.35417E+07, 0.37400E+07,
0.39469E+07, 0.41628E+07, 0.43878E+07, 0.46224E+07, 0.48667E+07,
0.51210E+07, 0.53858E+07, 0.56611E+07, 0.59475E+07, 0.62451E+07,
0.65544E+07, 0.68755E+07, 0.72089E+07, 0.75550E+07, 0.79139E+07,
0.82861E+07, 0.86720E+07, 0.90719E+07, 0.94861E+07, 0.99151E+07,
0.10359E+08, 0.10819E+08, 0.11294E+08, 0.11786E+08, 0.12294E+08,
0.12820E+08, 0.13363E+08, 0.13924E+08, 0.14503E+08, 0.15101E+08,
0.15719E+08, 0.16356E+08, 0.17013E+08, 0.17690E+08, 0.18389E+08,
0.19109E+08, 0.19851E+08, 0.20616E+08, 0.21404E+08, 0.22215E+08,
0.23050E+08, 0.23910E+08, 0.24794E+08, 0.25704E+08, 0.26640E+08,
0.27603E+08, 0.28593E+08, 0.29610E+08, 0.30656E+08, 0.31731E+08,
0.32835E+08, 0.33969E+08, 0.35133E+08, 0.36329E+08, 0.37556E+08,
0.38816E+08])
# --------------- O3 878: M = 3, I = 15 ---------------------
M = 3
I = 15
TIPS_GSI_HASH[(M, I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M, I)] = float32([0.21250E+04, 0.35820E+04, 0.52744E+04,
0.71778E+04, 0.92814E+04, 0.11586E+05, 0.14102E+05, 0.16845E+05,
0.19839E+05, 0.23108E+05, 0.26680E+05, 0.30588E+05, 0.34861E+05,
0.39534E+05, 0.44642E+05, 0.50219E+05, 0.56305E+05, 0.62937E+05,
0.70155E+05, 0.78001E+05, 0.86516E+05, 0.95747E+05, 0.10574E+06,
0.11653E+06, 0.12819E+06, 0.14075E+06, 0.15427E+06, 0.16881E+06,
0.18441E+06, 0.20114E+06, 0.21906E+06, 0.23823E+06, 0.25871E+06,
0.28056E+06, 0.30386E+06, 0.32867E+06, 0.35507E+06, 0.38312E+06,
0.41291E+06, 0.44450E+06, 0.47799E+06, 0.51344E+06, 0.55095E+06,
0.59060E+06, 0.63248E+06, 0.67667E+06, 0.72327E+06, 0.77238E+06,
0.82409E+06, 0.87850E+06, 0.93571E+06, 0.99583E+06, 0.10590E+07,
0.11252E+07, 0.11947E+07, 0.12675E+07, 0.13438E+07, 0.14237E+07,
0.15072E+07, 0.15946E+07, 0.16859E+07, 0.17814E+07, 0.18810E+07,
0.19849E+07, 0.20934E+07, 0.22064E+07, 0.23242E+07, 0.24469E+07,
0.25747E+07, 0.27076E+07, 0.28459E+07, 0.29897E+07, 0.31391E+07,
0.32944E+07, 0.34557E+07, 0.36231E+07, 0.37968E+07, 0.39770E+07,
0.41639E+07, 0.43576E+07, 0.45583E+07, 0.47663E+07, 0.49816E+07,
0.52045E+07, 0.54352E+07, 0.56739E+07, 0.59207E+07, 0.61759E+07,
0.64396E+07, 0.67121E+07, 0.69936E+07, 0.72844E+07, 0.75845E+07,
0.78943E+07, 0.82139E+07, 0.85436E+07, 0.88837E+07, 0.92342E+07,
0.95956E+07, 0.99680E+07, 0.10352E+08, 0.10747E+08, 0.11154E+08,
0.11573E+08, 0.12004E+08, 0.12448E+08, 0.12904E+08, 0.13374E+08,
0.13857E+08, 0.14353E+08, 0.14864E+08, 0.15388E+08, 0.15927E+08,
0.16481E+08, 0.17050E+08, 0.17634E+08, 0.18234E+08, 0.18849E+08,
0.19481E+08])
# --------------- O3 778: M = 3, I = 16 ---------------------
M = 3
I = 16
TIPS_GSI_HASH[(M, I)] = __FloatType__(36.)
TIPS_ISO_HASH[(M, I)] = float32([0.24692E+05, 0.41621E+05, 0.61284E+05,
0.83394E+05, 0.10782E+06, 0.13457E+06, 0.16375E+06, 0.19554E+06,
0.23020E+06, 0.26801E+06, 0.30930E+06, 0.35443E+06, 0.40375E+06,
0.45763E+06, 0.51650E+06, 0.58075E+06, 0.65080E+06, 0.72711E+06,
0.81012E+06, 0.90030E+06, 0.99815E+06, 0.11042E+07, 0.12189E+07,
0.13428E+07, 0.14765E+07, 0.16206E+07, 0.17757E+07, 0.19423E+07,
0.21212E+07, 0.23129E+07, 0.25181E+07, 0.27377E+07, 0.29721E+07,
0.32223E+07, 0.34890E+07, 0.37729E+07, 0.40750E+07, 0.43959E+07,
0.47365E+07, 0.50978E+07, 0.54807E+07, 0.58860E+07, 0.63147E+07,
0.67678E+07, 0.72463E+07, 0.77512E+07, 0.82836E+07, 0.88445E+07,
0.94351E+07, 0.10056E+08, 0.10710E+08, 0.11396E+08, 0.12117E+08,
0.12873E+08, 0.13666E+08, 0.14497E+08, 0.15367E+08, 0.16279E+08,
0.17232E+08, 0.18229E+08, 0.19271E+08, 0.20359E+08, 0.21495E+08,
0.22681E+08, 0.23917E+08, 0.25206E+08, 0.26549E+08, 0.27948E+08,
0.29404E+08, 0.30920E+08, 0.32496E+08, 0.34135E+08, 0.35838E+08,
0.37608E+08, 0.39445E+08, 0.41353E+08, 0.43332E+08, 0.45385E+08,
0.47514E+08, 0.49721E+08, 0.52007E+08, 0.54376E+08, 0.56829E+08,
0.59367E+08, 0.61995E+08, 0.64712E+08, 0.67523E+08, 0.70429E+08,
0.73432E+08, 0.76535E+08, 0.79740E+08, 0.83050E+08, 0.86467E+08,
0.89993E+08, 0.93632E+08, 0.97385E+08, 0.10126E+09, 0.10525E+09,
0.10936E+09, 0.11360E+09, 0.11796E+09, 0.12246E+09, 0.12709E+09,
0.13186E+09, 0.13677E+09, 0.14182E+09, 0.14701E+09, 0.15236E+09,
0.15785E+09, 0.16350E+09, 0.16931E+09, 0.17528E+09, 0.18141E+09,
0.18771E+09, 0.19418E+09, 0.20082E+09, 0.20764E+09, 0.21465E+09,
0.22183E+09])
# --------------- O3 787: M = 3, I = 17 ---------------------
M = 3
I = 17
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.12211E+05, 0.20582E+05, 0.30305E+05,
0.41237E+05, 0.53314E+05, 0.66536E+05, 0.80957E+05, 0.96672E+05,
0.11380E+06, 0.13250E+06, 0.15292E+06, 0.17524E+06, 0.19965E+06,
0.22632E+06, 0.25546E+06, 0.28728E+06, 0.32199E+06, 0.35980E+06,
0.40094E+06, 0.44565E+06, 0.49417E+06, 0.54676E+06, 0.60366E+06,
0.66516E+06, 0.73152E+06, 0.80305E+06, 0.88002E+06, 0.96276E+06,
0.10516E+07, 0.11468E+07, 0.12488E+07, 0.13578E+07, 0.14743E+07,
0.15987E+07, 0.17312E+07, 0.18723E+07, 0.20225E+07, 0.21820E+07,
0.23514E+07, 0.25310E+07, 0.27214E+07, 0.29230E+07, 0.31362E+07,
0.33616E+07, 0.35997E+07, 0.38509E+07, 0.41158E+07, 0.43949E+07,
0.46887E+07, 0.49980E+07, 0.53231E+07, 0.56647E+07, 0.60234E+07,
0.63998E+07, 0.67946E+07, 0.72084E+07, 0.76418E+07, 0.80955E+07,
0.85702E+07, 0.90666E+07, 0.95854E+07, 0.10127E+08, 0.10693E+08,
0.11284E+08, 0.11900E+08, 0.12542E+08, 0.13211E+08, 0.13907E+08,
0.14633E+08, 0.15388E+08, 0.16173E+08, 0.16990E+08, 0.17838E+08,
0.18720E+08, 0.19636E+08, 0.20586E+08, 0.21573E+08, 0.22596E+08,
0.23657E+08, 0.24757E+08, 0.25896E+08, 0.27077E+08, 0.28299E+08,
0.29565E+08, 0.30874E+08, 0.32229E+08, 0.33630E+08, 0.35079E+08,
0.36576E+08, 0.38123E+08, 0.39721E+08, 0.41371E+08, 0.43075E+08,
0.44833E+08, 0.46647E+08, 0.48518E+08, 0.50448E+08, 0.52438E+08,
0.54489E+08, 0.56603E+08, 0.58780E+08, 0.61023E+08, 0.63332E+08,
0.65710E+08, 0.68157E+08, 0.70676E+08, 0.73266E+08, 0.75931E+08,
0.78672E+08, 0.81490E+08, 0.84386E+08, 0.87363E+08, 0.90422E+08,
0.93564E+08, 0.96791E+08, 0.10011E+09, 0.10351E+09, 0.10700E+09,
0.11059E+09])
# --------------- O3 777: M = 3, I = 18 ---------------------
M = 3
I = 18
TIPS_GSI_HASH[(M, I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M, I)] = float32([0.71750E+05, 0.12094E+06, 0.17807E+06,
0.24230E+06, 0.31324E+06, 0.39088E+06, 0.47550E+06, 0.56764E+06,
0.66800E+06, 0.77740E+06, 0.89677E+06, 0.10271E+07, 0.11694E+07,
0.13249E+07, 0.14945E+07, 0.16796E+07, 0.18813E+07, 0.21009E+07,
0.23396E+07, 0.25989E+07, 0.28801E+07, 0.31847E+07, 0.35140E+07,
0.38698E+07, 0.42535E+07, 0.46669E+07, 0.51115E+07, 0.55893E+07,
0.61019E+07, 0.66513E+07, 0.72393E+07, 0.78680E+07, 0.85395E+07,
0.92558E+07, 0.10019E+08, 0.10832E+08, 0.11696E+08, 0.12614E+08,
0.13588E+08, 0.14621E+08, 0.15716E+08, 0.16875E+08, 0.18100E+08,
0.19395E+08, 0.20762E+08, 0.22205E+08, 0.23726E+08, 0.25328E+08,
0.27015E+08, 0.28789E+08, 0.30654E+08, 0.32614E+08, 0.34671E+08,
0.36830E+08, 0.39093E+08, 0.41465E+08, 0.43949E+08, 0.46549E+08,
0.49269E+08, 0.52112E+08, 0.55084E+08, 0.58188E+08, 0.61428E+08,
0.64809E+08, 0.68335E+08, 0.72010E+08, 0.75840E+08, 0.79828E+08,
0.83979E+08, 0.88299E+08, 0.92792E+08, 0.97463E+08, 0.10232E+09,
0.10736E+09, 0.11260E+09, 0.11803E+09, 0.12367E+09, 0.12952E+09,
0.13559E+09, 0.14187E+09, 0.14839E+09, 0.15513E+09, 0.16212E+09,
0.16935E+09, 0.17683E+09, 0.18457E+09, 0.19257E+09, 0.20085E+09,
0.20940E+09, 0.21824E+09, 0.22736E+09, 0.23678E+09, 0.24651E+09,
0.25655E+09, 0.26691E+09, 0.27759E+09, 0.28861E+09, 0.29997E+09,
0.31167E+09, 0.32374E+09, 0.33616E+09, 0.34896E+09, 0.36214E+09,
0.37571E+09, 0.38967E+09, 0.40404E+09, 0.41882E+09, 0.43403E+09,
0.44966E+09, 0.46573E+09, 0.48226E+09, 0.49923E+09, 0.51668E+09,
0.53460E+09, 0.55301E+09, 0.57191E+09, 0.59131E+09, 0.61123E+09,
0.63167E+09])
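# ----------------------------------------------------------------------
# Illustrative sketch: the arrays stored in TIPS_ISO_HASH appear to be
# total internal partition sums Q(T) tabulated on a fixed temperature
# grid (the grid itself is not reproduced in this excerpt), keyed by
# (molecule number M, isotopologue number I); TIPS_GSI_HASH holds the
# corresponding state-independent degeneracy factors.  The helper below
# shows one way such a table could be evaluated at an arbitrary
# temperature by linear interpolation.  The function name, the `T_grid`
# argument and the interpolation scheme are assumptions for illustration
# only, not this module's actual API.
import numpy as np

def _partition_sum_sketch(M, I, T, T_grid):
    """Interpolate the tabulated partition sum for (M, I) at temperature T [K].

    `T_grid` must be the temperature grid (same length as the tabulated
    arrays) that the caller obtains from this module's own definitions.
    """
    Q_tab = TIPS_ISO_HASH[(M, I)]            # tabulated Q values for this isotopologue
    return float(np.interp(T, T_grid, Q_tab))
# ----------------------------------------------------------------------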
# --------------- N2O 446: M = 4, I = 1 ---------------------
M = 4
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(9.)
TIPS_ISO_HASH[(M, I)] = float32([0.89943E+03, 0.12734E+04, 0.16489E+04,
0.20293E+04, 0.24205E+04, 0.28289E+04, 0.32609E+04, 0.37222E+04,
0.42180E+04, 0.47529E+04, 0.53312E+04, 0.59572E+04, 0.66348E+04,
0.73683E+04, 0.81616E+04, 0.90190E+04, 0.99450E+04, 0.10944E+05,
0.12021E+05, 0.13180E+05, 0.14426E+05, 0.15766E+05, 0.17203E+05,
0.18745E+05, 0.20396E+05, 0.22162E+05, 0.24051E+05, 0.26069E+05,
0.28222E+05, 0.30517E+05, 0.32962E+05, 0.35564E+05, 0.38331E+05,
0.41271E+05, 0.44393E+05, 0.47704E+05, 0.51214E+05, 0.54932E+05,
0.58868E+05, 0.63030E+05, 0.67429E+05, 0.72075E+05, 0.76979E+05,
0.82151E+05, 0.87604E+05, 0.93348E+05, 0.99395E+05, 0.10576E+06,
0.11245E+06, 0.11948E+06, 0.12686E+06, 0.13461E+06, 0.14275E+06,
0.15128E+06, 0.16021E+06, 0.16958E+06, 0.17938E+06, 0.18964E+06,
0.20037E+06, 0.21159E+06, 0.22331E+06, 0.23556E+06, 0.24834E+06,
0.26169E+06, 0.27561E+06, 0.29012E+06, 0.30525E+06, 0.32101E+06,
0.33743E+06, 0.35452E+06, 0.37230E+06, 0.39080E+06, 0.41004E+06,
0.43004E+06, 0.45082E+06, 0.47241E+06, 0.49483E+06, 0.51810E+06,
0.54225E+06, 0.56730E+06, 0.59329E+06, 0.62022E+06, 0.64814E+06,
0.67707E+06, 0.70703E+06, 0.73806E+06, 0.77018E+06, 0.80342E+06,
0.83781E+06, 0.87338E+06, 0.91016E+06, 0.94818E+06, 0.98748E+06,
0.10281E+07, 0.10700E+07, 0.11133E+07, 0.11581E+07, 0.12042E+07,
0.12519E+07, 0.13010E+07, 0.13517E+07, 0.14040E+07, 0.14579E+07,
0.15134E+07, 0.15707E+07, 0.16297E+07, 0.16905E+07, 0.17530E+07,
0.18175E+07, 0.18838E+07, 0.19521E+07, 0.20224E+07, 0.20947E+07,
0.21690E+07, 0.22455E+07, 0.23242E+07, 0.24050E+07, 0.24881E+07,
0.25735E+07])
# --------------- N2O 456: M = 4, I = 2 ---------------------
M = 4
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M, I)] = float32([0.59966E+03, 0.84903E+03, 0.10995E+04,
0.13538E+04, 0.16158E+04, 0.18903E+04, 0.21815E+04, 0.24934E+04,
0.28295E+04, 0.31927E+04, 0.35862E+04, 0.40128E+04, 0.44752E+04,
0.49763E+04, 0.55189E+04, 0.61059E+04, 0.67404E+04, 0.74256E+04,
0.81646E+04, 0.89609E+04, 0.98180E+04, 0.10740E+05, 0.11729E+05,
0.12791E+05, 0.13930E+05, 0.15149E+05, 0.16453E+05, 0.17847E+05,
0.19335E+05, 0.20922E+05, 0.22614E+05, 0.24416E+05, 0.26333E+05,
0.28371E+05, 0.30535E+05, 0.32833E+05, 0.35269E+05, 0.37851E+05,
0.40585E+05, 0.43478E+05, 0.46537E+05, 0.49769E+05, 0.53182E+05,
0.56783E+05, 0.60580E+05, 0.64582E+05, 0.68796E+05, 0.73232E+05,
0.77898E+05, 0.82803E+05, 0.87957E+05, 0.93369E+05, 0.99048E+05,
0.10501E+06, 0.11125E+06, 0.11780E+06, 0.12465E+06, 0.13182E+06,
0.13933E+06, 0.14718E+06, 0.15539E+06, 0.16396E+06, 0.17291E+06,
0.18226E+06, 0.19201E+06, 0.20218E+06, 0.21278E+06, 0.22383E+06,
0.23534E+06, 0.24733E+06, 0.25980E+06, 0.27278E+06, 0.28628E+06,
0.30032E+06, 0.31491E+06, 0.33007E+06, 0.34581E+06, 0.36216E+06,
0.37912E+06, 0.39673E+06, 0.41499E+06, 0.43392E+06, 0.45355E+06,
0.47389E+06, 0.49496E+06, 0.51678E+06, 0.53937E+06, 0.56276E+06,
0.58695E+06, 0.61199E+06, 0.63788E+06, 0.66464E+06, 0.69231E+06,
0.72090E+06, 0.75044E+06, 0.78094E+06, 0.81244E+06, 0.84496E+06,
0.87853E+06, 0.91316E+06, 0.94889E+06, 0.98573E+06, 0.10237E+07,
0.10629E+07, 0.11033E+07, 0.11449E+07, 0.11877E+07, 0.12319E+07,
0.12773E+07, 0.13241E+07, 0.13723E+07, 0.14219E+07, 0.14729E+07,
0.15254E+07, 0.15793E+07, 0.16349E+07, 0.16919E+07, 0.17506E+07,
0.18109E+07])
# --------------- N2O 546: M = 4, I = 3 ---------------------
M = 4
I = 3
TIPS_GSI_HASH[(M, I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M, I)] = float32([0.62051E+03, 0.87856E+03, 0.11377E+04,
0.14003E+04, 0.16705E+04, 0.19529E+04, 0.22518E+04, 0.25713E+04,
0.29149E+04, 0.32859E+04, 0.36873E+04, 0.41220E+04, 0.45929E+04,
0.51028E+04, 0.56547E+04, 0.62515E+04, 0.68963E+04, 0.75923E+04,
0.83428E+04, 0.91511E+04, 0.10021E+05, 0.10956E+05, 0.11960E+05,
0.13036E+05, 0.14190E+05, 0.15425E+05, 0.16746E+05, 0.18158E+05,
0.19664E+05, 0.21271E+05, 0.22984E+05, 0.24806E+05, 0.26745E+05,
0.28806E+05, 0.30995E+05, 0.33317E+05, 0.35780E+05, 0.38389E+05,
0.41151E+05, 0.44073E+05, 0.47162E+05, 0.50425E+05, 0.53871E+05,
0.57505E+05, 0.61338E+05, 0.65375E+05, 0.69628E+05, 0.74102E+05,
0.78808E+05, 0.83755E+05, 0.88951E+05, 0.94407E+05, 0.10013E+06,
0.10614E+06, 0.11243E+06, 0.11902E+06, 0.12593E+06, 0.13316E+06,
0.14072E+06, 0.14862E+06, 0.15689E+06, 0.16552E+06, 0.17453E+06,
0.18394E+06, 0.19376E+06, 0.20399E+06, 0.21466E+06, 0.22578E+06,
0.23737E+06, 0.24942E+06, 0.26198E+06, 0.27503E+06, 0.28861E+06,
0.30273E+06, 0.31741E+06, 0.33265E+06, 0.34848E+06, 0.36492E+06,
0.38197E+06, 0.39967E+06, 0.41803E+06, 0.43706E+06, 0.45679E+06,
0.47723E+06, 0.49840E+06, 0.52033E+06, 0.54303E+06, 0.56653E+06,
0.59084E+06, 0.61599E+06, 0.64200E+06, 0.66888E+06, 0.69667E+06,
0.72539E+06, 0.75506E+06, 0.78569E+06, 0.81733E+06, 0.84998E+06,
0.88369E+06, 0.91846E+06, 0.95433E+06, 0.99132E+06, 0.10295E+07,
0.10688E+07, 0.11093E+07, 0.11511E+07, 0.11941E+07, 0.12384E+07,
0.12840E+07, 0.13310E+07, 0.13793E+07, 0.14291E+07, 0.14803E+07,
0.15329E+07, 0.15871E+07, 0.16428E+07, 0.17000E+07, 0.17589E+07,
0.18194E+07])
# --------------- N2O 448: M = 4, I = 4 ---------------------
M = 4
I = 4
TIPS_GSI_HASH[(M, I)] = __FloatType__(9.)
TIPS_ISO_HASH[(M, I)] = float32([0.95253E+03, 0.13487E+04, 0.17465E+04,
0.21498E+04, 0.25648E+04, 0.29986E+04, 0.34580E+04, 0.39493E+04,
0.44779E+04, 0.50488E+04, 0.56669E+04, 0.63366E+04, 0.70625E+04,
0.78488E+04, 0.87003E+04, 0.96216E+04, 0.10617E+05, 0.11692E+05,
0.12852E+05, 0.14102E+05, 0.15447E+05, 0.16893E+05, 0.18446E+05,
0.20112E+05, 0.21898E+05, 0.23811E+05, 0.25856E+05, 0.28042E+05,
0.30377E+05, 0.32866E+05, 0.35520E+05, 0.38345E+05, 0.41351E+05,
0.44545E+05, 0.47939E+05, 0.51540E+05, 0.55359E+05, 0.59405E+05,
0.63689E+05, 0.68222E+05, 0.73015E+05, 0.78078E+05, 0.83424E+05,
0.89064E+05, 0.95012E+05, 0.10128E+06, 0.10788E+06, 0.11482E+06,
0.12213E+06, 0.12981E+06, 0.13788E+06, 0.14635E+06, 0.15524E+06,
0.16456E+06, 0.17433E+06, 0.18457E+06, 0.19530E+06, 0.20652E+06,
0.21827E+06, 0.23055E+06, 0.24338E+06, 0.25679E+06, 0.27079E+06,
0.28541E+06, 0.30066E+06, 0.31656E+06, 0.33314E+06, 0.35042E+06,
0.36841E+06, 0.38715E+06, 0.40666E+06, 0.42695E+06, 0.44805E+06,
0.46999E+06, 0.49279E+06, 0.51649E+06, 0.54109E+06, 0.56664E+06,
0.59315E+06, 0.62066E+06, 0.64919E+06, 0.67877E+06, 0.70943E+06,
0.74121E+06, 0.77413E+06, 0.80822E+06, 0.84351E+06, 0.88004E+06,
0.91783E+06, 0.95693E+06, 0.99737E+06, 0.10392E+07, 0.10824E+07,
0.11270E+07, 0.11732E+07, 0.12208E+07, 0.12700E+07, 0.13208E+07,
0.13732E+07, 0.14272E+07, 0.14830E+07, 0.15405E+07, 0.15999E+07,
0.16610E+07, 0.17240E+07, 0.17890E+07, 0.18559E+07, 0.19248E+07,
0.19957E+07, 0.20687E+07, 0.21439E+07, 0.22213E+07, 0.23009E+07,
0.23828E+07, 0.24671E+07, 0.25537E+07, 0.26428E+07, 0.27343E+07,
0.28284E+07])
# --------------- N2O 447: M = 4, I = 5 ---------------------
M = 4
I = 5
TIPS_GSI_HASH[(M, I)] = __FloatType__(54.)
TIPS_ISO_HASH[(M, I)] = float32([0.55598E+04, 0.78718E+04, 0.10193E+05,
0.12546E+05, 0.14966E+05, 0.17495E+05, 0.20171E+05, 0.23031E+05,
0.26106E+05, 0.29426E+05, 0.33018E+05, 0.36908E+05, 0.41121E+05,
0.45684E+05, 0.50622E+05, 0.55962E+05, 0.61731E+05, 0.67958E+05,
0.74671E+05, 0.81902E+05, 0.89681E+05, 0.98043E+05, 0.10702E+06,
0.11665E+06, 0.12697E+06, 0.13801E+06, 0.14983E+06, 0.16244E+06,
0.17591E+06, 0.19028E+06, 0.20558E+06, 0.22188E+06, 0.23920E+06,
0.25762E+06, 0.27718E+06, 0.29793E+06, 0.31993E+06, 0.34323E+06,
0.36791E+06, 0.39401E+06, 0.42160E+06, 0.45074E+06, 0.48151E+06,
0.51397E+06, 0.54819E+06, 0.58424E+06, 0.62221E+06, 0.66215E+06,
0.70416E+06, 0.74832E+06, 0.79470E+06, 0.84340E+06, 0.89450E+06,
0.94808E+06, 0.10042E+07, 0.10631E+07, 0.11247E+07, 0.11892E+07,
0.12567E+07, 0.13272E+07, 0.14009E+07, 0.14779E+07, 0.15583E+07,
0.16422E+07, 0.17298E+07, 0.18211E+07, 0.19163E+07, 0.20154E+07,
0.21187E+07, 0.22263E+07, 0.23382E+07, 0.24546E+07, 0.25757E+07,
0.27016E+07, 0.28324E+07, 0.29683E+07, 0.31095E+07, 0.32560E+07,
0.34081E+07, 0.35659E+07, 0.37295E+07, 0.38991E+07, 0.40750E+07,
0.42572E+07, 0.44459E+07, 0.46414E+07, 0.48437E+07, 0.50531E+07,
0.52698E+07, 0.54939E+07, 0.57257E+07, 0.59653E+07, 0.62129E+07,
0.64688E+07, 0.67331E+07, 0.70061E+07, 0.72880E+07, 0.75790E+07,
0.78792E+07, 0.81891E+07, 0.85086E+07, 0.88382E+07, 0.91780E+07,
0.95283E+07, 0.98893E+07, 0.10261E+08, 0.10644E+08, 0.11039E+08,
0.11445E+08, 0.11864E+08, 0.12294E+08, 0.12738E+08, 0.13194E+08,
0.13663E+08, 0.14145E+08, 0.14641E+08, 0.15151E+08, 0.15675E+08,
0.16214E+08])
# --------------- CO 26: M = 5, I = 1 ---------------------
M = 5
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.21948E+02, 0.30961E+02, 0.39980E+02,
0.49004E+02, 0.58035E+02, 0.67071E+02, 0.76112E+02, 0.85160E+02,
0.94213E+02, 0.10327E+03, 0.11234E+03, 0.12142E+03, 0.13050E+03,
0.13960E+03, 0.14872E+03, 0.15787E+03, 0.16704E+03, 0.17624E+03,
0.18548E+03, 0.19477E+03, 0.20411E+03, 0.21350E+03, 0.22295E+03,
0.23248E+03, 0.24207E+03, 0.25175E+03, 0.26151E+03, 0.27136E+03,
0.28130E+03, 0.29134E+03, 0.30148E+03, 0.31172E+03, 0.32207E+03,
0.33253E+03, 0.34312E+03, 0.35381E+03, 0.36463E+03, 0.37557E+03,
0.38663E+03, 0.39782E+03, 0.40914E+03, 0.42060E+03, 0.43218E+03,
0.44389E+03, 0.45575E+03, 0.46774E+03, 0.47987E+03, 0.49213E+03,
0.50454E+03, 0.51708E+03, 0.52978E+03, 0.54261E+03, 0.55559E+03,
0.56871E+03, 0.58198E+03, 0.59540E+03, 0.60896E+03, 0.62267E+03,
0.63653E+03, 0.65055E+03, 0.66470E+03, 0.67901E+03, 0.69347E+03,
0.70808E+03, 0.72284E+03, 0.73776E+03, 0.75283E+03, 0.76805E+03,
0.78342E+03, 0.79895E+03, 0.81463E+03, 0.83047E+03, 0.84646E+03,
0.86260E+03, 0.87891E+03, 0.89536E+03, 0.91197E+03, 0.92874E+03,
0.94566E+03, 0.96275E+03, 0.97998E+03, 0.99738E+03, 0.10149E+04,
0.10326E+04, 0.10505E+04, 0.10685E+04, 0.10867E+04, 0.11051E+04,
0.11236E+04, 0.11422E+04, 0.11611E+04, 0.11800E+04, 0.11992E+04,
0.12185E+04, 0.12380E+04, 0.12576E+04, 0.12774E+04, 0.12973E+04,
0.13174E+04, 0.13377E+04, 0.13581E+04, 0.13787E+04, 0.13994E+04,
0.14203E+04, 0.14414E+04, 0.14627E+04, 0.14841E+04, 0.15056E+04,
0.15273E+04, 0.15492E+04, 0.15713E+04, 0.15935E+04, 0.16159E+04,
0.16384E+04, 0.16611E+04, 0.16840E+04, 0.17070E+04, 0.17302E+04,
0.17536E+04])
# --------------- CO 36: M = 5, I = 2 ---------------------
M = 5
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M, I)] = float32([0.45888E+02, 0.64745E+02, 0.83615E+02,
0.10250E+03, 0.12139E+03, 0.14030E+03, 0.15921E+03, 0.17814E+03,
0.19708E+03, 0.21604E+03, 0.23501E+03, 0.25400E+03, 0.27302E+03,
0.29207E+03, 0.31117E+03, 0.33031E+03, 0.34952E+03, 0.36880E+03,
0.38817E+03, 0.40764E+03, 0.42723E+03, 0.44694E+03, 0.46679E+03,
0.48679E+03, 0.50696E+03, 0.52730E+03, 0.54783E+03, 0.56855E+03,
0.58948E+03, 0.61061E+03, 0.63198E+03, 0.65357E+03, 0.67539E+03,
0.69747E+03, 0.71979E+03, 0.74237E+03, 0.76521E+03, 0.78832E+03,
0.81169E+03, 0.83534E+03, 0.85927E+03, 0.88348E+03, 0.90798E+03,
0.93277E+03, 0.95784E+03, 0.98322E+03, 0.10089E+04, 0.10349E+04,
0.10611E+04, 0.10877E+04, 0.11146E+04, 0.11418E+04, 0.11693E+04,
0.11971E+04, 0.12253E+04, 0.12537E+04, 0.12825E+04, 0.13115E+04,
0.13409E+04, 0.13707E+04, 0.14007E+04, 0.14311E+04, 0.14617E+04,
0.14928E+04, 0.15241E+04, 0.15558E+04, 0.15877E+04, 0.16200E+04,
0.16527E+04, 0.16857E+04, 0.17190E+04, 0.17526E+04, 0.17866E+04,
0.18209E+04, 0.18555E+04, 0.18905E+04, 0.19258E+04, 0.19614E+04,
0.19974E+04, 0.20337E+04, 0.20703E+04, 0.21073E+04, 0.21446E+04,
0.21823E+04, 0.22203E+04, 0.22586E+04, 0.22973E+04, 0.23363E+04,
0.23756E+04, 0.24153E+04, 0.24553E+04, 0.24957E+04, 0.25364E+04,
0.25775E+04, 0.26189E+04, 0.26606E+04, 0.27027E+04, 0.27451E+04,
0.27879E+04, 0.28310E+04, 0.28745E+04, 0.29183E+04, 0.29625E+04,
0.30070E+04, 0.30518E+04, 0.30970E+04, 0.31425E+04, 0.31885E+04,
0.32347E+04, 0.32813E+04, 0.33282E+04, 0.33755E+04, 0.34231E+04,
0.34711E+04, 0.35194E+04, 0.35681E+04, 0.36172E+04, 0.36666E+04,
0.37163E+04])
# --------------- CO 28: M = 5, I = 3 ---------------------
M = 5
I = 3
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.23030E+02, 0.32495E+02, 0.41966E+02,
0.51443E+02, 0.60926E+02, 0.70415E+02, 0.79910E+02, 0.89410E+02,
0.98918E+02, 0.10843E+03, 0.11795E+03, 0.12749E+03, 0.13703E+03,
0.14659E+03, 0.15618E+03, 0.16579E+03, 0.17543E+03, 0.18511E+03,
0.19483E+03, 0.20461E+03, 0.21444E+03, 0.22434E+03, 0.23430E+03,
0.24435E+03, 0.25447E+03, 0.26468E+03, 0.27499E+03, 0.28540E+03,
0.29591E+03, 0.30652E+03, 0.31725E+03, 0.32810E+03, 0.33906E+03,
0.35014E+03, 0.36136E+03, 0.37270E+03, 0.38417E+03, 0.39577E+03,
0.40752E+03, 0.41940E+03, 0.43142E+03, 0.44358E+03, 0.45589E+03,
0.46834E+03, 0.48094E+03, 0.49369E+03, 0.50659E+03, 0.51964E+03,
0.53284E+03, 0.54619E+03, 0.55971E+03, 0.57337E+03, 0.58719E+03,
0.60117E+03, 0.61530E+03, 0.62959E+03, 0.64405E+03, 0.65866E+03,
0.67343E+03, 0.68837E+03, 0.70346E+03, 0.71872E+03, 0.73414E+03,
0.74972E+03, 0.76547E+03, 0.78138E+03, 0.79745E+03, 0.81369E+03,
0.83010E+03, 0.84667E+03, 0.86341E+03, 0.88031E+03, 0.89738E+03,
0.91462E+03, 0.93202E+03, 0.94960E+03, 0.96734E+03, 0.98524E+03,
0.10033E+04, 0.10216E+04, 0.10400E+04, 0.10586E+04, 0.10773E+04,
0.10962E+04, 0.11153E+04, 0.11346E+04, 0.11540E+04, 0.11737E+04,
0.11934E+04, 0.12134E+04, 0.12335E+04, 0.12538E+04, 0.12743E+04,
0.12949E+04, 0.13157E+04, 0.13367E+04, 0.13578E+04, 0.13792E+04,
0.14007E+04, 0.14223E+04, 0.14442E+04, 0.14662E+04, 0.14884E+04,
0.15108E+04, 0.15333E+04, 0.15560E+04, 0.15789E+04, 0.16020E+04,
0.16252E+04, 0.16486E+04, 0.16722E+04, 0.16960E+04, 0.17199E+04,
0.17441E+04, 0.17684E+04, 0.17928E+04, 0.18175E+04, 0.18423E+04,
0.18673E+04])
# --------------- CO 27: M = 5, I = 4 ---------------------
M = 5
I = 4
TIPS_GSI_HASH[(M, I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M, I)] = float32([0.13505E+03, 0.19054E+03, 0.24606E+03,
0.30161E+03, 0.35720E+03, 0.41283E+03, 0.46848E+03, 0.52418E+03,
0.57991E+03, 0.63568E+03, 0.69149E+03, 0.74737E+03, 0.80332E+03,
0.85937E+03, 0.91553E+03, 0.97183E+03, 0.10283E+04, 0.10850E+04,
0.11420E+04, 0.11992E+04, 0.12568E+04, 0.13147E+04, 0.13730E+04,
0.14318E+04, 0.14910E+04, 0.15507E+04, 0.16110E+04, 0.16718E+04,
0.17332E+04, 0.17952E+04, 0.18579E+04, 0.19212E+04, 0.19852E+04,
0.20499E+04, 0.21153E+04, 0.21815E+04, 0.22484E+04, 0.23161E+04,
0.23846E+04, 0.24539E+04, 0.25240E+04, 0.25949E+04, 0.26666E+04,
0.27392E+04, 0.28127E+04, 0.28869E+04, 0.29621E+04, 0.30381E+04,
0.31150E+04, 0.31928E+04, 0.32715E+04, 0.33511E+04, 0.34316E+04,
0.35129E+04, 0.35952E+04, 0.36785E+04, 0.37626E+04, 0.38477E+04,
0.39336E+04, 0.40206E+04, 0.41084E+04, 0.41972E+04, 0.42869E+04,
0.43776E+04, 0.44692E+04, 0.45618E+04, 0.46553E+04, 0.47498E+04,
0.48452E+04, 0.49416E+04, 0.50390E+04, 0.51373E+04, 0.52366E+04,
0.53368E+04, 0.54381E+04, 0.55403E+04, 0.56435E+04, 0.57476E+04,
0.58527E+04, 0.59588E+04, 0.60659E+04, 0.61739E+04, 0.62829E+04,
0.63930E+04, 0.65040E+04, 0.66160E+04, 0.67290E+04, 0.68429E+04,
0.69579E+04, 0.70739E+04, 0.71908E+04, 0.73088E+04, 0.74277E+04,
0.75477E+04, 0.76686E+04, 0.77905E+04, 0.79135E+04, 0.80374E+04,
0.81624E+04, 0.82883E+04, 0.84153E+04, 0.85432E+04, 0.86722E+04,
0.88022E+04, 0.89331E+04, 0.90651E+04, 0.91982E+04, 0.93322E+04,
0.94672E+04, 0.96033E+04, 0.97404E+04, 0.98785E+04, 0.10018E+05,
0.10158E+05, 0.10299E+05, 0.10441E+05, 0.10584E+05, 0.10728E+05,
0.10874E+05])
# --------------- CO 38: M = 5, I = 5 ---------------------
M = 5
I = 5
TIPS_GSI_HASH[(M, I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M, I)] = float32([0.48264E+02, 0.68112E+02, 0.87974E+02,
0.10785E+03, 0.12773E+03, 0.14763E+03, 0.16754E+03, 0.18747E+03,
0.20741E+03, 0.22736E+03, 0.24733E+03, 0.26732E+03, 0.28735E+03,
0.30741E+03, 0.32752E+03, 0.34770E+03, 0.36794E+03, 0.38828E+03,
0.40871E+03, 0.42926E+03, 0.44994E+03, 0.47077E+03, 0.49175E+03,
0.51290E+03, 0.53424E+03, 0.55578E+03, 0.57752E+03, 0.59948E+03,
0.62166E+03, 0.64409E+03, 0.66676E+03, 0.68969E+03, 0.71287E+03,
0.73633E+03, 0.76006E+03, 0.78407E+03, 0.80836E+03, 0.83295E+03,
0.85784E+03, 0.88302E+03, 0.90851E+03, 0.93431E+03, 0.96042E+03,
0.98686E+03, 0.10136E+04, 0.10407E+04, 0.10681E+04, 0.10958E+04,
0.11238E+04, 0.11522E+04, 0.11809E+04, 0.12100E+04, 0.12393E+04,
0.12691E+04, 0.12991E+04, 0.13295E+04, 0.13603E+04, 0.13914E+04,
0.14228E+04, 0.14546E+04, 0.14867E+04, 0.15192E+04, 0.15520E+04,
0.15852E+04, 0.16187E+04, 0.16526E+04, 0.16869E+04, 0.17215E+04,
0.17564E+04, 0.17917E+04, 0.18274E+04, 0.18634E+04, 0.18998E+04,
0.19365E+04, 0.19736E+04, 0.20111E+04, 0.20489E+04, 0.20871E+04,
0.21256E+04, 0.21645E+04, 0.22038E+04, 0.22434E+04, 0.22834E+04,
0.23238E+04, 0.23645E+04, 0.24056E+04, 0.24471E+04, 0.24889E+04,
0.25311E+04, 0.25736E+04, 0.26166E+04, 0.26599E+04, 0.27035E+04,
0.27476E+04, 0.27920E+04, 0.28368E+04, 0.28819E+04, 0.29275E+04,
0.29733E+04, 0.30196E+04, 0.30662E+04, 0.31133E+04, 0.31606E+04,
0.32084E+04, 0.32565E+04, 0.33050E+04, 0.33539E+04, 0.34032E+04,
0.34528E+04, 0.35028E+04, 0.35532E+04, 0.36040E+04, 0.36551E+04,
0.37067E+04, 0.37586E+04, 0.38108E+04, 0.38635E+04, 0.39165E+04,
0.39699E+04])
# --------------- CO 37: M = 5, I = 6 ---------------------
M = 5
I = 6
TIPS_GSI_HASH[(M, I)] = __FloatType__(12.)
TIPS_ISO_HASH[(M, I)] = float32([0.28271E+03, 0.39894E+03, 0.51524E+03,
0.63162E+03, 0.74807E+03, 0.86459E+03, 0.98119E+03, 0.10979E+04,
0.12146E+04, 0.13314E+04, 0.14484E+04, 0.15654E+04, 0.16826E+04,
0.18000E+04, 0.19176E+04, 0.20355E+04, 0.21538E+04, 0.22725E+04,
0.23916E+04, 0.25114E+04, 0.26318E+04, 0.27529E+04, 0.28749E+04,
0.29977E+04, 0.31215E+04, 0.32463E+04, 0.33721E+04, 0.34991E+04,
0.36274E+04, 0.37568E+04, 0.38876E+04, 0.40197E+04, 0.41533E+04,
0.42882E+04, 0.44247E+04, 0.45626E+04, 0.47022E+04, 0.48433E+04,
0.49860E+04, 0.51304E+04, 0.52763E+04, 0.54240E+04, 0.55735E+04,
0.57246E+04, 0.58775E+04, 0.60321E+04, 0.61886E+04, 0.63468E+04,
0.65068E+04, 0.66687E+04, 0.68324E+04, 0.69980E+04, 0.71654E+04,
0.73347E+04, 0.75058E+04, 0.76789E+04, 0.78539E+04, 0.80307E+04,
0.82096E+04, 0.83903E+04, 0.85729E+04, 0.87576E+04, 0.89441E+04,
0.91326E+04, 0.93230E+04, 0.95154E+04, 0.97098E+04, 0.99061E+04,
0.10104E+05, 0.10305E+05, 0.10507E+05, 0.10711E+05, 0.10918E+05,
0.11126E+05, 0.11336E+05, 0.11549E+05, 0.11763E+05, 0.11979E+05,
0.12198E+05, 0.12418E+05, 0.12640E+05, 0.12865E+05, 0.13091E+05,
0.13320E+05, 0.13550E+05, 0.13783E+05, 0.14018E+05, 0.14254E+05,
0.14493E+05, 0.14734E+05, 0.14977E+05, 0.15221E+05, 0.15468E+05,
0.15718E+05, 0.15969E+05, 0.16222E+05, 0.16477E+05, 0.16734E+05,
0.16994E+05, 0.17255E+05, 0.17519E+05, 0.17784E+05, 0.18052E+05,
0.18322E+05, 0.18594E+05, 0.18868E+05, 0.19144E+05, 0.19422E+05,
0.19703E+05, 0.19985E+05, 0.20270E+05, 0.20556E+05, 0.20845E+05,
0.21136E+05, 0.21429E+05, 0.21724E+05, 0.22021E+05, 0.22320E+05,
0.22622E+05])
# --------------- CH4 211: M = 6, I = 1 ---------------------
M = 6
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.54800E+02, 0.91500E+02, 0.13410E+03,
0.18180E+03, 0.23410E+03, 0.29070E+03, 0.35140E+03, 0.41600E+03,
0.48450E+03, 0.55720E+03, 0.63420E+03, 0.71600E+03, 0.80310E+03,
0.89590E+03, 0.99520E+03, 0.11017E+04, 0.12161E+04, 0.13393E+04,
0.14721E+04, 0.16155E+04, 0.17706E+04, 0.19384E+04, 0.21202E+04,
0.23172E+04, 0.25307E+04, 0.27624E+04, 0.30137E+04, 0.32864E+04,
0.35823E+04, 0.39034E+04, 0.42519E+04, 0.46300E+04, 0.50402E+04,
0.54853E+04, 0.59679E+04, 0.64913E+04, 0.70588E+04, 0.76739E+04,
0.83404E+04, 0.90625E+04, 0.98446E+04, 0.10691E+05, 0.11608E+05,
0.12600E+05, 0.13674E+05, 0.14835E+05, 0.16090E+05, 0.17447E+05,
0.18914E+05, 0.20500E+05, 0.22212E+05, 0.24063E+05, 0.26061E+05,
0.28218E+05, 0.30548E+05, 0.33063E+05, 0.35778E+05, 0.38708E+05,
0.41871E+05, 0.45284E+05, 0.48970E+05, 0.52940E+05, 0.57230E+05,
0.61860E+05, 0.66860E+05, 0.72250E+05, 0.78070E+05, 0.84350E+05,
0.91130E+05, 0.98450E+05, 0.10635E+06, 0.11488E+06, 0.12408E+06,
0.13403E+06, 0.14480E+06, 0.15640E+06, 0.16890E+06, 0.18240E+06,
0.19700E+06, 0.21280E+06, 0.22980E+06, 0.24830E+06, 0.26820E+06,
0.28970E+06, 0.31290E+06, 0.33800E+06, 0.36520E+06, 0.39450E+06,
0.42600E+06, 0.46000E+06, 0.49700E+06, 0.53700E+06, 0.58100E+06,
0.62700E+06, 0.67800E+06, 0.73300E+06, 0.79200E+06, 0.85600E+06,
0.92500E+06, 0.10000E+07, 0.10800E+07, 0.11670E+07, 0.12610E+07,
0.13620E+07, 0.14720E+07, 0.15910E+07, 0.17190E+07, 0.18600E+07,
0.20100E+07, 0.21700E+07, 0.23400E+07, 0.25300E+07, 0.27300E+07,
0.29500E+07, 0.31800E+07, 0.34300E+07, 0.37000E+07, 0.39900E+07,
0.42856E+07])
# --------------- CH4 311: M = 6, I = 2 ---------------------
M = 6
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M, I)] = float32([0.10958E+03, 0.18304E+03, 0.26818E+03,
0.36356E+03, 0.46820E+03, 0.58141E+03, 0.70270E+03, 0.83186E+03,
0.96893E+03, 0.11142E+04, 0.12682E+04, 0.14316E+04, 0.16055E+04,
0.17909E+04, 0.19891E+04, 0.22016E+04, 0.24297E+04, 0.26752E+04,
0.29399E+04, 0.32255E+04, 0.35342E+04, 0.38680E+04, 0.42294E+04,
0.46208E+04, 0.50449E+04, 0.55046E+04, 0.60030E+04, 0.65434E+04,
0.71293E+04, 0.77646E+04, 0.84535E+04, 0.92004E+04, 0.10010E+05,
0.10888E+05, 0.11838E+05, 0.12869E+05, 0.13984E+05, 0.15193E+05,
0.16501E+05, 0.17916E+05, 0.19448E+05, 0.21104E+05, 0.22895E+05,
0.24830E+05, 0.26921E+05, 0.29180E+05, 0.31618E+05, 0.34250E+05,
0.37090E+05, 0.40152E+05, 0.43454E+05, 0.47012E+05, 0.50845E+05,
0.54973E+05, 0.59416E+05, 0.64197E+05, 0.69340E+05, 0.74870E+05,
0.80813E+05, 0.87198E+05, 0.94055E+05, 0.10142E+06, 0.10932E+06,
0.11779E+06, 0.12688E+06, 0.13662E+06, 0.14706E+06, 0.15824E+06,
0.17021E+06, 0.18302E+06, 0.19673E+06, 0.21139E+06, 0.22706E+06,
0.24381E+06, 0.26171E+06, 0.28082E+06, 0.30122E+06, 0.32299E+06,
0.34621E+06, 0.37097E+06, 0.39737E+06, 0.42551E+06, 0.45548E+06,
0.48739E+06, 0.52136E+06, 0.55752E+06, 0.59598E+06, 0.63688E+06,
0.68036E+06, 0.72657E+06, 0.77566E+06, 0.82780E+06, 0.88316E+06,
0.94191E+06, 0.10043E+07, 0.10704E+07, 0.11405E+07, 0.12148E+07,
0.12936E+07, 0.13770E+07, 0.14654E+07, 0.15589E+07, 0.16579E+07,
0.17627E+07, 0.18736E+07, 0.19908E+07, 0.21147E+07, 0.22456E+07,
0.23840E+07, 0.25301E+07, 0.26844E+07, 0.28474E+07, 0.30193E+07,
0.32007E+07, 0.33921E+07, 0.35939E+07, 0.38067E+07, 0.40310E+07,
0.42673E+07])
# --------------- CH4 212: M = 6, I = 3 ---------------------
M = 6
I = 3
TIPS_GSI_HASH[(M, I)] = __FloatType__(3.)
TIPS_ISO_HASH[(M, I)] = float32([0.44079E+03, 0.73786E+03, 0.10822E+04,
0.14679E+04, 0.18913E+04, 0.23497E+04, 0.28415E+04, 0.33665E+04,
0.39257E+04, 0.45211E+04, 0.51562E+04, 0.58349E+04, 0.65624E+04,
0.73445E+04, 0.81872E+04, 0.90978E+04, 0.10084E+05, 0.11153E+05,
0.12315E+05, 0.13579E+05, 0.14955E+05, 0.16455E+05, 0.18089E+05,
0.19871E+05, 0.21816E+05, 0.23937E+05, 0.26251E+05, 0.28776E+05,
0.31531E+05, 0.34535E+05, 0.37811E+05, 0.41384E+05, 0.45278E+05,
0.49521E+05, 0.54144E+05, 0.59178E+05, 0.64657E+05, 0.70621E+05,
0.77108E+05, 0.84161E+05, 0.91828E+05, 0.10016E+06, 0.10921E+06,
0.11903E+06, 0.12968E+06, 0.14124E+06, 0.15378E+06, 0.16736E+06,
0.18207E+06, 0.19800E+06, 0.21524E+06, 0.23389E+06, 0.25405E+06,
0.27585E+06, 0.29939E+06, 0.32482E+06, 0.35226E+06, 0.38186E+06,
0.41379E+06, 0.44821E+06, 0.48529E+06, 0.52522E+06, 0.56821E+06,
0.61447E+06, 0.66422E+06, 0.71771E+06, 0.77519E+06, 0.83693E+06,
0.90323E+06, 0.97438E+06, 0.10507E+07, 0.11326E+07, 0.12203E+07,
0.13143E+07, 0.14150E+07, 0.15228E+07, 0.16382E+07, 0.17616E+07,
0.18935E+07, 0.20346E+07, 0.21853E+07, 0.23463E+07, 0.25181E+07,
0.27016E+07, 0.28973E+07, 0.31060E+07, 0.33284E+07, 0.35655E+07,
0.38181E+07, 0.40870E+07, 0.43733E+07, 0.46780E+07, 0.50020E+07,
0.53467E+07, 0.57130E+07, 0.61023E+07, 0.65158E+07, 0.69549E+07,
0.74211E+07, 0.79158E+07, 0.84407E+07, 0.89973E+07, 0.95874E+07,
0.10213E+08, 0.10875E+08, 0.11577E+08, 0.12320E+08, 0.13107E+08,
0.13940E+08, 0.14820E+08, 0.15752E+08, 0.16736E+08, 0.17777E+08,
0.18877E+08, 0.20038E+08, 0.21265E+08, 0.22560E+08, 0.23927E+08,
0.25369E+08])
# --------------- CH4 312: M = 6, I = 4 ---------------------
M = 6
I = 4
TIPS_GSI_HASH[(M, I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M, I)] = float32([0.88231E+03, 0.14770E+04, 0.21661E+04,
0.29384E+04, 0.37859E+04, 0.47034E+04, 0.56879E+04, 0.67388E+04,
0.78581E+04, 0.90501E+04, 0.10321E+05, 0.11680E+05, 0.13136E+05,
0.14702E+05, 0.16389E+05, 0.18212E+05, 0.20186E+05, 0.22328E+05,
0.24654E+05, 0.27185E+05, 0.29941E+05, 0.32943E+05, 0.36216E+05,
0.39786E+05, 0.43681E+05, 0.47930E+05, 0.52567E+05, 0.57625E+05,
0.63144E+05, 0.69164E+05, 0.75730E+05, 0.82890E+05, 0.90693E+05,
0.99198E+05, 0.10846E+06, 0.11855E+06, 0.12954E+06, 0.14149E+06,
0.15450E+06, 0.16864E+06, 0.18402E+06, 0.20072E+06, 0.21886E+06,
0.23856E+06, 0.25993E+06, 0.28312E+06, 0.30825E+06, 0.33550E+06,
0.36501E+06, 0.39696E+06, 0.43155E+06, 0.46896E+06, 0.50942E+06,
0.55315E+06, 0.60039E+06, 0.65141E+06, 0.70648E+06, 0.76589E+06,
0.82997E+06, 0.89904E+06, 0.97346E+06, 0.10536E+07, 0.11399E+07,
0.12327E+07, 0.13326E+07, 0.14400E+07, 0.15554E+07, 0.16793E+07,
0.18124E+07, 0.19553E+07, 0.21085E+07, 0.22729E+07, 0.24490E+07,
0.26378E+07, 0.28400E+07, 0.30565E+07, 0.32881E+07, 0.35360E+07,
0.38010E+07, 0.40843E+07, 0.43870E+07, 0.47103E+07, 0.50555E+07,
0.54239E+07, 0.58169E+07, 0.62361E+07, 0.66830E+07, 0.71592E+07,
0.76666E+07, 0.82069E+07, 0.87820E+07, 0.93940E+07, 0.10045E+08,
0.10737E+08, 0.11473E+08, 0.12256E+08, 0.13086E+08, 0.13969E+08,
0.14905E+08, 0.15899E+08, 0.16954E+08, 0.18072E+08, 0.19258E+08,
0.20515E+08, 0.21847E+08, 0.23257E+08, 0.24750E+08, 0.26331E+08,
0.28004E+08, 0.29774E+08, 0.31646E+08, 0.33625E+08, 0.35716E+08,
0.37926E+08, 0.40261E+08, 0.42726E+08, 0.45329E+08, 0.48077E+08,
0.50975E+08])
# --------------- O2 66: M = 7, I = 1 ---------------------
M = 7
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.44334E+02, 0.62460E+02, 0.80596E+02,
0.98738E+02, 0.11688E+03, 0.13503E+03, 0.15319E+03, 0.17136E+03,
0.18954E+03, 0.20775E+03, 0.22600E+03, 0.24431E+03, 0.26270E+03,
0.28119E+03, 0.29981E+03, 0.31857E+03, 0.33750E+03, 0.35662E+03,
0.37594E+03, 0.39550E+03, 0.41529E+03, 0.43535E+03, 0.45568E+03,
0.47630E+03, 0.49722E+03, 0.51844E+03, 0.53998E+03, 0.56185E+03,
0.58406E+03, 0.60660E+03, 0.62949E+03, 0.65274E+03, 0.67635E+03,
0.70031E+03, 0.72465E+03, 0.74936E+03, 0.77444E+03, 0.79990E+03,
0.82574E+03, 0.85197E+03, 0.87858E+03, 0.90558E+03, 0.93297E+03,
0.96076E+03, 0.98895E+03, 0.10175E+04, 0.10465E+04, 0.10759E+04,
0.11057E+04, 0.11359E+04, 0.11665E+04, 0.11976E+04, 0.12290E+04,
0.12609E+04, 0.12931E+04, 0.13258E+04, 0.13590E+04, 0.13925E+04,
0.14265E+04, 0.14609E+04, 0.14958E+04, 0.15311E+04, 0.15669E+04,
0.16031E+04, 0.16397E+04, 0.16768E+04, 0.17144E+04, 0.17524E+04,
0.17909E+04, 0.18298E+04, 0.18692E+04, 0.19091E+04, 0.19495E+04,
0.19904E+04, 0.20318E+04, 0.20736E+04, 0.21160E+04, 0.21588E+04,
0.22022E+04, 0.22461E+04, 0.22905E+04, 0.23354E+04, 0.23809E+04,
0.24268E+04, 0.24734E+04, 0.25204E+04, 0.25680E+04, 0.26162E+04,
0.26649E+04, 0.27142E+04, 0.27641E+04, 0.28145E+04, 0.28655E+04,
0.29171E+04, 0.29693E+04, 0.30221E+04, 0.30755E+04, 0.31295E+04,
0.31841E+04, 0.32393E+04, 0.32951E+04, 0.33516E+04, 0.34087E+04,
0.34665E+04, 0.35249E+04, 0.35839E+04, 0.36436E+04, 0.37040E+04,
0.37650E+04, 0.38267E+04, 0.38891E+04, 0.39522E+04, 0.40159E+04,
0.40804E+04, 0.41455E+04, 0.42114E+04, 0.42780E+04, 0.43452E+04,
0.44132E+04])
# --------------- O2 68: M = 7, I = 2 ---------------------
M = 7
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.89206E+02, 0.12759E+03, 0.16600E+03,
0.20442E+03, 0.24285E+03, 0.28128E+03, 0.31973E+03, 0.35821E+03,
0.39672E+03, 0.43530E+03, 0.47398E+03, 0.51281E+03, 0.55183E+03,
0.59108E+03, 0.63062E+03, 0.67051E+03, 0.71078E+03, 0.75148E+03,
0.79265E+03, 0.83435E+03, 0.87659E+03, 0.91941E+03, 0.96285E+03,
0.10069E+04, 0.10517E+04, 0.10971E+04, 0.11432E+04, 0.11901E+04,
0.12377E+04, 0.12861E+04, 0.13352E+04, 0.13851E+04, 0.14358E+04,
0.14872E+04, 0.15395E+04, 0.15926E+04, 0.16466E+04, 0.17013E+04,
0.17569E+04, 0.18134E+04, 0.18706E+04, 0.19288E+04, 0.19877E+04,
0.20476E+04, 0.21083E+04, 0.21698E+04, 0.22323E+04, 0.22956E+04,
0.23598E+04, 0.24248E+04, 0.24908E+04, 0.25576E+04, 0.26253E+04,
0.26940E+04, 0.27635E+04, 0.28339E+04, 0.29052E+04, 0.29775E+04,
0.30506E+04, 0.31247E+04, 0.31997E+04, 0.32756E+04, 0.33524E+04,
0.34302E+04, 0.35089E+04, 0.35885E+04, 0.36691E+04, 0.37506E+04,
0.38331E+04, 0.39166E+04, 0.40010E+04, 0.40864E+04, 0.41727E+04,
0.42601E+04, 0.43484E+04, 0.44377E+04, 0.45280E+04, 0.46193E+04,
0.47116E+04, 0.48049E+04, 0.48992E+04, 0.49946E+04, 0.50909E+04,
0.51883E+04, 0.52868E+04, 0.53863E+04, 0.54868E+04, 0.55884E+04,
0.56911E+04, 0.57949E+04, 0.58997E+04, 0.60056E+04, 0.61126E+04,
0.62207E+04, 0.63298E+04, 0.64401E+04, 0.65516E+04, 0.66641E+04,
0.67778E+04, 0.68926E+04, 0.70085E+04, 0.71256E+04, 0.72439E+04,
0.73633E+04, 0.74839E+04, 0.76056E+04, 0.77286E+04, 0.78527E+04,
0.79781E+04, 0.81046E+04, 0.82324E+04, 0.83613E+04, 0.84915E+04,
0.86229E+04, 0.87556E+04, 0.88895E+04, 0.90247E+04, 0.91611E+04,
0.92988E+04])
# --------------- O2 67: M = 7, I = 3 ---------------------
M = 7
I = 3
TIPS_GSI_HASH[(M, I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M, I)] = float32([0.52071E+03, 0.74484E+03, 0.96908E+03,
0.11934E+04, 0.14177E+04, 0.16422E+04, 0.18667E+04, 0.20913E+04,
0.23161E+04, 0.25413E+04, 0.27671E+04, 0.29936E+04, 0.32212E+04,
0.34501E+04, 0.36806E+04, 0.39130E+04, 0.41476E+04, 0.43846E+04,
0.46242E+04, 0.48668E+04, 0.51125E+04, 0.53615E+04, 0.56140E+04,
0.58701E+04, 0.61300E+04, 0.63938E+04, 0.66617E+04, 0.69337E+04,
0.72099E+04, 0.74904E+04, 0.77754E+04, 0.80647E+04, 0.83586E+04,
0.86571E+04, 0.89602E+04, 0.92680E+04, 0.95805E+04, 0.98977E+04,
0.10220E+05, 0.10547E+05, 0.10878E+05, 0.11215E+05, 0.11556E+05,
0.11903E+05, 0.12254E+05, 0.12611E+05, 0.12972E+05, 0.13338E+05,
0.13710E+05, 0.14086E+05, 0.14468E+05, 0.14855E+05, 0.15247E+05,
0.15644E+05, 0.16046E+05, 0.16453E+05, 0.16866E+05, 0.17283E+05,
0.17706E+05, 0.18135E+05, 0.18568E+05, 0.19007E+05, 0.19452E+05,
0.19901E+05, 0.20356E+05, 0.20817E+05, 0.21283E+05, 0.21754E+05,
0.22231E+05, 0.22713E+05, 0.23201E+05, 0.23695E+05, 0.24194E+05,
0.24699E+05, 0.25209E+05, 0.25725E+05, 0.26247E+05, 0.26775E+05,
0.27308E+05, 0.27847E+05, 0.28393E+05, 0.28944E+05, 0.29500E+05,
0.30063E+05, 0.30632E+05, 0.31207E+05, 0.31788E+05, 0.32375E+05,
0.32968E+05, 0.33568E+05, 0.34173E+05, 0.34785E+05, 0.35403E+05,
0.36028E+05, 0.36659E+05, 0.37296E+05, 0.37939E+05, 0.38590E+05,
0.39246E+05, 0.39909E+05, 0.40579E+05, 0.41256E+05, 0.41939E+05,
0.42629E+05, 0.43325E+05, 0.44029E+05, 0.44739E+05, 0.45456E+05,
0.46180E+05, 0.46911E+05, 0.47649E+05, 0.48394E+05, 0.49146E+05,
0.49905E+05, 0.50671E+05, 0.51445E+05, 0.52226E+05, 0.53014E+05,
0.53809E+05])
# --------------- NO 46: M = 8, I = 1 ---------------------
M = 8
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(3.)
TIPS_ISO_HASH[(M, I)] = float32([0.15840E+03, 0.23971E+03, 0.33080E+03,
0.42907E+03, 0.53251E+03, 0.63972E+03, 0.74975E+03, 0.86195E+03,
0.97582E+03, 0.10911E+04, 0.12074E+04, 0.13248E+04, 0.14430E+04,
0.15621E+04, 0.16820E+04, 0.18027E+04, 0.19243E+04, 0.20468E+04,
0.21703E+04, 0.22948E+04, 0.24204E+04, 0.25472E+04, 0.26753E+04,
0.28046E+04, 0.29354E+04, 0.30676E+04, 0.32013E+04, 0.33365E+04,
0.34734E+04, 0.36120E+04, 0.37522E+04, 0.38942E+04, 0.40379E+04,
0.41835E+04, 0.43310E+04, 0.44803E+04, 0.46316E+04, 0.47849E+04,
0.49400E+04, 0.50972E+04, 0.52564E+04, 0.54176E+04, 0.55809E+04,
0.57462E+04, 0.59137E+04, 0.60832E+04, 0.62548E+04, 0.64286E+04,
0.66045E+04, 0.67825E+04, 0.69628E+04, 0.71451E+04, 0.73297E+04,
0.75164E+04, 0.77053E+04, 0.78964E+04, 0.80897E+04, 0.82853E+04,
0.84830E+04, 0.86830E+04, 0.88852E+04, 0.90896E+04, 0.92963E+04,
0.95052E+04, 0.97164E+04, 0.99297E+04, 0.10145E+05, 0.10363E+05,
0.10583E+05, 0.10806E+05, 0.11031E+05, 0.11258E+05, 0.11487E+05,
0.11718E+05, 0.11952E+05, 0.12188E+05, 0.12426E+05, 0.12667E+05,
0.12910E+05, 0.13155E+05, 0.13403E+05, 0.13652E+05, 0.13905E+05,
0.14159E+05, 0.14416E+05, 0.14675E+05, 0.14936E+05, 0.15199E+05,
0.15465E+05, 0.15733E+05, 0.16004E+05, 0.16277E+05, 0.16552E+05,
0.16829E+05, 0.17109E+05, 0.17391E+05, 0.17675E+05, 0.17962E+05,
0.18251E+05, 0.18542E+05, 0.18836E+05, 0.19131E+05, 0.19430E+05,
0.19730E+05, 0.20033E+05, 0.20338E+05, 0.20646E+05, 0.20955E+05,
0.21268E+05, 0.21582E+05, 0.21899E+05, 0.22218E+05, 0.22539E+05,
0.22863E+05, 0.23189E+05, 0.23518E+05, 0.23848E+05, 0.24181E+05,
0.24517E+05])
# --------------- NO 56: M = 8, I = 2 ---------------------
M = 8
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M, I)] = float32([0.10942E+03, 0.16560E+03, 0.22856E+03,
0.29647E+03, 0.36795E+03, 0.44204E+03, 0.51808E+03, 0.59561E+03,
0.67432E+03, 0.75396E+03, 0.83439E+03, 0.91551E+03, 0.99725E+03,
0.10796E+04, 0.11625E+04, 0.12460E+04, 0.13302E+04, 0.14150E+04,
0.15005E+04, 0.15868E+04, 0.16739E+04, 0.17618E+04, 0.18506E+04,
0.19404E+04, 0.20311E+04, 0.21229E+04, 0.22158E+04, 0.23098E+04,
0.24050E+04, 0.25013E+04, 0.25989E+04, 0.26976E+04, 0.27977E+04,
0.28991E+04, 0.30018E+04, 0.31058E+04, 0.32112E+04, 0.33180E+04,
0.34262E+04, 0.35358E+04, 0.36468E+04, 0.37593E+04, 0.38732E+04,
0.39885E+04, 0.41054E+04, 0.42237E+04, 0.43436E+04, 0.44649E+04,
0.45877E+04, 0.47121E+04, 0.48379E+04, 0.49654E+04, 0.50943E+04,
0.52248E+04, 0.53568E+04, 0.54904E+04, 0.56255E+04, 0.57622E+04,
0.59004E+04, 0.60403E+04, 0.61816E+04, 0.63246E+04, 0.64692E+04,
0.66152E+04, 0.67630E+04, 0.69123E+04, 0.70631E+04, 0.72156E+04,
0.73696E+04, 0.75253E+04, 0.76825E+04, 0.78414E+04, 0.80018E+04,
0.81638E+04, 0.83275E+04, 0.84927E+04, 0.86596E+04, 0.88280E+04,
0.89981E+04, 0.91698E+04, 0.93430E+04, 0.95180E+04, 0.96945E+04,
0.98726E+04, 0.10052E+05, 0.10234E+05, 0.10417E+05, 0.10601E+05,
0.10788E+05, 0.10975E+05, 0.11165E+05, 0.11356E+05, 0.11549E+05,
0.11743E+05, 0.11939E+05, 0.12137E+05, 0.12336E+05, 0.12537E+05,
0.12739E+05, 0.12943E+05, 0.13149E+05, 0.13356E+05, 0.13565E+05,
0.13776E+05, 0.13988E+05, 0.14202E+05, 0.14418E+05, 0.14635E+05,
0.14853E+05, 0.15074E+05, 0.15296E+05, 0.15520E+05, 0.15745E+05,
0.15972E+05, 0.16200E+05, 0.16431E+05, 0.16663E+05, 0.16896E+05,
0.17131E+05])
# --------------- NO 48: M = 8, I = 3 ---------------------
M = 8
I = 3
TIPS_GSI_HASH[(M, I)] = __FloatType__(3.)
TIPS_ISO_HASH[(M, I)] = float32([0.16695E+03, 0.25269E+03, 0.34876E+03,
0.45239E+03, 0.56148E+03, 0.67455E+03, 0.79059E+03, 0.90891E+03,
0.10290E+04, 0.11506E+04, 0.12733E+04, 0.13971E+04, 0.15219E+04,
0.16476E+04, 0.17742E+04, 0.19017E+04, 0.20302E+04, 0.21598E+04,
0.22904E+04, 0.24223E+04, 0.25553E+04, 0.26897E+04, 0.28255E+04,
0.29628E+04, 0.31016E+04, 0.32420E+04, 0.33842E+04, 0.35280E+04,
0.36736E+04, 0.38211E+04, 0.39704E+04, 0.41217E+04, 0.42750E+04,
0.44302E+04, 0.45876E+04, 0.47469E+04, 0.49084E+04, 0.50720E+04,
0.52378E+04, 0.54058E+04, 0.55759E+04, 0.57483E+04, 0.59230E+04,
0.60999E+04, 0.62791E+04, 0.64605E+04, 0.66443E+04, 0.68304E+04,
0.70187E+04, 0.72095E+04, 0.74026E+04, 0.75980E+04, 0.77958E+04,
0.79960E+04, 0.81986E+04, 0.84036E+04, 0.86109E+04, 0.88207E+04,
0.90328E+04, 0.92474E+04, 0.94644E+04, 0.96839E+04, 0.99057E+04,
0.10130E+05, 0.10357E+05, 0.10586E+05, 0.10817E+05, 0.11052E+05,
0.11288E+05, 0.11527E+05, 0.11768E+05, 0.12012E+05, 0.12259E+05,
0.12507E+05, 0.12759E+05, 0.13012E+05, 0.13269E+05, 0.13527E+05,
0.13788E+05, 0.14052E+05, 0.14318E+05, 0.14587E+05, 0.14858E+05,
0.15131E+05, 0.15408E+05, 0.15686E+05, 0.15967E+05, 0.16251E+05,
0.16537E+05, 0.16825E+05, 0.17116E+05, 0.17410E+05, 0.17706E+05,
0.18004E+05, 0.18305E+05, 0.18609E+05, 0.18915E+05, 0.19224E+05,
0.19535E+05, 0.19848E+05, 0.20164E+05, 0.20483E+05, 0.20804E+05,
0.21127E+05, 0.21453E+05, 0.21782E+05, 0.22113E+05, 0.22447E+05,
0.22783E+05, 0.23122E+05, 0.23463E+05, 0.23807E+05, 0.24153E+05,
0.24502E+05, 0.24853E+05, 0.25207E+05, 0.25563E+05, 0.25922E+05,
0.26283E+05])
# --------------- SO2 626: M = 9, I = 1 ---------------------
M = 9
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.52899E+03, 0.89171E+03, 0.13139E+04,
0.17915E+04, 0.23246E+04, 0.29155E+04, 0.35675E+04, 0.42848E+04,
0.50723E+04, 0.59352E+04, 0.68794E+04, 0.79109E+04, 0.90366E+04,
0.10264E+05, 0.11599E+05, 0.13052E+05, 0.14629E+05, 0.16340E+05,
0.18193E+05, 0.20199E+05, 0.22366E+05, 0.24704E+05, 0.27225E+05,
0.29938E+05, 0.32855E+05, 0.35987E+05, 0.39346E+05, 0.42944E+05,
0.46794E+05, 0.50909E+05, 0.55302E+05, 0.59986E+05, 0.64977E+05,
0.70288E+05, 0.75934E+05, 0.81931E+05, 0.88294E+05, 0.95040E+05,
0.10219E+06, 0.10975E+06, 0.11774E+06, 0.12619E+06, 0.13511E+06,
0.14452E+06, 0.15443E+06, 0.16487E+06, 0.17586E+06, 0.18742E+06,
0.19957E+06, 0.21234E+06, 0.22573E+06, 0.23978E+06, 0.25451E+06,
0.26995E+06, 0.28611E+06, 0.30302E+06, 0.32071E+06, 0.33920E+06,
0.35852E+06, 0.37869E+06, 0.39974E+06, 0.42171E+06, 0.44461E+06,
0.46848E+06, 0.49334E+06, 0.51922E+06, 0.54617E+06, 0.57419E+06,
0.60334E+06, 0.63363E+06, 0.66511E+06, 0.69780E+06, 0.73174E+06,
0.76696E+06, 0.80349E+06, 0.84138E+06, 0.88066E+06, 0.92136E+06,
0.96352E+06, 0.10072E+07, 0.10524E+07, 0.10992E+07, 0.11475E+07,
0.11976E+07, 0.12493E+07, 0.13028E+07, 0.13580E+07, 0.14151E+07,
0.14741E+07, 0.15349E+07, 0.15977E+07, 0.16625E+07, 0.17293E+07,
0.17982E+07, 0.18693E+07, 0.19425E+07, 0.20180E+07, 0.20958E+07,
0.21758E+07, 0.22583E+07, 0.23432E+07, 0.24305E+07, 0.25204E+07,
0.26129E+07, 0.27080E+07, 0.28058E+07, 0.29064E+07, 0.30097E+07,
0.31159E+07, 0.32250E+07, 0.33371E+07, 0.34522E+07, 0.35705E+07,
0.36918E+07, 0.38164E+07, 0.39442E+07, 0.40754E+07, 0.42099E+07,
0.43479E+07])
# --------------- SO2 646: M = 9, I = 2 ---------------------
M = 9
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.53140E+03, 0.89578E+03, 0.13199E+04,
0.17997E+04, 0.23353E+04, 0.29288E+04, 0.35837E+04, 0.43043E+04,
0.50953E+04, 0.59621E+04, 0.69104E+04, 0.79465E+04, 0.90772E+04,
0.10310E+05, 0.11651E+05, 0.13110E+05, 0.14694E+05, 0.16413E+05,
0.18274E+05, 0.20289E+05, 0.22465E+05, 0.24814E+05, 0.27345E+05,
0.30070E+05, 0.33000E+05, 0.36145E+05, 0.39519E+05, 0.43133E+05,
0.46999E+05, 0.51132E+05, 0.55544E+05, 0.60248E+05, 0.65260E+05,
0.70594E+05, 0.76264E+05, 0.82287E+05, 0.88678E+05, 0.95453E+05,
0.10263E+06, 0.11022E+06, 0.11825E+06, 0.12674E+06, 0.13569E+06,
0.14514E+06, 0.15510E+06, 0.16558E+06, 0.17662E+06, 0.18823E+06,
0.20043E+06, 0.21325E+06, 0.22670E+06, 0.24081E+06, 0.25561E+06,
0.27111E+06, 0.28733E+06, 0.30432E+06, 0.32208E+06, 0.34065E+06,
0.36005E+06, 0.38031E+06, 0.40145E+06, 0.42351E+06, 0.44651E+06,
0.47047E+06, 0.49544E+06, 0.52144E+06, 0.54849E+06, 0.57664E+06,
0.60591E+06, 0.63633E+06, 0.66794E+06, 0.70077E+06, 0.73485E+06,
0.77022E+06, 0.80691E+06, 0.84496E+06, 0.88440E+06, 0.92527E+06,
0.96761E+06, 0.10115E+07, 0.10568E+07, 0.11038E+07, 0.11524E+07,
0.12027E+07, 0.12546E+07, 0.13083E+07, 0.13638E+07, 0.14211E+07,
0.14803E+07, 0.15414E+07, 0.16045E+07, 0.16695E+07, 0.17366E+07,
0.18059E+07, 0.18772E+07, 0.19507E+07, 0.20265E+07, 0.21046E+07,
0.21850E+07, 0.22678E+07, 0.23531E+07, 0.24408E+07, 0.25310E+07,
0.26239E+07, 0.27194E+07, 0.28176E+07, 0.29186E+07, 0.30224E+07,
0.31290E+07, 0.32386E+07, 0.33512E+07, 0.34668E+07, 0.35855E+07,
0.37074E+07, 0.38324E+07, 0.39608E+07, 0.40925E+07, 0.42276E+07,
0.43662E+07])
# --------------- NO2 646: M = 10, I = 1 ---------------------
M = 10
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(3.)
TIPS_ISO_HASH[(M, I)] = float32([0.12046E+04, 0.20297E+04, 0.29875E+04,
0.40626E+04, 0.52463E+04, 0.65350E+04, 0.79286E+04, 0.94298E+04,
0.11043E+05, 0.12776E+05, 0.14634E+05, 0.16627E+05, 0.18765E+05,
0.21056E+05, 0.23511E+05, 0.26143E+05, 0.28961E+05, 0.31979E+05,
0.35209E+05, 0.38663E+05, 0.42355E+05, 0.46300E+05, 0.50510E+05,
0.55001E+05, 0.59787E+05, 0.64884E+05, 0.70308E+05, 0.76075E+05,
0.82201E+05, 0.88704E+05, 0.95602E+05, 0.10291E+06, 0.11065E+06,
0.11884E+06, 0.12750E+06, 0.13665E+06, 0.14631E+06, 0.15650E+06,
0.16724E+06, 0.17856E+06, 0.19047E+06, 0.20301E+06, 0.21618E+06,
0.23002E+06, 0.24456E+06, 0.25981E+06, 0.27580E+06, 0.29256E+06,
0.31012E+06, 0.32850E+06, 0.34773E+06, 0.36784E+06, 0.38886E+06,
0.41082E+06, 0.43374E+06, 0.45766E+06, 0.48262E+06, 0.50863E+06,
0.53574E+06, 0.56398E+06, 0.59339E+06, 0.62398E+06, 0.65581E+06,
0.68891E+06, 0.72331E+06, 0.75905E+06, 0.79617E+06, 0.83470E+06,
0.87469E+06, 0.91617E+06, 0.95919E+06, 0.10038E+07, 0.10500E+07,
0.10979E+07, 0.11474E+07, 0.11988E+07, 0.12519E+07, 0.13068E+07,
0.13636E+07, 0.14224E+07, 0.14831E+07, 0.15459E+07, 0.16107E+07,
0.16776E+07, 0.17467E+07, 0.18180E+07, 0.18916E+07, 0.19675E+07,
0.20458E+07, 0.21265E+07, 0.22097E+07, 0.22954E+07, 0.23837E+07,
0.24747E+07, 0.25684E+07, 0.26648E+07, 0.27641E+07, 0.28662E+07,
0.29713E+07, 0.30794E+07, 0.31905E+07, 0.33048E+07, 0.34223E+07,
0.35430E+07, 0.36670E+07, 0.37944E+07, 0.39253E+07, 0.40597E+07,
0.41976E+07, 0.43393E+07, 0.44846E+07, 0.46337E+07, 0.47867E+07,
0.49437E+07, 0.51046E+07, 0.52696E+07, 0.54388E+07, 0.56122E+07,
0.57900E+07])
# --------------- NH3 4111: M = 11, I = 1 ---------------------
M = 11
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(3.)
TIPS_ISO_HASH[(M, I)] = float32([0.16013E+03, 0.26692E+03, 0.39067E+03,
0.52933E+03, 0.68153E+03, 0.84641E+03, 0.10234E+04, 0.12125E+04,
0.14136E+04, 0.16272E+04, 0.18537E+04, 0.20937E+04, 0.23481E+04,
0.26177E+04, 0.29035E+04, 0.32065E+04, 0.35279E+04, 0.38688E+04,
0.42304E+04, 0.46141E+04, 0.50212E+04, 0.54531E+04, 0.59114E+04,
0.63976E+04, 0.69133E+04, 0.74602E+04, 0.80401E+04, 0.86549E+04,
0.93066E+04, 0.99971E+04, 0.10729E+05, 0.11504E+05, 0.12324E+05,
0.13193E+05, 0.14112E+05, 0.15085E+05, 0.16114E+05, 0.17201E+05,
0.18352E+05, 0.19567E+05, 0.20851E+05, 0.22208E+05, 0.23640E+05,
0.25152E+05, 0.26747E+05, 0.28430E+05, 0.30205E+05, 0.32077E+05,
0.34050E+05, 0.36128E+05, 0.38317E+05, 0.40623E+05, 0.43050E+05,
0.45605E+05, 0.48292E+05, 0.51119E+05, 0.54091E+05, 0.57215E+05,
0.60498E+05, 0.63947E+05, 0.67569E+05, 0.71372E+05, 0.75364E+05,
0.79552E+05, 0.83946E+05, 0.88553E+05, 0.93384E+05, 0.98447E+05,
0.10375E+06, 0.10931E+06, 0.11513E+06, 0.12122E+06, 0.12760E+06,
0.13427E+06, 0.14125E+06, 0.14855E+06, 0.15619E+06, 0.16417E+06,
0.17250E+06, 0.18121E+06, 0.19031E+06, 0.19981E+06, 0.20973E+06,
0.22008E+06, 0.23088E+06, 0.24215E+06, 0.25390E+06, 0.26615E+06,
0.27892E+06, 0.29223E+06, 0.30610E+06, 0.32055E+06, 0.33559E+06,
0.35125E+06, 0.36756E+06, 0.38453E+06, 0.40219E+06, 0.42056E+06,
0.43967E+06, 0.45953E+06, 0.48019E+06, 0.50165E+06, 0.52396E+06,
0.54714E+06, 0.57122E+06, 0.59622E+06, 0.62218E+06, 0.64913E+06,
0.67710E+06, 0.70613E+06, 0.73624E+06, 0.76748E+06, 0.79988E+06,
0.83347E+06, 0.86829E+06, 0.90439E+06, 0.94180E+06, 0.98056E+06,
0.10207E+07])
# --------------- NH3 5111: M = 11, I = 2 ---------------------
M = 11
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M, I)] = float32([0.10697E+03, 0.17832E+03, 0.26100E+03,
0.35364E+03, 0.45533E+03, 0.56549E+03, 0.68377E+03, 0.81007E+03,
0.94447E+03, 0.10872E+04, 0.12385E+04, 0.13988E+04, 0.15688E+04,
0.17490E+04, 0.19399E+04, 0.21424E+04, 0.23571E+04, 0.25848E+04,
0.28264E+04, 0.30828E+04, 0.33548E+04, 0.36434E+04, 0.39496E+04,
0.42745E+04, 0.46190E+04, 0.49845E+04, 0.53720E+04, 0.57828E+04,
0.62182E+04, 0.66796E+04, 0.71684E+04, 0.76862E+04, 0.82344E+04,
0.88149E+04, 0.94292E+04, 0.10079E+05, 0.10767E+05, 0.11494E+05,
0.12262E+05, 0.13074E+05, 0.13932E+05, 0.14839E+05, 0.15796E+05,
0.16806E+05, 0.17872E+05, 0.18997E+05, 0.20183E+05, 0.21434E+05,
0.22752E+05, 0.24141E+05, 0.25604E+05, 0.27145E+05, 0.28767E+05,
0.30475E+05, 0.32271E+05, 0.34160E+05, 0.36146E+05, 0.38234E+05,
0.40428E+05, 0.42733E+05, 0.45154E+05, 0.47696E+05, 0.50364E+05,
0.53163E+05, 0.56100E+05, 0.59180E+05, 0.62408E+05, 0.65792E+05,
0.69339E+05, 0.73053E+05, 0.76943E+05, 0.81016E+05, 0.85279E+05,
0.89740E+05, 0.94406E+05, 0.99287E+05, 0.10439E+06, 0.10972E+06,
0.11530E+06, 0.12112E+06, 0.12720E+06, 0.13355E+06, 0.14018E+06,
0.14711E+06, 0.15433E+06, 0.16186E+06, 0.16971E+06, 0.17791E+06,
0.18645E+06, 0.19534E+06, 0.20462E+06, 0.21428E+06, 0.22434E+06,
0.23481E+06, 0.24572E+06, 0.25706E+06, 0.26887E+06, 0.28116E+06,
0.29393E+06, 0.30722E+06, 0.32103E+06, 0.33539E+06, 0.35031E+06,
0.36581E+06, 0.38191E+06, 0.39864E+06, 0.41600E+06, 0.43403E+06,
0.45274E+06, 0.47215E+06, 0.49230E+06, 0.51319E+06, 0.53487E+06,
0.55734E+06, 0.58064E+06, 0.60478E+06, 0.62981E+06, 0.65574E+06,
0.68260E+06])
# --------------- HNO3 146: M = 12, I = 1 ---------------------
M = 12
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M, I)] = float32([0.15010E+05, 0.25316E+05, 0.37374E+05,
0.51216E+05, 0.67105E+05, 0.85473E+05, 0.10688E+06, 0.13201E+06,
0.16165E+06, 0.19671E+06, 0.23825E+06, 0.28749E+06, 0.34583E+06,
0.41490E+06, 0.49657E+06, 0.59302E+06, 0.70673E+06, 0.84054E+06,
0.99775E+06, 0.11821E+07, 0.13978E+07, 0.16498E+07, 0.19436E+07,
0.22855E+07, 0.26825E+07, 0.31428E+07, 0.36753E+07, 0.42903E+07,
0.49993E+07, 0.58151E+07, 0.67523E+07, 0.78269E+07, 0.90572E+07,
0.10463E+08, 0.12067E+08, 0.13895E+08, 0.15973E+08, 0.18333E+08,
0.21009E+08, 0.24039E+08, 0.27464E+08, 0.31331E+08, 0.35690E+08,
0.40597E+08, 0.46115E+08, 0.52310E+08, 0.59257E+08, 0.67037E+08,
0.75739E+08, 0.85461E+08, 0.96310E+08, 0.10840E+09, 0.12186E+09,
0.13683E+09, 0.15346E+09, 0.17191E+09, 0.19236E+09, 0.21501E+09,
0.24006E+09, 0.26774E+09, 0.29830E+09, 0.33200E+09, 0.36914E+09,
0.41002E+09, 0.45498E+09, 0.50438E+09, 0.55862E+09, 0.61812E+09,
0.68332E+09, 0.75473E+09, 0.83286E+09, 0.91828E+09, 0.10116E+10,
0.11134E+10, 0.12245E+10, 0.13456E+10, 0.14775E+10, 0.16210E+10,
0.17771E+10, 0.19467E+10, 0.21309E+10, 0.23309E+10, 0.25477E+10,
0.27827E+10, 0.30372E+10, 0.33127E+10, 0.36107E+10, 0.39329E+10,
0.42809E+10, 0.46567E+10, 0.50623E+10, 0.54997E+10, 0.59711E+10,
0.64789E+10, 0.70257E+10, 0.76140E+10, 0.82468E+10, 0.89269E+10,
0.96575E+10, 0.10442E+11, 0.11284E+11, 0.12187E+11, 0.13155E+11,
0.14193E+11, 0.15304E+11, 0.16494E+11, 0.17767E+11, 0.19129E+11,
0.20585E+11, 0.22140E+11, 0.23802E+11, 0.25576E+11, 0.27469E+11,
0.29489E+11, 0.31642E+11, 0.33937E+11, 0.36382E+11, 0.38985E+11,
0.41757E+11])
# --------------- HNO3 156: M = 12, I = 2 --------------------- NOT IN TIPS-2011
M = 12
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(0.)
TIPS_ISO_HASH[(M, I)] = float32([0.])
# --------------- OH 61: M = 13, I = 1 ---------------------
M = 13
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M, I)] = float32([0.20066E+02, 0.24774E+02, 0.30309E+02,
0.36357E+02, 0.42745E+02, 0.49371E+02, 0.56168E+02, 0.63093E+02,
0.70116E+02, 0.77217E+02, 0.84380E+02, 0.91594E+02, 0.98850E+02,
0.10614E+03, 0.11346E+03, 0.12081E+03, 0.12818E+03, 0.13557E+03,
0.14298E+03, 0.15041E+03, 0.15785E+03, 0.16531E+03, 0.17278E+03,
0.18027E+03, 0.18778E+03, 0.19530E+03, 0.20284E+03, 0.21040E+03,
0.21797E+03, 0.22556E+03, 0.23318E+03, 0.24082E+03, 0.24848E+03,
0.25617E+03, 0.26389E+03, 0.27163E+03, 0.27941E+03, 0.28721E+03,
0.29505E+03, 0.30292E+03, 0.31084E+03, 0.31878E+03, 0.32677E+03,
0.33480E+03, 0.34287E+03, 0.35099E+03, 0.35915E+03, 0.36736E+03,
0.37561E+03, 0.38391E+03, 0.39227E+03, 0.40067E+03, 0.40913E+03,
0.41764E+03, 0.42620E+03, 0.43482E+03, 0.44350E+03, 0.45223E+03,
0.46102E+03, 0.46987E+03, 0.47878E+03, 0.48775E+03, 0.49679E+03,
0.50588E+03, 0.51503E+03, 0.52425E+03, 0.53354E+03, 0.54288E+03,
0.55229E+03, 0.56177E+03, 0.57132E+03, 0.58092E+03, 0.59060E+03,
0.60035E+03, 0.61016E+03, 0.62004E+03, 0.62999E+03, 0.64001E+03,
0.65010E+03, 0.66025E+03, 0.67049E+03, 0.68078E+03, 0.69115E+03,
0.70160E+03, 0.71211E+03, 0.72269E+03, 0.73335E+03, 0.74408E+03,
0.75488E+03, 0.76576E+03, 0.77671E+03, 0.78773E+03, 0.79883E+03,
0.81000E+03, 0.82124E+03, 0.83256E+03, 0.84396E+03, 0.85542E+03,
0.86696E+03, 0.87858E+03, 0.89027E+03, 0.90204E+03, 0.91389E+03,
0.92580E+03, 0.93781E+03, 0.94988E+03, 0.96203E+03, 0.97425E+03,
0.98656E+03, 0.99893E+03, 0.10114E+04, 0.10239E+04, 0.10365E+04,
0.10492E+04, 0.10620E+04, 0.10748E+04, 0.10878E+04, 0.11007E+04,
0.11138E+04])
# --------------- OH 81: M = 13, I = 2 ---------------------
M = 13
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M, I)] = float32([0.20124E+02, 0.24876E+02, 0.30457E+02,
0.36553E+02, 0.42991E+02, 0.49666E+02, 0.56513E+02, 0.63489E+02,
0.70563E+02, 0.77715E+02, 0.84929E+02, 0.92195E+02, 0.99504E+02,
0.10685E+03, 0.11423E+03, 0.12164E+03, 0.12907E+03, 0.13654E+03,
0.14403E+03, 0.15154E+03, 0.15909E+03, 0.16666E+03, 0.17427E+03,
0.18191E+03, 0.18959E+03, 0.19731E+03, 0.20507E+03, 0.21287E+03,
0.22073E+03, 0.22863E+03, 0.23658E+03, 0.24459E+03, 0.25266E+03,
0.26078E+03, 0.26897E+03, 0.27722E+03, 0.28554E+03, 0.29393E+03,
0.30238E+03, 0.31091E+03, 0.31952E+03, 0.32820E+03, 0.33696E+03,
0.34579E+03, 0.35471E+03, 0.36371E+03, 0.37279E+03, 0.38196E+03,
0.39121E+03, 0.40055E+03, 0.40998E+03, 0.41949E+03, 0.42910E+03,
0.43879E+03, 0.44858E+03, 0.45845E+03, 0.46843E+03, 0.47849E+03,
0.48865E+03, 0.49890E+03, 0.50924E+03, 0.51969E+03, 0.53022E+03,
0.54086E+03, 0.55159E+03, 0.56242E+03, 0.57335E+03, 0.58437E+03,
0.59550E+03, 0.60673E+03, 0.61805E+03, 0.62947E+03, 0.64100E+03,
0.65263E+03, 0.66435E+03, 0.67618E+03, 0.68811E+03, 0.70014E+03,
0.71228E+03, 0.72451E+03, 0.73685E+03, 0.74929E+03, 0.76184E+03,
0.77449E+03, 0.78724E+03, 0.80009E+03, 0.81306E+03, 0.82612E+03,
0.83929E+03, 0.85256E+03, 0.86594E+03, 0.87942E+03, 0.89301E+03,
0.90670E+03, 0.92050E+03, 0.93440E+03, 0.94841E+03, 0.96253E+03,
0.97675E+03, 0.99108E+03, 0.10055E+04, 0.10201E+04, 0.10347E+04,
0.10495E+04, 0.10643E+04, 0.10793E+04, 0.10944E+04, 0.11096E+04,
0.11248E+04, 0.11402E+04, 0.11558E+04, 0.11714E+04, 0.11871E+04,
0.12029E+04, 0.12189E+04, 0.12349E+04, 0.12511E+04, 0.12673E+04,
0.12837E+04])
# --------------- OH 62: M = 13, I = 3 ---------------------
M = 13
I = 3
TIPS_GSI_HASH[(M, I)] = __FloatType__(3.)
TIPS_ISO_HASH[(M, I)] = float32([0.41032E+02, 0.54704E+02, 0.70201E+02,
0.86985E+02, 0.10469E+03, 0.12306E+03, 0.14194E+03, 0.16119E+03,
0.18075E+03, 0.20054E+03, 0.22053E+03, 0.24068E+03, 0.26096E+03,
0.28135E+03, 0.30183E+03, 0.32241E+03, 0.34305E+03, 0.36376E+03,
0.38453E+03, 0.40535E+03, 0.42622E+03, 0.44714E+03, 0.46811E+03,
0.48913E+03, 0.51019E+03, 0.53131E+03, 0.55246E+03, 0.57368E+03,
0.59495E+03, 0.61627E+03, 0.63766E+03, 0.65912E+03, 0.68064E+03,
0.70223E+03, 0.72390E+03, 0.74565E+03, 0.76749E+03, 0.78941E+03,
0.81143E+03, 0.83355E+03, 0.85578E+03, 0.87810E+03, 0.90054E+03,
0.92310E+03, 0.94577E+03, 0.96857E+03, 0.99149E+03, 0.10145E+04,
0.10377E+04, 0.10611E+04, 0.10845E+04, 0.11081E+04, 0.11319E+04,
0.11558E+04, 0.11798E+04, 0.12040E+04, 0.12284E+04, 0.12529E+04,
0.12776E+04, 0.13025E+04, 0.13275E+04, 0.13527E+04, 0.13781E+04,
0.14036E+04, 0.14293E+04, 0.14552E+04, 0.14813E+04, 0.15076E+04,
0.15340E+04, 0.15606E+04, 0.15874E+04, 0.16144E+04, 0.16416E+04,
0.16690E+04, 0.16965E+04, 0.17243E+04, 0.17522E+04, 0.17804E+04,
0.18087E+04, 0.18373E+04, 0.18660E+04, 0.18949E+04, 0.19241E+04,
0.19534E+04, 0.19829E+04, 0.20127E+04, 0.20426E+04, 0.20727E+04,
0.21031E+04, 0.21336E+04, 0.21644E+04, 0.21954E+04, 0.22266E+04,
0.22579E+04, 0.22895E+04, 0.23213E+04, 0.23534E+04, 0.23856E+04,
0.24180E+04, 0.24506E+04, 0.24835E+04, 0.25166E+04, 0.25499E+04,
0.25834E+04, 0.26171E+04, 0.26510E+04, 0.26852E+04, 0.27195E+04,
0.27541E+04, 0.27889E+04, 0.28239E+04, 0.28592E+04, 0.28946E+04,
0.29303E+04, 0.29661E+04, 0.30023E+04, 0.30386E+04, 0.30751E+04,
0.31119E+04])
# --------------- HF 19: M = 14, I = 1 ---------------------
M = 14
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(4.)
TIPS_ISO_HASH[(M, I)] = float32([0.95958E+01, 0.12933E+02, 0.16295E+02,
0.19666E+02, 0.23043E+02, 0.26425E+02, 0.29809E+02, 0.33195E+02,
0.36584E+02, 0.39974E+02, 0.43366E+02, 0.46759E+02, 0.50154E+02,
0.53550E+02, 0.56947E+02, 0.60346E+02, 0.63746E+02, 0.67148E+02,
0.70550E+02, 0.73955E+02, 0.77361E+02, 0.80769E+02, 0.84179E+02,
0.87591E+02, 0.91006E+02, 0.94424E+02, 0.97846E+02, 0.10127E+03,
0.10470E+03, 0.10813E+03, 0.11157E+03, 0.11502E+03, 0.11847E+03,
0.12193E+03, 0.12540E+03, 0.12888E+03, 0.13236E+03, 0.13586E+03,
0.13936E+03, 0.14288E+03, 0.14641E+03, 0.14995E+03, 0.15351E+03,
0.15708E+03, 0.16066E+03, 0.16426E+03, 0.16788E+03, 0.17151E+03,
0.17516E+03, 0.17882E+03, 0.18251E+03, 0.18621E+03, 0.18994E+03,
0.19368E+03, 0.19745E+03, 0.20123E+03, 0.20504E+03, 0.20887E+03,
0.21272E+03, 0.21659E+03, 0.22049E+03, 0.22441E+03, 0.22836E+03,
0.23233E+03, 0.23632E+03, 0.24034E+03, 0.24439E+03, 0.24846E+03,
0.25255E+03, 0.25668E+03, 0.26083E+03, 0.26501E+03, 0.26921E+03,
0.27344E+03, 0.27770E+03, 0.28199E+03, 0.28631E+03, 0.29066E+03,
0.29503E+03, 0.29944E+03, 0.30387E+03, 0.30833E+03, 0.31282E+03,
0.31735E+03, 0.32190E+03, 0.32648E+03, 0.33110E+03, 0.33574E+03,
0.34042E+03, 0.34512E+03, 0.34986E+03, 0.35463E+03, 0.35943E+03,
0.36426E+03, 0.36913E+03, 0.37402E+03, 0.37895E+03, 0.38391E+03,
0.38891E+03, 0.39393E+03, 0.39899E+03, 0.40408E+03, 0.40921E+03,
0.41436E+03, 0.41955E+03, 0.42478E+03, 0.43004E+03, 0.43533E+03,
0.44065E+03, 0.44601E+03, 0.45140E+03, 0.45683E+03, 0.46229E+03,
0.46779E+03, 0.47332E+03, 0.47888E+03, 0.48448E+03, 0.49011E+03,
0.49578E+03])
# --------------- HF 29: M = 14, I = 2 --------------------- not in TIPS-2011
M = 14
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(0.)
TIPS_ISO_HASH[(M, I)] = float32([0.])
# --------------- HCl 15: M = 15, I = 1 ---------------------
M = 15
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(8.)
TIPS_ISO_HASH[(M, I)] = float32([0.34775E+02, 0.48060E+02, 0.61370E+02,
0.74692E+02, 0.88024E+02, 0.10136E+03, 0.11471E+03, 0.12806E+03,
0.14141E+03, 0.15478E+03, 0.16814E+03, 0.18151E+03, 0.19489E+03,
0.20827E+03, 0.22166E+03, 0.23506E+03, 0.24847E+03, 0.26189E+03,
0.27533E+03, 0.28878E+03, 0.30225E+03, 0.31575E+03, 0.32928E+03,
0.34284E+03, 0.35645E+03, 0.37009E+03, 0.38378E+03, 0.39753E+03,
0.41134E+03, 0.42521E+03, 0.43914E+03, 0.45316E+03, 0.46725E+03,
0.48142E+03, 0.49568E+03, 0.51003E+03, 0.52448E+03, 0.53902E+03,
0.55368E+03, 0.56843E+03, 0.58330E+03, 0.59829E+03, 0.61339E+03,
0.62862E+03, 0.64396E+03, 0.65944E+03, 0.67504E+03, 0.69078E+03,
0.70665E+03, 0.72265E+03, 0.73880E+03, 0.75508E+03, 0.77151E+03,
0.78809E+03, 0.80481E+03, 0.82168E+03, 0.83870E+03, 0.85587E+03,
0.87320E+03, 0.89068E+03, 0.90832E+03, 0.92611E+03, 0.94407E+03,
0.96218E+03, 0.98046E+03, 0.99889E+03, 0.10175E+04, 0.10363E+04,
0.10552E+04, 0.10743E+04, 0.10936E+04, 0.11130E+04, 0.11326E+04,
0.11524E+04, 0.11723E+04, 0.11924E+04, 0.12127E+04, 0.12332E+04,
0.12538E+04, 0.12746E+04, 0.12956E+04, 0.13168E+04, 0.13381E+04,
0.13597E+04, 0.13814E+04, 0.14032E+04, 0.14253E+04, 0.14475E+04,
0.14700E+04, 0.14926E+04, 0.15153E+04, 0.15383E+04, 0.15615E+04,
0.15848E+04, 0.16083E+04, 0.16320E+04, 0.16559E+04, 0.16800E+04,
0.17043E+04, 0.17287E+04, 0.17533E+04, 0.17782E+04, 0.18032E+04,
0.18284E+04, 0.18538E+04, 0.18794E+04, 0.19051E+04, 0.19311E+04,
0.19573E+04, 0.19836E+04, 0.20102E+04, 0.20369E+04, 0.20638E+04,
0.20910E+04, 0.21183E+04, 0.21458E+04, 0.21735E+04, 0.22014E+04,
0.22295E+04])
# --------------- HCl 17: M = 15, I = 2 ---------------------
M = 15
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(8.)
TIPS_ISO_HASH[(M, I)] = float32([0.34823E+02, 0.48128E+02, 0.61458E+02,
0.74801E+02, 0.88152E+02, 0.10151E+03, 0.11488E+03, 0.12825E+03,
0.14162E+03, 0.15500E+03, 0.16839E+03, 0.18178E+03, 0.19518E+03,
0.20858E+03, 0.22199E+03, 0.23541E+03, 0.24884E+03, 0.26228E+03,
0.27574E+03, 0.28921E+03, 0.30270E+03, 0.31622E+03, 0.32977E+03,
0.34336E+03, 0.35698E+03, 0.37065E+03, 0.38436E+03, 0.39813E+03,
0.41196E+03, 0.42585E+03, 0.43981E+03, 0.45384E+03, 0.46796E+03,
0.48215E+03, 0.49644E+03, 0.51081E+03, 0.52528E+03, 0.53986E+03,
0.55453E+03, 0.56932E+03, 0.58421E+03, 0.59922E+03, 0.61435E+03,
0.62960E+03, 0.64498E+03, 0.66048E+03, 0.67611E+03, 0.69187E+03,
0.70777E+03, 0.72381E+03, 0.73998E+03, 0.75630E+03, 0.77276E+03,
0.78936E+03, 0.80612E+03, 0.82302E+03, 0.84007E+03, 0.85727E+03,
0.87463E+03, 0.89215E+03, 0.90982E+03, 0.92765E+03, 0.94563E+03,
0.96378E+03, 0.98209E+03, 0.10006E+04, 0.10192E+04, 0.10380E+04,
0.10570E+04, 0.10761E+04, 0.10954E+04, 0.11149E+04, 0.11345E+04,
0.11543E+04, 0.11743E+04, 0.11945E+04, 0.12148E+04, 0.12353E+04,
0.12560E+04, 0.12768E+04, 0.12979E+04, 0.13191E+04, 0.13405E+04,
0.13620E+04, 0.13838E+04, 0.14057E+04, 0.14278E+04, 0.14501E+04,
0.14726E+04, 0.14952E+04, 0.15180E+04, 0.15410E+04, 0.15642E+04,
0.15876E+04, 0.16112E+04, 0.16349E+04, 0.16589E+04, 0.16830E+04,
0.17073E+04, 0.17318E+04, 0.17565E+04, 0.17814E+04, 0.18064E+04,
0.18317E+04, 0.18572E+04, 0.18828E+04, 0.19086E+04, 0.19346E+04,
0.19609E+04, 0.19873E+04, 0.20139E+04, 0.20406E+04, 0.20676E+04,
0.20948E+04, 0.21222E+04, 0.21498E+04, 0.21775E+04, 0.22055E+04,
0.22337E+04])
# --------------- HCl 25: M = 15, I = 3 --------------------- not in TIPS-2011
M = 15
I = 3
TIPS_GSI_HASH[(M, I)] = __FloatType__(0.)
TIPS_ISO_HASH[(M, I)] = float32([0.])
# --------------- HCl 27: M = 15, I = 4 --------------------- not in TIPS-2011
M = 15
I = 4
TIPS_GSI_HASH[(M, I)] = __FloatType__(0.)
TIPS_ISO_HASH[(M, I)] = float32([0.])
# --------------- HBr 19: M = 16, I = 1 ---------------------
M = 16
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(8.)
TIPS_ISO_HASH[(M, I)] = float32([0.42744E+02, 0.59373E+02, 0.76023E+02,
0.92685E+02, 0.10936E+03, 0.12604E+03, 0.14272E+03, 0.15942E+03,
0.17612E+03, 0.19282E+03, 0.20954E+03, 0.22626E+03, 0.24299E+03,
0.25973E+03, 0.27648E+03, 0.29325E+03, 0.31004E+03, 0.32686E+03,
0.34371E+03, 0.36060E+03, 0.37753E+03, 0.39451E+03, 0.41156E+03,
0.42868E+03, 0.44587E+03, 0.46314E+03, 0.48051E+03, 0.49798E+03,
0.51556E+03, 0.53325E+03, 0.55106E+03, 0.56900E+03, 0.58708E+03,
0.60530E+03, 0.62367E+03, 0.64219E+03, 0.66088E+03, 0.67972E+03,
0.69874E+03, 0.71793E+03, 0.73730E+03, 0.75685E+03, 0.77659E+03,
0.79652E+03, 0.81664E+03, 0.83696E+03, 0.85748E+03, 0.87820E+03,
0.89914E+03, 0.92028E+03, 0.94163E+03, 0.96319E+03, 0.98498E+03,
0.10070E+04, 0.10292E+04, 0.10516E+04, 0.10743E+04, 0.10972E+04,
0.11203E+04, 0.11437E+04, 0.11673E+04, 0.11911E+04, 0.12151E+04,
0.12394E+04, 0.12640E+04, 0.12887E+04, 0.13137E+04, 0.13390E+04,
0.13645E+04, 0.13902E+04, 0.14162E+04, 0.14424E+04, 0.14689E+04,
0.14956E+04, 0.15226E+04, 0.15498E+04, 0.15773E+04, 0.16050E+04,
0.16330E+04, 0.16612E+04, 0.16897E+04, 0.17185E+04, 0.17475E+04,
0.17767E+04, 0.18062E+04, 0.18360E+04, 0.18660E+04, 0.18963E+04,
0.19269E+04, 0.19577E+04, 0.19888E+04, 0.20202E+04, 0.20518E+04,
0.20837E+04, 0.21158E+04, 0.21482E+04, 0.21809E+04, 0.22139E+04,
0.22471E+04, 0.22806E+04, 0.23143E+04, 0.23484E+04, 0.23827E+04,
0.24173E+04, 0.24521E+04, 0.24873E+04, 0.25227E+04, 0.25584E+04,
0.25943E+04, 0.26306E+04, 0.26671E+04, 0.27039E+04, 0.27409E+04,
0.27783E+04, 0.28159E+04, 0.28538E+04, 0.28920E+04, 0.29305E+04,
0.29693E+04])
# --------------- HBr 11: M = 16, I = 2 ---------------------
M = 16
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(8.)
TIPS_ISO_HASH[(M, I)] = float32([0.42756E+02, 0.59390E+02, 0.76045E+02,
0.92713E+02, 0.10939E+03, 0.12607E+03, 0.14277E+03, 0.15947E+03,
0.17617E+03, 0.19288E+03, 0.20960E+03, 0.22633E+03, 0.24306E+03,
0.25981E+03, 0.27656E+03, 0.29334E+03, 0.31014E+03, 0.32696E+03,
0.34381E+03, 0.36071E+03, 0.37764E+03, 0.39464E+03, 0.41169E+03,
0.42881E+03, 0.44601E+03, 0.46329E+03, 0.48066E+03, 0.49813E+03,
0.51572E+03, 0.53341E+03, 0.55123E+03, 0.56918E+03, 0.58727E+03,
0.60549E+03, 0.62387E+03, 0.64240E+03, 0.66109E+03, 0.67994E+03,
0.69896E+03, 0.71816E+03, 0.73754E+03, 0.75710E+03, 0.77684E+03,
0.79678E+03, 0.81691E+03, 0.83724E+03, 0.85776E+03, 0.87850E+03,
0.89943E+03, 0.92058E+03, 0.94194E+03, 0.96352E+03, 0.98531E+03,
0.10073E+04, 0.10295E+04, 0.10520E+04, 0.10747E+04, 0.10976E+04,
0.11207E+04, 0.11441E+04, 0.11677E+04, 0.11915E+04, 0.12156E+04,
0.12399E+04, 0.12644E+04, 0.12892E+04, 0.13142E+04, 0.13395E+04,
0.13650E+04, 0.13907E+04, 0.14167E+04, 0.14429E+04, 0.14694E+04,
0.14961E+04, 0.15231E+04, 0.15504E+04, 0.15778E+04, 0.16056E+04,
0.16336E+04, 0.16618E+04, 0.16903E+04, 0.17191E+04, 0.17481E+04,
0.17773E+04, 0.18069E+04, 0.18367E+04, 0.18667E+04, 0.18970E+04,
0.19276E+04, 0.19584E+04, 0.19895E+04, 0.20209E+04, 0.20525E+04,
0.20844E+04, 0.21166E+04, 0.21490E+04, 0.21817E+04, 0.22147E+04,
0.22479E+04, 0.22814E+04, 0.23152E+04, 0.23492E+04, 0.23835E+04,
0.24181E+04, 0.24530E+04, 0.24882E+04, 0.25236E+04, 0.25593E+04,
0.25952E+04, 0.26315E+04, 0.26680E+04, 0.27048E+04, 0.27419E+04,
0.27793E+04, 0.28169E+04, 0.28549E+04, 0.28931E+04, 0.29316E+04,
0.29703E+04])
# --------------- HBr 29: M = 16, I = 3 --------------------- not in TIPS-2011
M = 16
I = 3
TIPS_GSI_HASH[(M, I)] = __FloatType__(0.)
TIPS_ISO_HASH[(M, I)] = float32([0.])
# --------------- HBr 21: M = 16, I = 4 --------------------- not in TIPS-2011
M = 16
I = 4
TIPS_GSI_HASH[(M, I)] = __FloatType__(0.)
TIPS_ISO_HASH[(M, I)] = float32([0.])
# --------------- HI 17: M = 17, I = 1 ---------------------
M = 17
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(12.)
TIPS_ISO_HASH[(M, I)] = float32([0.82031E+02, 0.11447E+03, 0.14694E+03,
0.17943E+03, 0.21194E+03, 0.24445E+03, 0.27699E+03, 0.30953E+03,
0.34209E+03, 0.37466E+03, 0.40725E+03, 0.43986E+03, 0.47249E+03,
0.50517E+03, 0.53789E+03, 0.57068E+03, 0.60354E+03, 0.63650E+03,
0.66957E+03, 0.70278E+03, 0.73614E+03, 0.76967E+03, 0.80340E+03,
0.83735E+03, 0.87153E+03, 0.90596E+03, 0.94067E+03, 0.97566E+03,
0.10110E+04, 0.10466E+04, 0.10826E+04, 0.11189E+04, 0.11555E+04,
0.11926E+04, 0.12300E+04, 0.12679E+04, 0.13061E+04, 0.13448E+04,
0.13839E+04, 0.14235E+04, 0.14635E+04, 0.15039E+04, 0.15448E+04,
0.15862E+04, 0.16280E+04, 0.16704E+04, 0.17132E+04, 0.17565E+04,
0.18003E+04, 0.18446E+04, 0.18894E+04, 0.19347E+04, 0.19806E+04,
0.20269E+04, 0.20738E+04, 0.21212E+04, 0.21691E+04, 0.22176E+04,
0.22666E+04, 0.23162E+04, 0.23662E+04, 0.24169E+04, 0.24680E+04,
0.25198E+04, 0.25720E+04, 0.26249E+04, 0.26783E+04, 0.27322E+04,
0.27867E+04, 0.28418E+04, 0.28975E+04, 0.29537E+04, 0.30105E+04,
0.30678E+04, 0.31258E+04, 0.31843E+04, 0.32434E+04, 0.33031E+04,
0.33633E+04, 0.34242E+04, 0.34856E+04, 0.35477E+04, 0.36103E+04,
0.36735E+04, 0.37373E+04, 0.38018E+04, 0.38668E+04, 0.39324E+04,
0.39986E+04, 0.40654E+04, 0.41329E+04, 0.42009E+04, 0.42696E+04,
0.43388E+04, 0.44087E+04, 0.44792E+04, 0.45503E+04, 0.46221E+04,
0.46944E+04, 0.47674E+04, 0.48410E+04, 0.49152E+04, 0.49901E+04,
0.50656E+04, 0.51417E+04, 0.52185E+04, 0.52959E+04, 0.53739E+04,
0.54526E+04, 0.55319E+04, 0.56118E+04, 0.56924E+04, 0.57736E+04,
0.58555E+04, 0.59380E+04, 0.60212E+04, 0.61050E+04, 0.61895E+04,
0.62746E+04])
# --------------- HI 27: M = 17, I = 2 --------------------- not in TIPS-2011
M = 17
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(0.)
TIPS_ISO_HASH[(M, I)] = float32([0.])
# --------------- ClO 56: M = 18, I = 1 ---------------------
M = 18
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(4.)
TIPS_ISO_HASH[(M, I)] = float32([0.53847E+03, 0.76580E+03, 0.10017E+04,
0.12511E+04, 0.15168E+04, 0.18001E+04, 0.21014E+04, 0.24206E+04,
0.27577E+04, 0.31127E+04, 0.34857E+04, 0.38765E+04, 0.42854E+04,
0.47124E+04, 0.51575E+04, 0.56208E+04, 0.61025E+04, 0.66026E+04,
0.71211E+04, 0.76582E+04, 0.82138E+04, 0.87882E+04, 0.93813E+04,
0.99932E+04, 0.10624E+05, 0.11273E+05, 0.11942E+05, 0.12629E+05,
0.13336E+05, 0.14061E+05, 0.14806E+05, 0.15570E+05, 0.16353E+05,
0.17155E+05, 0.17976E+05, 0.18816E+05, 0.19676E+05, 0.20555E+05,
0.21453E+05, 0.22371E+05, 0.23308E+05, 0.24264E+05, 0.25240E+05,
0.26236E+05, 0.27250E+05, 0.28284E+05, 0.29338E+05, 0.30412E+05,
0.31505E+05, 0.32617E+05, 0.33749E+05, 0.34901E+05, 0.36072E+05,
0.37263E+05, 0.38474E+05, 0.39705E+05, 0.40955E+05, 0.42225E+05,
0.43515E+05, 0.44825E+05, 0.46154E+05, 0.47504E+05, 0.48873E+05,
0.50262E+05, 0.51672E+05, 0.53101E+05, 0.54549E+05, 0.56019E+05,
0.57508E+05, 0.59017E+05, 0.60546E+05, 0.62095E+05, 0.63665E+05,
0.65254E+05, 0.66864E+05, 0.68494E+05, 0.70144E+05, 0.71814E+05,
0.73504E+05, 0.75215E+05, 0.76946E+05, 0.78698E+05, 0.80470E+05,
0.82261E+05, 0.84074E+05, 0.85907E+05, 0.87760E+05, 0.89633E+05,
0.91527E+05, 0.93442E+05, 0.95377E+05, 0.97333E+05, 0.99309E+05,
0.10131E+06, 0.10332E+06, 0.10536E+06, 0.10742E+06, 0.10950E+06,
0.11160E+06, 0.11372E+06, 0.11586E+06, 0.11802E+06, 0.12020E+06,
0.12241E+06, 0.12463E+06, 0.12688E+06, 0.12914E+06, 0.13143E+06,
0.13374E+06, 0.13607E+06, 0.13842E+06, 0.14079E+06, 0.14318E+06,
0.14559E+06, 0.14802E+06, 0.15048E+06, 0.15295E+06, 0.15545E+06,
0.15797E+06])
# --------------- ClO 76: M = 18, I = 2 ---------------------
M = 18
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(4.)
TIPS_ISO_HASH[(M, I)] = float32([0.54775E+03, 0.77899E+03, 0.10189E+04,
0.12726E+04, 0.15430E+04, 0.18313E+04, 0.21378E+04, 0.24627E+04,
0.28059E+04, 0.31674E+04, 0.35472E+04, 0.39454E+04, 0.43621E+04,
0.47972E+04, 0.52508E+04, 0.57232E+04, 0.62143E+04, 0.67242E+04,
0.72531E+04, 0.78010E+04, 0.83678E+04, 0.89537E+04, 0.95589E+04,
0.10183E+05, 0.10827E+05, 0.11490E+05, 0.12172E+05, 0.12874E+05,
0.13595E+05, 0.14335E+05, 0.15095E+05, 0.15875E+05, 0.16674E+05,
0.17493E+05, 0.18332E+05, 0.19190E+05, 0.20068E+05, 0.20965E+05,
0.21882E+05, 0.22820E+05, 0.23776E+05, 0.24753E+05, 0.25750E+05,
0.26766E+05, 0.27803E+05, 0.28859E+05, 0.29935E+05, 0.31032E+05,
0.32148E+05, 0.33284E+05, 0.34441E+05, 0.35617E+05, 0.36814E+05,
0.38031E+05, 0.39267E+05, 0.40524E+05, 0.41802E+05, 0.43099E+05,
0.44417E+05, 0.45755E+05, 0.47113E+05, 0.48492E+05, 0.49891E+05,
0.51310E+05, 0.52750E+05, 0.54210E+05, 0.55690E+05, 0.57191E+05,
0.58713E+05, 0.60255E+05, 0.61817E+05, 0.63400E+05, 0.65004E+05,
0.66628E+05, 0.68272E+05, 0.69938E+05, 0.71624E+05, 0.73331E+05,
0.75058E+05, 0.76806E+05, 0.78575E+05, 0.80364E+05, 0.82175E+05,
0.84006E+05, 0.85858E+05, 0.87731E+05, 0.89625E+05, 0.91539E+05,
0.93475E+05, 0.95431E+05, 0.97409E+05, 0.99407E+05, 0.10143E+06,
0.10347E+06, 0.10553E+06, 0.10761E+06, 0.10972E+06, 0.11184E+06,
0.11399E+06, 0.11615E+06, 0.11834E+06, 0.12055E+06, 0.12278E+06,
0.12503E+06, 0.12731E+06, 0.12960E+06, 0.13192E+06, 0.13425E+06,
0.13661E+06, 0.13899E+06, 0.14139E+06, 0.14382E+06, 0.14626E+06,
0.14873E+06, 0.15121E+06, 0.15372E+06, 0.15625E+06, 0.15880E+06,
0.16138E+06])
# --------------- OCS 622: M = 19, I = 1 ---------------------
M = 19
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.20609E+03, 0.29199E+03, 0.37861E+03,
0.46737E+03, 0.56024E+03, 0.65929E+03, 0.76649E+03, 0.88361E+03,
0.10123E+04, 0.11541E+04, 0.13105E+04, 0.14829E+04, 0.16728E+04,
0.18818E+04, 0.21113E+04, 0.23629E+04, 0.26383E+04, 0.29391E+04,
0.32672E+04, 0.36245E+04, 0.40128E+04, 0.44343E+04, 0.48911E+04,
0.53853E+04, 0.59193E+04, 0.64956E+04, 0.71166E+04, 0.77849E+04,
0.85033E+04, 0.92746E+04, 0.10102E+05, 0.10988E+05, 0.11936E+05,
0.12949E+05, 0.14032E+05, 0.15186E+05, 0.16416E+05, 0.17726E+05,
0.19120E+05, 0.20601E+05, 0.22173E+05, 0.23842E+05, 0.25611E+05,
0.27484E+05, 0.29468E+05, 0.31566E+05, 0.33783E+05, 0.36124E+05,
0.38595E+05, 0.41202E+05, 0.43949E+05, 0.46842E+05, 0.49888E+05,
0.53092E+05, 0.56460E+05, 0.59999E+05, 0.63716E+05, 0.67616E+05,
0.71708E+05, 0.75997E+05, 0.80491E+05, 0.85197E+05, 0.90124E+05,
0.95278E+05, 0.10067E+06, 0.10630E+06, 0.11219E+06, 0.11833E+06,
0.12475E+06, 0.13144E+06, 0.13842E+06, 0.14570E+06, 0.15328E+06,
0.16117E+06, 0.16940E+06, 0.17795E+06, 0.18686E+06, 0.19611E+06,
0.20574E+06, 0.21574E+06, 0.22613E+06, 0.23692E+06, 0.24813E+06,
0.25975E+06, 0.27182E+06, 0.28433E+06, 0.29730E+06, 0.31074E+06,
0.32467E+06, 0.33909E+06, 0.35403E+06, 0.36950E+06, 0.38551E+06,
0.40207E+06, 0.41920E+06, 0.43691E+06, 0.45522E+06, 0.47415E+06,
0.49370E+06, 0.51390E+06, 0.53476E+06, 0.55629E+06, 0.57852E+06,
0.60146E+06, 0.62513E+06, 0.64954E+06, 0.67471E+06, 0.70067E+06,
0.72742E+06, 0.75499E+06, 0.78339E+06, 0.81265E+06, 0.84279E+06,
0.87381E+06, 0.90576E+06, 0.93863E+06, 0.97246E+06, 0.10073E+07,
0.10431E+07])
# --------------- OCS 624: M = 19, I = 2 ---------------------
M = 19
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.21125E+03, 0.29930E+03, 0.38809E+03,
0.47911E+03, 0.57437E+03, 0.67603E+03, 0.78610E+03, 0.90643E+03,
0.10387E+04, 0.11846E+04, 0.13456E+04, 0.15231E+04, 0.17188E+04,
0.19342E+04, 0.21709E+04, 0.24304E+04, 0.27145E+04, 0.30250E+04,
0.33638E+04, 0.37328E+04, 0.41339E+04, 0.45694E+04, 0.50415E+04,
0.55524E+04, 0.61045E+04, 0.67004E+04, 0.73427E+04, 0.80340E+04,
0.87773E+04, 0.95755E+04, 0.10432E+05, 0.11349E+05, 0.12330E+05,
0.13380E+05, 0.14500E+05, 0.15696E+05, 0.16970E+05, 0.18327E+05,
0.19770E+05, 0.21305E+05, 0.22934E+05, 0.24663E+05, 0.26497E+05,
0.28439E+05, 0.30495E+05, 0.32669E+05, 0.34968E+05, 0.37396E+05,
0.39958E+05, 0.42661E+05, 0.45510E+05, 0.48511E+05, 0.51669E+05,
0.54993E+05, 0.58487E+05, 0.62159E+05, 0.66014E+05, 0.70061E+05,
0.74306E+05, 0.78757E+05, 0.83421E+05, 0.88305E+05, 0.93418E+05,
0.98767E+05, 0.10436E+06, 0.11021E+06, 0.11632E+06, 0.12270E+06,
0.12936E+06, 0.13631E+06, 0.14355E+06, 0.15111E+06, 0.15898E+06,
0.16718E+06, 0.17572E+06, 0.18460E+06, 0.19385E+06, 0.20346E+06,
0.21346E+06, 0.22385E+06, 0.23464E+06, 0.24585E+06, 0.25748E+06,
0.26956E+06, 0.28209E+06, 0.29509E+06, 0.30856E+06, 0.32252E+06,
0.33699E+06, 0.35198E+06, 0.36750E+06, 0.38357E+06, 0.40020E+06,
0.41741E+06, 0.43521E+06, 0.45362E+06, 0.47264E+06, 0.49231E+06,
0.51263E+06, 0.53362E+06, 0.55529E+06, 0.57768E+06, 0.60078E+06,
0.62462E+06, 0.64922E+06, 0.67459E+06, 0.70075E+06, 0.72773E+06,
0.75554E+06, 0.78419E+06, 0.81372E+06, 0.84413E+06, 0.87546E+06,
0.90771E+06, 0.94092E+06, 0.97509E+06, 0.10103E+07, 0.10464E+07,
0.10837E+07])
# --------------- OCS 632: M = 19, I = 3 ---------------------
M = 19
I = 3
TIPS_GSI_HASH[(M, I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M, I)] = float32([0.41351E+03, 0.58591E+03, 0.76004E+03,
0.93907E+03, 0.11273E+04, 0.13289E+04, 0.15481E+04, 0.17884E+04,
0.20533E+04, 0.23459E+04, 0.26692E+04, 0.30264E+04, 0.34205E+04,
0.38547E+04, 0.43323E+04, 0.48565E+04, 0.54309E+04, 0.60592E+04,
0.67451E+04, 0.74928E+04, 0.83064E+04, 0.91903E+04, 0.10149E+05,
0.11187E+05, 0.12310E+05, 0.13523E+05, 0.14831E+05, 0.16240E+05,
0.17756E+05, 0.19384E+05, 0.21132E+05, 0.23005E+05, 0.25011E+05,
0.27157E+05, 0.29449E+05, 0.31896E+05, 0.34506E+05, 0.37286E+05,
0.40245E+05, 0.43392E+05, 0.46735E+05, 0.50284E+05, 0.54048E+05,
0.58038E+05, 0.62263E+05, 0.66733E+05, 0.71460E+05, 0.76455E+05,
0.81728E+05, 0.87292E+05, 0.93159E+05, 0.99341E+05, 0.10585E+06,
0.11270E+06, 0.11991E+06, 0.12748E+06, 0.13543E+06, 0.14378E+06,
0.15255E+06, 0.16174E+06, 0.17137E+06, 0.18146E+06, 0.19202E+06,
0.20308E+06, 0.21465E+06, 0.22674E+06, 0.23937E+06, 0.25257E+06,
0.26635E+06, 0.28073E+06, 0.29573E+06, 0.31137E+06, 0.32767E+06,
0.34466E+06, 0.36235E+06, 0.38076E+06, 0.39992E+06, 0.41985E+06,
0.44057E+06, 0.46211E+06, 0.48450E+06, 0.50775E+06, 0.53189E+06,
0.55695E+06, 0.58295E+06, 0.60992E+06, 0.63789E+06, 0.66688E+06,
0.69693E+06, 0.72806E+06, 0.76030E+06, 0.79368E+06, 0.82823E+06,
0.86399E+06, 0.90097E+06, 0.93923E+06, 0.97878E+06, 0.10197E+07,
0.10619E+07, 0.11056E+07, 0.11506E+07, 0.11972E+07, 0.12453E+07,
0.12949E+07, 0.13460E+07, 0.13988E+07, 0.14533E+07, 0.15094E+07,
0.15673E+07, 0.16270E+07, 0.16884E+07, 0.17518E+07, 0.18170E+07,
0.18842E+07, 0.19533E+07, 0.20245E+07, 0.20978E+07, 0.21732E+07,
0.22507E+07])
# --------------- OCS 623: M = 19, I = 4 ---------------------
M = 19
I = 4
TIPS_GSI_HASH[(M, I)] = __FloatType__(4.)
TIPS_ISO_HASH[(M, I)] = float32([0.83485E+03, 0.11828E+04, 0.15337E+04,
0.18934E+04, 0.22697E+04, 0.26712E+04, 0.31059E+04, 0.35809E+04,
0.41030E+04, 0.46785E+04, 0.53133E+04, 0.60135E+04, 0.67850E+04,
0.76338E+04, 0.85663E+04, 0.95888E+04, 0.10708E+05, 0.11931E+05,
0.13265E+05, 0.14718E+05, 0.16298E+05, 0.18012E+05, 0.19870E+05,
0.21881E+05, 0.24054E+05, 0.26399E+05, 0.28926E+05, 0.31646E+05,
0.34570E+05, 0.37710E+05, 0.41077E+05, 0.44685E+05, 0.48545E+05,
0.52672E+05, 0.57078E+05, 0.61780E+05, 0.66790E+05, 0.72125E+05,
0.77801E+05, 0.83833E+05, 0.90239E+05, 0.97036E+05, 0.10424E+06,
0.11188E+06, 0.11996E+06, 0.12850E+06, 0.13754E+06, 0.14708E+06,
0.15715E+06, 0.16777E+06, 0.17896E+06, 0.19076E+06, 0.20317E+06,
0.21623E+06, 0.22996E+06, 0.24438E+06, 0.25953E+06, 0.27543E+06,
0.29211E+06, 0.30959E+06, 0.32791E+06, 0.34710E+06, 0.36718E+06,
0.38820E+06, 0.41017E+06, 0.43314E+06, 0.45713E+06, 0.48219E+06,
0.50835E+06, 0.53564E+06, 0.56409E+06, 0.59376E+06, 0.62468E+06,
0.65688E+06, 0.69041E+06, 0.72530E+06, 0.76161E+06, 0.79937E+06,
0.83862E+06, 0.87941E+06, 0.92179E+06, 0.96581E+06, 0.10115E+07,
0.10589E+07, 0.11081E+07, 0.11591E+07, 0.12120E+07, 0.12669E+07,
0.13237E+07, 0.13825E+07, 0.14435E+07, 0.15066E+07, 0.15718E+07,
0.16394E+07, 0.17093E+07, 0.17815E+07, 0.18562E+07, 0.19334E+07,
0.20132E+07, 0.20956E+07, 0.21807E+07, 0.22685E+07, 0.23592E+07,
0.24528E+07, 0.25494E+07, 0.26490E+07, 0.27517E+07, 0.28576E+07,
0.29667E+07, 0.30792E+07, 0.31951E+07, 0.33145E+07, 0.34374E+07,
0.35640E+07, 0.36943E+07, 0.38285E+07, 0.39665E+07, 0.41085E+07,
0.42546E+07])
# --------------- OCS 822: M = 19, I = 5 ---------------------
M = 19
I = 5
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.21967E+03, 0.31126E+03, 0.40370E+03,
0.49862E+03, 0.59823E+03, 0.70481E+03, 0.82050E+03, 0.94724E+03,
0.10868E+04, 0.12409E+04, 0.14112E+04, 0.15993E+04, 0.18067E+04,
0.20353E+04, 0.22866E+04, 0.25624E+04, 0.28645E+04, 0.31950E+04,
0.35558E+04, 0.39490E+04, 0.43767E+04, 0.48413E+04, 0.53452E+04,
0.58909E+04, 0.64810E+04, 0.71182E+04, 0.78053E+04, 0.85454E+04,
0.93413E+04, 0.10196E+05, 0.11114E+05, 0.12098E+05, 0.13151E+05,
0.14277E+05, 0.15480E+05, 0.16764E+05, 0.18133E+05, 0.19592E+05,
0.21144E+05, 0.22794E+05, 0.24548E+05, 0.26409E+05, 0.28383E+05,
0.30475E+05, 0.32689E+05, 0.35033E+05, 0.37511E+05, 0.40128E+05,
0.42892E+05, 0.45808E+05, 0.48882E+05, 0.52121E+05, 0.55532E+05,
0.59121E+05, 0.62895E+05, 0.66861E+05, 0.71028E+05, 0.75402E+05,
0.79991E+05, 0.84803E+05, 0.89847E+05, 0.95130E+05, 0.10066E+06,
0.10645E+06, 0.11251E+06, 0.11883E+06, 0.12545E+06, 0.13236E+06,
0.13957E+06, 0.14710E+06, 0.15495E+06, 0.16313E+06, 0.17166E+06,
0.18055E+06, 0.18980E+06, 0.19944E+06, 0.20946E+06, 0.21989E+06,
0.23073E+06, 0.24200E+06, 0.25371E+06, 0.26587E+06, 0.27850E+06,
0.29161E+06, 0.30521E+06, 0.31931E+06, 0.33394E+06, 0.34910E+06,
0.36482E+06, 0.38109E+06, 0.39795E+06, 0.41541E+06, 0.43348E+06,
0.45217E+06, 0.47151E+06, 0.49151E+06, 0.51219E+06, 0.53356E+06,
0.55565E+06, 0.57847E+06, 0.60204E+06, 0.62637E+06, 0.65149E+06,
0.67742E+06, 0.70417E+06, 0.73176E+06, 0.76023E+06, 0.78957E+06,
0.81982E+06, 0.85100E+06, 0.88313E+06, 0.91622E+06, 0.95031E+06,
0.98541E+06, 0.10216E+07, 0.10587E+07, 0.10970E+07, 0.11364E+07,
0.11769E+07])
# --------------- H2CO 126: M = 20, I = 1 ---------------------
M = 20
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.25934E+03, 0.43623E+03, 0.64143E+03,
0.87152E+03, 0.11241E+04, 0.13975E+04, 0.16906E+04, 0.20029E+04,
0.23344E+04, 0.26857E+04, 0.30577E+04, 0.34518E+04, 0.38698E+04,
0.43138E+04, 0.47860E+04, 0.52890E+04, 0.58256E+04, 0.63985E+04,
0.70109E+04, 0.76660E+04, 0.83673E+04, 0.91184E+04, 0.99230E+04,
0.10785E+05, 0.11710E+05, 0.12700E+05, 0.13762E+05, 0.14900E+05,
0.16119E+05, 0.17425E+05, 0.18823E+05, 0.20320E+05, 0.21923E+05,
0.23637E+05, 0.25471E+05, 0.27432E+05, 0.29527E+05, 0.31765E+05,
0.34155E+05, 0.36706E+05, 0.39428E+05, 0.42330E+05, 0.45424E+05,
0.48720E+05, 0.52231E+05, 0.55968E+05, 0.59945E+05, 0.64175E+05,
0.68672E+05, 0.73450E+05, 0.78526E+05, 0.83915E+05, 0.89634E+05,
0.95701E+05, 0.10213E+06, 0.10895E+06, 0.11618E+06, 0.12383E+06,
0.13193E+06, 0.14049E+06, 0.14956E+06, 0.15914E+06, 0.16927E+06,
0.17997E+06, 0.19127E+06, 0.20320E+06, 0.21578E+06, 0.22906E+06,
0.24306E+06, 0.25782E+06, 0.27336E+06, 0.28974E+06, 0.30698E+06,
0.32513E+06, 0.34422E+06, 0.36430E+06, 0.38542E+06, 0.40761E+06,
0.43093E+06, 0.45542E+06, 0.48114E+06, 0.50813E+06, 0.53646E+06,
0.56617E+06, 0.59733E+06, 0.63000E+06, 0.66423E+06, 0.70010E+06,
0.73767E+06, 0.77701E+06, 0.81818E+06, 0.86127E+06, 0.90635E+06,
0.95349E+06, 0.10028E+07, 0.10543E+07, 0.11082E+07, 0.11644E+07,
0.12232E+07, 0.12845E+07, 0.13485E+07, 0.14154E+07, 0.14851E+07,
0.15578E+07, 0.16337E+07, 0.17127E+07, 0.17952E+07, 0.18810E+07,
0.19705E+07, 0.20637E+07, 0.21607E+07, 0.22617E+07, 0.23669E+07,
0.24763E+07, 0.25901E+07, 0.27085E+07, 0.28316E+07, 0.29596E+07,
0.30926E+07])
# --------------- H2CO 136: M = 20, I = 2 ---------------------
M = 20
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M, I)] = float32([0.53173E+03, 0.89447E+03, 0.13153E+04,
0.17871E+04, 0.23051E+04, 0.28658E+04, 0.34669E+04, 0.41073E+04,
0.47872E+04, 0.55074E+04, 0.62702E+04, 0.70785E+04, 0.79357E+04,
0.88462E+04, 0.98147E+04, 0.10846E+05, 0.11946E+05, 0.13121E+05,
0.14377E+05, 0.15721E+05, 0.17159E+05, 0.18699E+05, 0.20349E+05,
0.22118E+05, 0.24013E+05, 0.26045E+05, 0.28222E+05, 0.30555E+05,
0.33055E+05, 0.35733E+05, 0.38601E+05, 0.41671E+05, 0.44958E+05,
0.48474E+05, 0.52235E+05, 0.56255E+05, 0.60552E+05, 0.65142E+05,
0.70043E+05, 0.75275E+05, 0.80856E+05, 0.86808E+05, 0.93152E+05,
0.99913E+05, 0.10711E+06, 0.11478E+06, 0.12293E+06, 0.13161E+06,
0.14083E+06, 0.15063E+06, 0.16104E+06, 0.17209E+06, 0.18382E+06,
0.19626E+06, 0.20945E+06, 0.22343E+06, 0.23825E+06, 0.25394E+06,
0.27054E+06, 0.28812E+06, 0.30671E+06, 0.32636E+06, 0.34713E+06,
0.36907E+06, 0.39224E+06, 0.41671E+06, 0.44252E+06, 0.46975E+06,
0.49845E+06, 0.52872E+06, 0.56060E+06, 0.59418E+06, 0.62954E+06,
0.66676E+06, 0.70591E+06, 0.74710E+06, 0.79040E+06, 0.83591E+06,
0.88373E+06, 0.93395E+06, 0.98669E+06, 0.10421E+07, 0.11001E+07,
0.11611E+07, 0.12250E+07, 0.12920E+07, 0.13622E+07, 0.14357E+07,
0.15128E+07, 0.15934E+07, 0.16779E+07, 0.17662E+07, 0.18587E+07,
0.19554E+07, 0.20565E+07, 0.21621E+07, 0.22725E+07, 0.23879E+07,
0.25084E+07, 0.26342E+07, 0.27655E+07, 0.29026E+07, 0.30456E+07,
0.31947E+07, 0.33502E+07, 0.35124E+07, 0.36814E+07, 0.38575E+07,
0.40410E+07, 0.42321E+07, 0.44311E+07, 0.46382E+07, 0.48538E+07,
0.50782E+07, 0.53116E+07, 0.55544E+07, 0.58068E+07, 0.60693E+07,
0.63421E+07])
# --------------- H2CO 128: M = 20, I = 3 ---------------------
M = 20
I = 3
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.27198E+03, 0.45755E+03, 0.67282E+03,
0.91421E+03, 0.11792E+04, 0.14660E+04, 0.17735E+04, 0.21012E+04,
0.24490E+04, 0.28175E+04, 0.32077E+04, 0.36212E+04, 0.40598E+04,
0.45256E+04, 0.50211E+04, 0.55488E+04, 0.61116E+04, 0.67127E+04,
0.73552E+04, 0.80426E+04, 0.87783E+04, 0.95663E+04, 0.10410E+05,
0.11315E+05, 0.12285E+05, 0.13324E+05, 0.14438E+05, 0.15632E+05,
0.16911E+05, 0.18281E+05, 0.19748E+05, 0.21319E+05, 0.23000E+05,
0.24799E+05, 0.26723E+05, 0.28780E+05, 0.30978E+05, 0.33326E+05,
0.35834E+05, 0.38510E+05, 0.41365E+05, 0.44410E+05, 0.47656E+05,
0.51115E+05, 0.54798E+05, 0.58719E+05, 0.62891E+05, 0.67329E+05,
0.72047E+05, 0.77060E+05, 0.82385E+05, 0.88039E+05, 0.94039E+05,
0.10040E+06, 0.10715E+06, 0.11431E+06, 0.12189E+06, 0.12991E+06,
0.13841E+06, 0.14740E+06, 0.15691E+06, 0.16696E+06, 0.17759E+06,
0.18882E+06, 0.20067E+06, 0.21318E+06, 0.22639E+06, 0.24032E+06,
0.25501E+06, 0.27049E+06, 0.28680E+06, 0.30398E+06, 0.32207E+06,
0.34111E+06, 0.36114E+06, 0.38221E+06, 0.40436E+06, 0.42765E+06,
0.45211E+06, 0.47781E+06, 0.50479E+06, 0.53311E+06, 0.56283E+06,
0.59400E+06, 0.62669E+06, 0.66097E+06, 0.69688E+06, 0.73451E+06,
0.77393E+06, 0.81520E+06, 0.85840E+06, 0.90360E+06, 0.95090E+06,
0.10004E+07, 0.10521E+07, 0.11061E+07, 0.11626E+07, 0.12216E+07,
0.12833E+07, 0.13476E+07, 0.14148E+07, 0.14849E+07, 0.15581E+07,
0.16344E+07, 0.17140E+07, 0.17969E+07, 0.18834E+07, 0.19735E+07,
0.20674E+07, 0.21651E+07, 0.22669E+07, 0.23729E+07, 0.24832E+07,
0.25980E+07, 0.27174E+07, 0.28416E+07, 0.29708E+07, 0.31050E+07,
0.32446E+07])
# --------------- HOCl 165: M = 21, I = 1 ---------------------
M = 21
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(8.)
TIPS_ISO_HASH[(M, I)] = float32([0.17041E+04, 0.28708E+04, 0.42250E+04,
0.57456E+04, 0.74211E+04, 0.92470E+04, 0.11225E+05, 0.13359E+05,
0.15657E+05, 0.18129E+05, 0.20785E+05, 0.23637E+05, 0.26696E+05,
0.29974E+05, 0.33484E+05, 0.37239E+05, 0.41252E+05, 0.45536E+05,
0.50105E+05, 0.54973E+05, 0.60152E+05, 0.65659E+05, 0.71507E+05,
0.77711E+05, 0.84286E+05, 0.91249E+05, 0.98614E+05, 0.10640E+06,
0.11462E+06, 0.12330E+06, 0.13244E+06, 0.14208E+06, 0.15222E+06,
0.16289E+06, 0.17411E+06, 0.18589E+06, 0.19825E+06, 0.21123E+06,
0.22483E+06, 0.23908E+06, 0.25400E+06, 0.26962E+06, 0.28596E+06,
0.30303E+06, 0.32087E+06, 0.33950E+06, 0.35895E+06, 0.37923E+06,
0.40038E+06, 0.42243E+06, 0.44539E+06, 0.46930E+06, 0.49419E+06,
0.52008E+06, 0.54700E+06, 0.57498E+06, 0.60406E+06, 0.63426E+06,
0.66562E+06, 0.69816E+06, 0.73192E+06, 0.76692E+06, 0.80322E+06,
0.84083E+06, 0.87979E+06, 0.92014E+06, 0.96192E+06, 0.10052E+07,
0.10499E+07, 0.10961E+07, 0.11440E+07, 0.11934E+07, 0.12445E+07,
0.12973E+07, 0.13518E+07, 0.14081E+07, 0.14661E+07, 0.15261E+07,
0.15879E+07, 0.16516E+07, 0.17174E+07, 0.17851E+07, 0.18550E+07,
0.19269E+07, 0.20010E+07, 0.20773E+07, 0.21559E+07, 0.22367E+07,
0.23200E+07, 0.24056E+07, 0.24936E+07, 0.25842E+07, 0.26773E+07,
0.27730E+07, 0.28714E+07, 0.29724E+07, 0.30763E+07, 0.31829E+07,
0.32924E+07, 0.34049E+07, 0.35203E+07, 0.36387E+07, 0.37603E+07,
0.38850E+07, 0.40129E+07, 0.41441E+07, 0.42786E+07, 0.44165E+07,
0.45579E+07, 0.47028E+07, 0.48512E+07, 0.50033E+07, 0.51592E+07,
0.53187E+07, 0.54822E+07, 0.56495E+07, 0.58208E+07, 0.59961E+07,
0.61755E+07])
# --------------- HOCl 167: M = 21, I = 2 ---------------------
M = 21
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(8.)
TIPS_ISO_HASH[(M, I)] = float32([0.17342E+04, 0.29215E+04, 0.42998E+04,
0.58473E+04, 0.75524E+04, 0.94107E+04, 0.11423E+05, 0.13595E+05,
0.15935E+05, 0.18450E+05, 0.21154E+05, 0.24056E+05, 0.27168E+05,
0.30505E+05, 0.34077E+05, 0.37899E+05, 0.41983E+05, 0.46343E+05,
0.50993E+05, 0.55947E+05, 0.61218E+05, 0.66822E+05, 0.72774E+05,
0.79088E+05, 0.85780E+05, 0.92866E+05, 0.10036E+06, 0.10829E+06,
0.11665E+06, 0.12548E+06, 0.13479E+06, 0.14460E+06, 0.15492E+06,
0.16578E+06, 0.17719E+06, 0.18918E+06, 0.20177E+06, 0.21497E+06,
0.22881E+06, 0.24332E+06, 0.25851E+06, 0.27440E+06, 0.29102E+06,
0.30840E+06, 0.32656E+06, 0.34552E+06, 0.36531E+06, 0.38595E+06,
0.40748E+06, 0.42991E+06, 0.45328E+06, 0.47762E+06, 0.50295E+06,
0.52929E+06, 0.55669E+06, 0.58517E+06, 0.61477E+06, 0.64550E+06,
0.67741E+06, 0.71053E+06, 0.74489E+06, 0.78052E+06, 0.81745E+06,
0.85573E+06, 0.89539E+06, 0.93645E+06, 0.97897E+06, 0.10230E+07,
0.10685E+07, 0.11156E+07, 0.11643E+07, 0.12146E+07, 0.12666E+07,
0.13203E+07, 0.13757E+07, 0.14330E+07, 0.14921E+07, 0.15531E+07,
0.16160E+07, 0.16809E+07, 0.17478E+07, 0.18168E+07, 0.18878E+07,
0.19611E+07, 0.20365E+07, 0.21141E+07, 0.21941E+07, 0.22764E+07,
0.23611E+07, 0.24482E+07, 0.25378E+07, 0.26300E+07, 0.27248E+07,
0.28222E+07, 0.29223E+07, 0.30251E+07, 0.31308E+07, 0.32393E+07,
0.33508E+07, 0.34652E+07, 0.35827E+07, 0.37032E+07, 0.38269E+07,
0.39539E+07, 0.40840E+07, 0.42176E+07, 0.43545E+07, 0.44948E+07,
0.46387E+07, 0.47861E+07, 0.49372E+07, 0.50920E+07, 0.52506E+07,
0.54130E+07, 0.55793E+07, 0.57496E+07, 0.59239E+07, 0.61024E+07,
0.62850E+07])
# --------------- N2 44: M = 22, I = 1 ---------------------
M = 22
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.95487E+02, 0.13466E+03, 0.17386E+03,
0.21307E+03, 0.25230E+03, 0.29154E+03, 0.33080E+03, 0.37008E+03,
0.40937E+03, 0.44868E+03, 0.48800E+03, 0.52736E+03, 0.56674E+03,
0.60616E+03, 0.64562E+03, 0.68515E+03, 0.72475E+03, 0.76445E+03,
0.80426E+03, 0.84420E+03, 0.88430E+03, 0.92457E+03, 0.96505E+03,
0.10057E+04, 0.10467E+04, 0.10879E+04, 0.11293E+04, 0.11711E+04,
0.12132E+04, 0.12556E+04, 0.12984E+04, 0.13416E+04, 0.13851E+04,
0.14291E+04, 0.14734E+04, 0.15182E+04, 0.15635E+04, 0.16091E+04,
0.16553E+04, 0.17019E+04, 0.17490E+04, 0.17965E+04, 0.18446E+04,
0.18932E+04, 0.19422E+04, 0.19918E+04, 0.20419E+04, 0.20926E+04,
0.21437E+04, 0.21954E+04, 0.22477E+04, 0.23004E+04, 0.23538E+04,
0.24077E+04, 0.24621E+04, 0.25171E+04, 0.25727E+04, 0.26288E+04,
0.26856E+04, 0.27428E+04, 0.28007E+04, 0.28591E+04, 0.29181E+04,
0.29777E+04, 0.30379E+04, 0.30986E+04, 0.31600E+04, 0.32219E+04,
0.32844E+04, 0.33475E+04, 0.34112E+04, 0.34755E+04, 0.35404E+04,
0.36059E+04, 0.36720E+04, 0.37387E+04, 0.38060E+04, 0.38739E+04,
0.39424E+04, 0.40115E+04, 0.40812E+04, 0.41515E+04, 0.42224E+04,
0.42939E+04, 0.43661E+04, 0.44388E+04, 0.45122E+04, 0.45861E+04,
0.46607E+04, 0.47359E+04, 0.48117E+04, 0.48882E+04, 0.49652E+04,
0.50428E+04, 0.51211E+04, 0.52000E+04, 0.52795E+04, 0.53596E+04,
0.54404E+04, 0.55217E+04, 0.56037E+04, 0.56863E+04, 0.57695E+04,
0.58533E+04, 0.59378E+04, 0.60229E+04, 0.61086E+04, 0.61950E+04,
0.62819E+04, 0.63695E+04, 0.64577E+04, 0.65465E+04, 0.66360E+04,
0.67261E+04, 0.68168E+04, 0.69081E+04, 0.70001E+04, 0.70927E+04,
0.71859E+04])
# --------------- N2 45: M = 22, I = 2 --------------------- not in TIPS-2011
M = 22
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(0.)
TIPS_ISO_HASH[(M, I)] = float32([0.])
# --------------- HCN 124: M = 23, I = 1 ---------------------
M = 23
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M, I)] = float32([0.17143E+03, 0.24209E+03, 0.31285E+03,
0.38392E+03, 0.45582E+03, 0.52929E+03, 0.60515E+03, 0.68424E+03,
0.76731E+03, 0.85505E+03, 0.94805E+03, 0.10468E+04, 0.11519E+04,
0.12637E+04, 0.13826E+04, 0.15090E+04, 0.16435E+04, 0.17863E+04,
0.19378E+04, 0.20985E+04, 0.22689E+04, 0.24492E+04, 0.26401E+04,
0.28418E+04, 0.30550E+04, 0.32801E+04, 0.35176E+04, 0.37680E+04,
0.40318E+04, 0.43097E+04, 0.46021E+04, 0.49097E+04, 0.52330E+04,
0.55727E+04, 0.59294E+04, 0.63038E+04, 0.66964E+04, 0.71081E+04,
0.75396E+04, 0.79915E+04, 0.84646E+04, 0.89596E+04, 0.94774E+04,
0.10019E+05, 0.10585E+05, 0.11176E+05, 0.11793E+05, 0.12437E+05,
0.13108E+05, 0.13809E+05, 0.14540E+05, 0.15301E+05, 0.16094E+05,
0.16919E+05, 0.17779E+05, 0.18673E+05, 0.19603E+05, 0.20570E+05,
0.21575E+05, 0.22619E+05, 0.23704E+05, 0.24831E+05, 0.26000E+05,
0.27213E+05, 0.28472E+05, 0.29778E+05, 0.31131E+05, 0.32534E+05,
0.33987E+05, 0.35493E+05, 0.37052E+05, 0.38666E+05, 0.40336E+05,
0.42064E+05, 0.43852E+05, 0.45701E+05, 0.47612E+05, 0.49587E+05,
0.51629E+05, 0.53738E+05, 0.55916E+05, 0.58165E+05, 0.60486E+05,
0.62883E+05, 0.65355E+05, 0.67905E+05, 0.70536E+05, 0.73249E+05,
0.76045E+05, 0.78927E+05, 0.81897E+05, 0.84957E+05, 0.88108E+05,
0.91354E+05, 0.94696E+05, 0.98136E+05, 0.10168E+06, 0.10532E+06,
0.10907E+06, 0.11292E+06, 0.11689E+06, 0.12096E+06, 0.12516E+06,
0.12946E+06, 0.13389E+06, 0.13844E+06, 0.14311E+06, 0.14791E+06,
0.15284E+06, 0.15790E+06, 0.16310E+06, 0.16843E+06, 0.17391E+06,
0.17953E+06, 0.18529E+06, 0.19120E+06, 0.19726E+06, 0.20348E+06,
0.20986E+06])
# --------------- HCN 134: M = 23, I = 2 ---------------------
M = 23
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(12.)
TIPS_ISO_HASH[(M, I)] = float32([0.35186E+03, 0.49693E+03, 0.64221E+03,
0.78815E+03, 0.93585E+03, 0.10868E+04, 0.12428E+04, 0.14056E+04,
0.15766E+04, 0.17574E+04, 0.19491E+04, 0.21528E+04, 0.23695E+04,
0.26002E+04, 0.28457E+04, 0.31068E+04, 0.33845E+04, 0.36795E+04,
0.39926E+04, 0.43249E+04, 0.46770E+04, 0.50500E+04, 0.54447E+04,
0.58621E+04, 0.63032E+04, 0.67690E+04, 0.72606E+04, 0.77789E+04,
0.83252E+04, 0.89005E+04, 0.95062E+04, 0.10143E+05, 0.10813E+05,
0.11517E+05, 0.12256E+05, 0.13032E+05, 0.13846E+05, 0.14699E+05,
0.15593E+05, 0.16530E+05, 0.17511E+05, 0.18538E+05, 0.19612E+05,
0.20734E+05, 0.21908E+05, 0.23134E+05, 0.24414E+05, 0.25750E+05,
0.27145E+05, 0.28599E+05, 0.30115E+05, 0.31694E+05, 0.33340E+05,
0.35054E+05, 0.36838E+05, 0.38694E+05, 0.40625E+05, 0.42633E+05,
0.44720E+05, 0.46889E+05, 0.49142E+05, 0.51481E+05, 0.53910E+05,
0.56430E+05, 0.59045E+05, 0.61757E+05, 0.64568E+05, 0.67482E+05,
0.70502E+05, 0.73630E+05, 0.76869E+05, 0.80223E+05, 0.83694E+05,
0.87285E+05, 0.91000E+05, 0.94843E+05, 0.98815E+05, 0.10292E+06,
0.10716E+06, 0.11155E+06, 0.11608E+06, 0.12075E+06, 0.12558E+06,
0.13056E+06, 0.13570E+06, 0.14100E+06, 0.14647E+06, 0.15211E+06,
0.15793E+06, 0.16392E+06, 0.17009E+06, 0.17646E+06, 0.18301E+06,
0.18976E+06, 0.19671E+06, 0.20387E+06, 0.21123E+06, 0.21881E+06,
0.22660E+06, 0.23462E+06, 0.24287E+06, 0.25135E+06, 0.26007E+06,
0.26903E+06, 0.27824E+06, 0.28771E+06, 0.29743E+06, 0.30742E+06,
0.31767E+06, 0.32820E+06, 0.33901E+06, 0.35011E+06, 0.36150E+06,
0.37319E+06, 0.38518E+06, 0.39749E+06, 0.41010E+06, 0.42304E+06,
0.43631E+06])
# --------------- HCN 135: M = 23, I = 3 ---------------------
M = 23
I = 3
TIPS_GSI_HASH[(M, I)] = __FloatType__(4.)
TIPS_ISO_HASH[(M, I)] = float32([0.11863E+03, 0.16755E+03, 0.21653E+03,
0.26576E+03, 0.31559E+03, 0.36656E+03, 0.41926E+03, 0.47428E+03,
0.53214E+03, 0.59333E+03, 0.65824E+03, 0.72727E+03, 0.80074E+03,
0.87898E+03, 0.96227E+03, 0.10509E+04, 0.11452E+04, 0.12454E+04,
0.13518E+04, 0.14647E+04, 0.15844E+04, 0.17112E+04, 0.18455E+04,
0.19875E+04, 0.21377E+04, 0.22962E+04, 0.24636E+04, 0.26402E+04,
0.28263E+04, 0.30224E+04, 0.32289E+04, 0.34461E+04, 0.36745E+04,
0.39145E+04, 0.41667E+04, 0.44314E+04, 0.47092E+04, 0.50005E+04,
0.53059E+04, 0.56259E+04, 0.59609E+04, 0.63116E+04, 0.66785E+04,
0.70622E+04, 0.74633E+04, 0.78823E+04, 0.83200E+04, 0.87769E+04,
0.92536E+04, 0.97509E+04, 0.10269E+05, 0.10810E+05, 0.11373E+05,
0.11959E+05, 0.12570E+05, 0.13205E+05, 0.13866E+05, 0.14554E+05,
0.15268E+05, 0.16011E+05, 0.16782E+05, 0.17583E+05, 0.18415E+05,
0.19279E+05, 0.20174E+05, 0.21103E+05, 0.22067E+05, 0.23065E+05,
0.24100E+05, 0.25172E+05, 0.26282E+05, 0.27432E+05, 0.28622E+05,
0.29853E+05, 0.31127E+05, 0.32445E+05, 0.33807E+05, 0.35215E+05,
0.36670E+05, 0.38174E+05, 0.39727E+05, 0.41330E+05, 0.42986E+05,
0.44695E+05, 0.46459E+05, 0.48278E+05, 0.50155E+05, 0.52091E+05,
0.54086E+05, 0.56143E+05, 0.58263E+05, 0.60447E+05, 0.62696E+05,
0.65013E+05, 0.67399E+05, 0.69856E+05, 0.72384E+05, 0.74986E+05,
0.77663E+05, 0.80416E+05, 0.83249E+05, 0.86161E+05, 0.89156E+05,
0.92233E+05, 0.95397E+05, 0.98648E+05, 0.10199E+06, 0.10542E+06,
0.10894E+06, 0.11256E+06, 0.11627E+06, 0.12009E+06, 0.12400E+06,
0.12802E+06, 0.13214E+06, 0.13636E+06, 0.14070E+06, 0.14515E+06,
0.14971E+06])
# --------------- CH3Cl 215: M = 24, I = 1 ---------------------
M = 24
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(4.)
TIPS_ISO_HASH[(M, I)] = float32([0.50529E+04, 0.85123E+04, 0.12528E+05,
0.17036E+05, 0.22005E+05, 0.27429E+05, 0.33325E+05, 0.39734E+05,
0.46713E+05, 0.54336E+05, 0.62690E+05, 0.71876E+05, 0.82006E+05,
0.93204E+05, 0.10560E+06, 0.11936E+06, 0.13463E+06, 0.15158E+06,
0.17043E+06, 0.19137E+06, 0.21464E+06, 0.24049E+06, 0.26920E+06,
0.30107E+06, 0.33642E+06, 0.37563E+06, 0.41907E+06, 0.46719E+06,
0.52045E+06, 0.57936E+06, 0.64448E+06, 0.71641E+06, 0.79582E+06,
0.88341E+06, 0.97997E+06, 0.10863E+07, 0.12034E+07, 0.13323E+07,
0.14739E+07, 0.16295E+07, 0.18003E+07, 0.19877E+07, 0.21932E+07,
0.24183E+07, 0.26649E+07, 0.29346E+07, 0.32296E+07, 0.35519E+07,
0.39039E+07, 0.42881E+07, 0.47072E+07, 0.51639E+07, 0.56615E+07,
0.62032E+07, 0.67926E+07, 0.74335E+07, 0.81299E+07, 0.88862E+07,
0.97071E+07, 0.10598E+08, 0.11563E+08, 0.12609E+08, 0.13742E+08,
0.14968E+08, 0.16294E+08, 0.17728E+08, 0.19277E+08, 0.20950E+08,
0.22756E+08, 0.24704E+08, 0.26805E+08, 0.29069E+08, 0.31507E+08,
0.34132E+08, 0.36957E+08, 0.39995E+08, 0.43260E+08, 0.46769E+08,
0.50538E+08, 0.54583E+08, 0.58923E+08, 0.63578E+08, 0.68568E+08,
0.73914E+08, 0.79640E+08, 0.85770E+08, 0.92329E+08, 0.99345E+08,
0.10685E+09, 0.11486E+09, 0.12342E+09, 0.13257E+09, 0.14233E+09,
0.15274E+09, 0.16384E+09, 0.17568E+09, 0.18829E+09, 0.20173E+09,
0.21604E+09, 0.23127E+09, 0.24748E+09, 0.26471E+09, 0.28304E+09,
0.30252E+09, 0.32322E+09, 0.34520E+09, 0.36853E+09, 0.39330E+09,
0.41958E+09, 0.44745E+09, 0.47701E+09, 0.50833E+09, 0.54151E+09,
0.57667E+09, 0.61389E+09, 0.65329E+09, 0.69498E+09, 0.73909E+09,
0.78573E+09])
# --------------- CH3Cl 217: M = 24, I = 2 ---------------------
M = 24
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(4.)
TIPS_ISO_HASH[(M, I)] = float32([0.51327E+04, 0.86469E+04, 0.12726E+05,
0.17306E+05, 0.22354E+05, 0.27863E+05, 0.33853E+05, 0.40364E+05,
0.47453E+05, 0.55197E+05, 0.63684E+05, 0.73016E+05, 0.83306E+05,
0.94681E+05, 0.10728E+06, 0.12125E+06, 0.13676E+06, 0.15399E+06,
0.17313E+06, 0.19441E+06, 0.21804E+06, 0.24430E+06, 0.27347E+06,
0.30584E+06, 0.34176E+06, 0.38158E+06, 0.42572E+06, 0.47460E+06,
0.52871E+06, 0.58855E+06, 0.65471E+06, 0.72778E+06, 0.80844E+06,
0.89743E+06, 0.99552E+06, 0.11036E+07, 0.12225E+07, 0.13534E+07,
0.14973E+07, 0.16553E+07, 0.18289E+07, 0.20193E+07, 0.22280E+07,
0.24567E+07, 0.27072E+07, 0.29812E+07, 0.32808E+07, 0.36083E+07,
0.39659E+07, 0.43562E+07, 0.47819E+07, 0.52459E+07, 0.57514E+07,
0.63017E+07, 0.69005E+07, 0.75515E+07, 0.82590E+07, 0.90273E+07,
0.98613E+07, 0.10766E+08, 0.11747E+08, 0.12809E+08, 0.13960E+08,
0.15206E+08, 0.16553E+08, 0.18010E+08, 0.19584E+08, 0.21283E+08,
0.23118E+08, 0.25097E+08, 0.27231E+08, 0.29531E+08, 0.32008E+08,
0.34674E+08, 0.37544E+08, 0.40630E+08, 0.43948E+08, 0.47513E+08,
0.51341E+08, 0.55451E+08, 0.59860E+08, 0.64589E+08, 0.69658E+08,
0.75089E+08, 0.80906E+08, 0.87134E+08, 0.93797E+08, 0.10092E+09,
0.10854E+09, 0.11669E+09, 0.12539E+09, 0.13467E+09, 0.14459E+09,
0.15517E+09, 0.16645E+09, 0.17847E+09, 0.19129E+09, 0.20494E+09,
0.21948E+09, 0.23495E+09, 0.25141E+09, 0.26893E+09, 0.28754E+09,
0.30733E+09, 0.32836E+09, 0.35069E+09, 0.37440E+09, 0.39956E+09,
0.42626E+09, 0.45457E+09, 0.48460E+09, 0.51642E+09, 0.55013E+09,
0.58585E+09, 0.62366E+09, 0.66369E+09, 0.70605E+09, 0.75085E+09,
0.79824E+09])
# --------------- H2O2 1661: M = 25, I = 1 ---------------------
M = 25
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.62392E+03, 0.10958E+04, 0.16692E+04,
0.23492E+04, 0.31427E+04, 0.40574E+04, 0.51014E+04, 0.62840E+04,
0.76157E+04, 0.91085E+04, 0.10776E+05, 0.12633E+05, 0.14696E+05,
0.16983E+05, 0.19515E+05, 0.22312E+05, 0.25396E+05, 0.28792E+05,
0.32526E+05, 0.36625E+05, 0.41118E+05, 0.46036E+05, 0.51410E+05,
0.57275E+05, 0.63667E+05, 0.70623E+05, 0.78185E+05, 0.86394E+05,
0.95295E+05, 0.10493E+06, 0.11536E+06, 0.12662E+06, 0.13878E+06,
0.15188E+06, 0.16600E+06, 0.18118E+06, 0.19750E+06, 0.21503E+06,
0.23383E+06, 0.25398E+06, 0.27556E+06, 0.29864E+06, 0.32333E+06,
0.34970E+06, 0.37784E+06, 0.40786E+06, 0.43985E+06, 0.47392E+06,
0.51018E+06, 0.54874E+06, 0.58972E+06, 0.63324E+06, 0.67943E+06,
0.72843E+06, 0.78037E+06, 0.83540E+06, 0.89366E+06, 0.95530E+06,
0.10205E+07, 0.10894E+07, 0.11622E+07, 0.12391E+07, 0.13202E+07,
0.14057E+07, 0.14959E+07, 0.15909E+07, 0.16910E+07, 0.17963E+07,
0.19072E+07, 0.20237E+07, 0.21463E+07, 0.22750E+07, 0.24102E+07,
0.25522E+07, 0.27012E+07, 0.28575E+07, 0.30213E+07, 0.31931E+07,
0.33730E+07, 0.35615E+07, 0.37588E+07, 0.39653E+07, 0.41813E+07,
0.44072E+07, 0.46433E+07, 0.48901E+07, 0.51479E+07, 0.54171E+07,
0.56982E+07, 0.59915E+07, 0.62976E+07, 0.66167E+07, 0.69495E+07,
0.72963E+07, 0.76577E+07, 0.80342E+07, 0.84262E+07, 0.88343E+07,
0.92591E+07, 0.97011E+07, 0.10161E+08, 0.10639E+08, 0.11136E+08,
0.11652E+08, 0.12189E+08, 0.12746E+08, 0.13325E+08, 0.13926E+08,
0.14550E+08, 0.15198E+08, 0.15870E+08, 0.16566E+08, 0.17289E+08,
0.18038E+08, 0.18814E+08, 0.19619E+08, 0.20452E+08, 0.21315E+08,
0.22209E+08])
# --------------- C2H2 1221: M = 26, I = 1 ---------------------
M = 26
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.71617E+02, 0.10121E+03, 0.13092E+03,
0.16104E+03, 0.19218E+03, 0.22509E+03, 0.26062E+03, 0.29959E+03,
0.34281E+03, 0.39103E+03, 0.44503E+03, 0.50558E+03, 0.57346E+03,
0.64950E+03, 0.73457E+03, 0.82960E+03, 0.93557E+03, 0.10535E+04,
0.11846E+04, 0.13301E+04, 0.14911E+04, 0.16692E+04, 0.18658E+04,
0.20825E+04, 0.23211E+04, 0.25833E+04, 0.28711E+04, 0.31867E+04,
0.35323E+04, 0.39102E+04, 0.43230E+04, 0.47735E+04, 0.52645E+04,
0.57991E+04, 0.63807E+04, 0.70127E+04, 0.76988E+04, 0.84430E+04,
0.92495E+04, 0.10123E+05, 0.11067E+05, 0.12088E+05, 0.13191E+05,
0.14381E+05, 0.15664E+05, 0.17047E+05, 0.18536E+05, 0.20137E+05,
0.21859E+05, 0.23710E+05, 0.25696E+05, 0.27827E+05, 0.30112E+05,
0.32561E+05, 0.35183E+05, 0.37990E+05, 0.40991E+05, 0.44199E+05,
0.47626E+05, 0.51285E+05, 0.55189E+05, 0.59353E+05, 0.63791E+05,
0.68518E+05, 0.73551E+05, 0.78908E+05, 0.84604E+05, 0.90661E+05,
0.97095E+05, 0.10393E+06, 0.11118E+06, 0.11888E+06, 0.12704E+06,
0.13569E+06, 0.14486E+06, 0.15457E+06, 0.16485E+06, 0.17572E+06,
0.18722E+06, 0.19938E+06, 0.21223E+06, 0.22581E+06, 0.24014E+06,
0.25527E+06, 0.27123E+06, 0.28807E+06, 0.30582E+06, 0.32452E+06,
0.34423E+06, 0.36498E+06, 0.38683E+06, 0.40982E+06, 0.43401E+06,
0.45944E+06, 0.48618E+06, 0.51428E+06, 0.54380E+06, 0.57480E+06,
0.60735E+06, 0.64151E+06, 0.67735E+06, 0.71494E+06, 0.75436E+06,
0.79568E+06, 0.83898E+06, 0.88434E+06, 0.93184E+06, 0.98158E+06,
0.10336E+07, 0.10881E+07, 0.11451E+07, 0.12047E+07, 0.12670E+07,
0.13321E+07, 0.14002E+07, 0.14713E+07, 0.15455E+07, 0.16231E+07,
0.17040E+07])
# --------------- C2H2 1231: M = 26, I = 2 ---------------------
M = 26
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(8.)
TIPS_ISO_HASH[(M, I)] = float32([0.28647E+03, 0.40486E+03, 0.52369E+03,
0.64419E+03, 0.76874E+03, 0.90040E+03, 0.10425E+04, 0.11984E+04,
0.13713E+04, 0.15642E+04, 0.17802E+04, 0.20223E+04, 0.22939E+04,
0.25981E+04, 0.29384E+04, 0.33185E+04, 0.37424E+04, 0.42142E+04,
0.47386E+04, 0.53203E+04, 0.59646E+04, 0.66769E+04, 0.74633E+04,
0.83302E+04, 0.92845E+04, 0.10333E+05, 0.11485E+05, 0.12747E+05,
0.14129E+05, 0.15641E+05, 0.17292E+05, 0.19094E+05, 0.21058E+05,
0.23197E+05, 0.25523E+05, 0.28051E+05, 0.30796E+05, 0.33773E+05,
0.36999E+05, 0.40492E+05, 0.44270E+05, 0.48354E+05, 0.52765E+05,
0.57525E+05, 0.62658E+05, 0.68189E+05, 0.74144E+05, 0.80551E+05,
0.87439E+05, 0.94840E+05, 0.10279E+06, 0.11131E+06, 0.12045E+06,
0.13025E+06, 0.14074E+06, 0.15196E+06, 0.16397E+06, 0.17680E+06,
0.19051E+06, 0.20514E+06, 0.22076E+06, 0.23742E+06, 0.25517E+06,
0.27408E+06, 0.29421E+06, 0.31564E+06, 0.33842E+06, 0.36265E+06,
0.38839E+06, 0.41572E+06, 0.44474E+06, 0.47553E+06, 0.50818E+06,
0.54278E+06, 0.57945E+06, 0.61829E+06, 0.65940E+06, 0.70289E+06,
0.74890E+06, 0.79754E+06, 0.84894E+06, 0.90324E+06, 0.96057E+06,
0.10211E+07, 0.10849E+07, 0.11523E+07, 0.12233E+07, 0.12981E+07,
0.13769E+07, 0.14599E+07, 0.15473E+07, 0.16393E+07, 0.17361E+07,
0.18378E+07, 0.19447E+07, 0.20571E+07, 0.21752E+07, 0.22992E+07,
0.24294E+07, 0.25661E+07, 0.27094E+07, 0.28598E+07, 0.30175E+07,
0.31828E+07, 0.33560E+07, 0.35374E+07, 0.37274E+07, 0.39264E+07,
0.41346E+07, 0.43525E+07, 0.45805E+07, 0.48188E+07, 0.50681E+07,
0.53286E+07, 0.56008E+07, 0.58852E+07, 0.61823E+07, 0.64924E+07,
0.68162E+07])
# --------------- C2H2 1222: M = 26, I = 3 ---------------------
M = 26
I = 3
TIPS_GSI_HASH[(M, I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M, I)] = float32([0.24843E+03, 0.35373E+03, 0.45997E+03,
0.56930E+03, 0.68497E+03, 0.81065E+03, 0.94999E+03, 0.11065E+04,
0.12837E+04, 0.14848E+04, 0.17135E+04, 0.19731E+04, 0.22675E+04,
0.26205E+04, 0.29999E+04, 0.34276E+04, 0.39086E+04, 0.44486E+04,
0.50533E+04, 0.57294E+04, 0.64837E+04, 0.73237E+04, 0.82576E+04,
0.92941E+04, 0.10443E+05, 0.11714E+05, 0.13117E+05, 0.14666E+05,
0.16373E+05, 0.18250E+05, 0.20313E+05, 0.22578E+05, 0.25060E+05,
0.27777E+05, 0.30750E+05, 0.33997E+05, 0.37541E+05, 0.41405E+05,
0.45614E+05, 0.50192E+05, 0.55170E+05, 0.60576E+05, 0.66441E+05,
0.72799E+05, 0.79686E+05, 0.87140E+05, 0.95199E+05, 0.10391E+06,
0.11331E+06, 0.12345E+06, 0.13438E+06, 0.14615E+06, 0.15882E+06,
0.17245E+06, 0.18710E+06, 0.20283E+06, 0.21972E+06, 0.23783E+06,
0.25724E+06, 0.27804E+06, 0.30030E+06, 0.32411E+06, 0.34958E+06,
0.37679E+06, 0.40585E+06, 0.43686E+06, 0.46994E+06, 0.50521E+06,
0.54280E+06, 0.58282E+06, 0.62542E+06, 0.67074E+06, 0.71892E+06,
0.77013E+06, 0.82453E+06, 0.88228E+06, 0.94356E+06, 0.10086E+07,
0.10775E+07, 0.11505E+07, 0.12279E+07, 0.13098E+07, 0.13964E+07,
0.14881E+07, 0.15850E+07, 0.16875E+07, 0.17957E+07, 0.19100E+07,
0.20307E+07, 0.21580E+07, 0.22923E+07, 0.24339E+07, 0.25831E+07,
0.27404E+07, 0.29060E+07, 0.30803E+07, 0.32638E+07, 0.34568E+07,
0.36598E+07, 0.38733E+07, 0.40976E+07, 0.43332E+07, 0.45807E+07,
0.48406E+07, 0.51133E+07, 0.53995E+07, 0.56997E+07, 0.60144E+07,
0.63444E+07, 0.66901E+07, 0.70524E+07, 0.74317E+07, 0.78289E+07,
0.82447E+07, 0.86797E+07, 0.91348E+07, 0.96108E+07, 0.10108E+08,
0.10629E+08])
# --------------- C2H6 1221: M = 27, I = 1 ---------------------
M = 27
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.47267E+04, 0.80011E+04, 0.11928E+05,
0.16564E+05, 0.21985E+05, 0.28287E+05, 0.35590E+05, 0.44049E+05,
0.53862E+05, 0.65277E+05, 0.78597E+05, 0.94191E+05, 0.11250E+06,
0.13407E+06, 0.15952E+06, 0.18962E+06, 0.22526E+06, 0.26751E+06,
0.31763E+06, 0.37714E+06, 0.44780E+06, 0.53174E+06, 0.63145E+06,
0.74989E+06, 0.89056E+06, 0.10576E+07, 0.12559E+07, 0.14912E+07,
0.17704E+07, 0.21013E+07, 0.24936E+07, 0.29582E+07, 0.35083E+07,
0.41591E+07, 0.49286E+07, 0.58379E+07, 0.69116E+07, 0.81787E+07,
0.96728E+07, 0.11433E+08, 0.13506E+08, 0.15945E+08, 0.18812E+08,
0.22180E+08, 0.26134E+08, 0.30770E+08, 0.36204E+08, 0.42565E+08,
0.50008E+08, 0.58708E+08, 0.68868E+08, 0.80725E+08, 0.94548E+08,
0.11065E+09, 0.12940E+09, 0.15119E+09, 0.17652E+09, 0.20593E+09,
0.24003E+09, 0.27956E+09, 0.32533E+09, 0.37829E+09, 0.43951E+09,
0.51021E+09, 0.59180E+09, 0.68588E+09, 0.79427E+09, 0.91904E+09,
0.10625E+10, 0.12275E+10, 0.14168E+10, 0.16341E+10, 0.18831E+10,
0.21684E+10, 0.24949E+10, 0.28684E+10, 0.32951E+10, 0.37823E+10,
0.43382E+10, 0.49719E+10, 0.56938E+10, 0.65156E+10, 0.74502E+10,
0.85125E+10, 0.97190E+10, 0.11088E+11, 0.12641E+11, 0.14401E+11,
0.16393E+11, 0.18648E+11, 0.21198E+11, 0.24079E+11, 0.27332E+11,
0.31003E+11, 0.35142E+11, 0.39807E+11, 0.45060E+11, 0.50972E+11,
0.57620E+11, 0.65091E+11, 0.73483E+11, 0.82902E+11, 0.93467E+11,
0.10531E+12, 0.11858E+12, 0.13343E+12, 0.15005E+12, 0.16864E+12,
0.18941E+12, 0.21260E+12, 0.23849E+12, 0.26737E+12, 0.29957E+12,
0.33545E+12, 0.37541E+12, 0.41987E+12, 0.46934E+12, 0.52432E+12,
0.58542E+12])
# --------------- C2H6 1231: M = 27, I = 2 ---------------------
M = 27
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M, I)] = float32([0.24128E+04, 0.40845E+04, 0.60896E+04,
0.84564E+04, 0.11224E+05, 0.14442E+05, 0.18170E+05, 0.22490E+05,
0.27501E+05, 0.33329E+05, 0.40131E+05, 0.48094E+05, 0.57446E+05,
0.68459E+05, 0.81458E+05, 0.96828E+05, 0.11503E+06, 0.13661E+06,
0.16221E+06, 0.19260E+06, 0.22869E+06, 0.27156E+06, 0.32249E+06,
0.38298E+06, 0.45483E+06, 0.54015E+06, 0.64144E+06, 0.76164E+06,
0.90423E+06, 0.10733E+07, 0.12737E+07, 0.15110E+07, 0.17920E+07,
0.21245E+07, 0.25176E+07, 0.29821E+07, 0.35307E+07, 0.41780E+07,
0.49414E+07, 0.58408E+07, 0.68999E+07, 0.81461E+07, 0.96110E+07,
0.11332E+08, 0.13352E+08, 0.15721E+08, 0.18497E+08, 0.21748E+08,
0.25551E+08, 0.29997E+08, 0.35189E+08, 0.41248E+08, 0.48313E+08,
0.56542E+08, 0.66122E+08, 0.77262E+08, 0.90206E+08, 0.10523E+09,
0.12267E+09, 0.14287E+09, 0.16626E+09, 0.19333E+09, 0.22462E+09,
0.26076E+09, 0.30247E+09, 0.35056E+09, 0.40596E+09, 0.46974E+09,
0.54310E+09, 0.62740E+09, 0.72420E+09, 0.83527E+09, 0.96260E+09,
0.11084E+10, 0.12754E+10, 0.14663E+10, 0.16845E+10, 0.19336E+10,
0.22178E+10, 0.25418E+10, 0.29109E+10, 0.33311E+10, 0.38090E+10,
0.43522E+10, 0.49691E+10, 0.56693E+10, 0.64633E+10, 0.73631E+10,
0.83821E+10, 0.95352E+10, 0.10839E+11, 0.12312E+11, 0.13976E+11,
0.15854E+11, 0.17971E+11, 0.20357E+11, 0.23043E+11, 0.26067E+11,
0.29467E+11, 0.33289E+11, 0.37581E+11, 0.42399E+11, 0.47804E+11,
0.53862E+11, 0.60649E+11, 0.68247E+11, 0.76750E+11, 0.86257E+11,
0.96882E+11, 0.10875E+12, 0.12199E+12, 0.13677E+12, 0.15325E+12,
0.17160E+12, 0.19204E+12, 0.21480E+12, 0.24010E+12, 0.26824E+12,
0.29950E+12])
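# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the tabulated data): the blocks above and
# below populate TIPS_ISO_HASH, keyed by the tuple (M, I) where M is the
# HITRAN molecule number and I the isotopologue index, with TIPS-2011
# partition sums on a fixed temperature grid, and TIPS_GSI_HASH with the
# corresponding state-independent degeneracy factors.  The helper below shows
# one way such a table could be queried by linear interpolation.  The
# 60 K .. 3010 K grid in 25 K steps is an assumption made for this sketch
# (it matches the 119 tabulated points per entry); if the module defines its
# own temperature grid elsewhere, that grid is authoritative.  Entries tagged
# "not in TIPS-2011" hold a single zero and cannot be interpolated.
def _sketch_partition_sum(M, I, T):
    """Linearly interpolate Q(T) from TIPS_ISO_HASH[(M, I)] (illustration only)."""
    from numpy import arange, interp
    Q_grid = TIPS_ISO_HASH[(M, I)]
    if len(Q_grid) == 1:  # placeholder entries ("not in TIPS-2011")
        raise ValueError('no TIPS data for M=%d, I=%d' % (M, I))
    # Assumed TIPS-2011 grid: 60 K, 85 K, ..., 3010 K (25 K spacing).
    T_grid = 60.0 + 25.0 * arange(len(Q_grid), dtype='d')
    return float(interp(T, T_grid, Q_grid))
# Example usage (temperatures in Kelvin, data taken from this file):
#   _sketch_partition_sum(28, 1, 296.0)   # PH3, principal isotopologue
# ---------------------------------------------------------------------------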
# --------------- PH3 1111: M = 28, I = 1 ---------------------
M = 28
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M, I)] = float32([0.29652E+03, 0.49643E+03, 0.72810E+03,
0.98777E+03, 0.12729E+04, 0.15820E+04, 0.19145E+04, 0.22708E+04,
0.26520E+04, 0.30600E+04, 0.34971E+04, 0.39662E+04, 0.44702E+04,
0.50126E+04, 0.55970E+04, 0.62273E+04, 0.69075E+04, 0.76421E+04,
0.84357E+04, 0.92933E+04, 0.10220E+05, 0.11222E+05, 0.12304E+05,
0.13473E+05, 0.14736E+05, 0.16099E+05, 0.17571E+05, 0.19160E+05,
0.20873E+05, 0.22720E+05, 0.24710E+05, 0.26854E+05, 0.29162E+05,
0.31646E+05, 0.34317E+05, 0.37188E+05, 0.40273E+05, 0.43585E+05,
0.47140E+05, 0.50953E+05, 0.55040E+05, 0.59419E+05, 0.64108E+05,
0.69127E+05, 0.74496E+05, 0.80236E+05, 0.86369E+05, 0.92918E+05,
0.99909E+05, 0.10737E+06, 0.11532E+06, 0.12380E+06, 0.13282E+06,
0.14244E+06, 0.15266E+06, 0.16354E+06, 0.17511E+06, 0.18739E+06,
0.20044E+06, 0.21430E+06, 0.22900E+06, 0.24459E+06, 0.26111E+06,
0.27862E+06, 0.29716E+06, 0.31680E+06, 0.33757E+06, 0.35954E+06,
0.38277E+06, 0.40733E+06, 0.43326E+06, 0.46065E+06, 0.48955E+06,
0.52005E+06, 0.55222E+06, 0.58614E+06, 0.62188E+06, 0.65953E+06,
0.69917E+06, 0.74091E+06, 0.78483E+06, 0.83103E+06, 0.87960E+06,
0.93067E+06, 0.98432E+06, 0.10407E+07, 0.10999E+07, 0.11620E+07,
0.12272E+07, 0.12956E+07, 0.13673E+07, 0.14425E+07, 0.15212E+07,
0.16038E+07, 0.16902E+07, 0.17808E+07, 0.18755E+07, 0.19746E+07,
0.20784E+07, 0.21868E+07, 0.23002E+07, 0.24187E+07, 0.25425E+07,
0.26719E+07, 0.28070E+07, 0.29480E+07, 0.30952E+07, 0.32488E+07,
0.34091E+07, 0.35762E+07, 0.37504E+07, 0.39320E+07, 0.41213E+07,
0.43185E+07, 0.45239E+07, 0.47378E+07, 0.49605E+07, 0.51923E+07,
0.54335E+07])
# --------------- COF2 269: M = 29, I = 1 ---------------------
M = 29
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.54999E+04, 0.92749E+04, 0.13668E+05,
0.18643E+05, 0.24224E+05, 0.30487E+05, 0.37547E+05, 0.45543E+05,
0.54639E+05, 0.65019E+05, 0.76886E+05, 0.90462E+05, 0.10600E+06,
0.12377E+06, 0.14407E+06, 0.16723E+06, 0.19363E+06, 0.22367E+06,
0.25780E+06, 0.29650E+06, 0.34031E+06, 0.38982E+06, 0.44568E+06,
0.50859E+06, 0.57932E+06, 0.65872E+06, 0.74770E+06, 0.84724E+06,
0.95844E+06, 0.10825E+07, 0.12205E+07, 0.13741E+07, 0.15446E+07,
0.17336E+07, 0.19428E+07, 0.21742E+07, 0.24296E+07, 0.27113E+07,
0.30214E+07, 0.33626E+07, 0.37373E+07, 0.41484E+07, 0.45989E+07,
0.50921E+07, 0.56313E+07, 0.62202E+07, 0.68626E+07, 0.75628E+07,
0.83251E+07, 0.91542E+07, 0.10055E+08, 0.11033E+08, 0.12093E+08,
0.13242E+08, 0.14486E+08, 0.15831E+08, 0.17284E+08, 0.18853E+08,
0.20546E+08, 0.22371E+08, 0.24335E+08, 0.26450E+08, 0.28724E+08,
0.31167E+08, 0.33790E+08, 0.36605E+08, 0.39623E+08, 0.42856E+08,
0.46318E+08, 0.50022E+08, 0.53983E+08, 0.58215E+08, 0.62735E+08,
0.67558E+08, 0.72702E+08, 0.78186E+08, 0.84028E+08, 0.90247E+08,
0.96865E+08, 0.10390E+09, 0.11138E+09, 0.11933E+09, 0.12777E+09,
0.13672E+09, 0.14622E+09, 0.15629E+09, 0.16695E+09, 0.17825E+09,
0.19021E+09, 0.20287E+09, 0.21625E+09, 0.23039E+09, 0.24534E+09,
0.26113E+09, 0.27779E+09, 0.29538E+09, 0.31392E+09, 0.33348E+09,
0.35409E+09, 0.37580E+09, 0.39867E+09, 0.42274E+09, 0.44806E+09,
0.47470E+09, 0.50271E+09, 0.53215E+09, 0.56308E+09, 0.59557E+09,
0.62968E+09, 0.66548E+09, 0.70304E+09, 0.74243E+09, 0.78374E+09,
0.82703E+09, 0.87240E+09, 0.91992E+09, 0.96967E+09, 0.10218E+10,
0.10763E+10])
# --------------- COF2 369: M = 29, I = 2 --------------------- not in TIPS-2011
M = 29
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(0.)
TIPS_ISO_HASH[(M, I)] = float32([0.])
# --------------- SF6 29: M = 30, I = 1 ---------------------
M = 30
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.46373E+05, 0.78844E+05, 0.11939E+06,
0.17183E+06, 0.24247E+06, 0.34059E+06, 0.47963E+06, 0.67906E+06,
0.96713E+06, 0.13848E+07, 0.19911E+07, 0.28714E+07, 0.41481E+07,
0.59956E+07, 0.86617E+07, 0.12496E+08, 0.17991E+08, 0.25832E+08,
0.36971E+08, 0.52724E+08, 0.74895E+08, 0.10595E+09, 0.14923E+09,
0.20925E+09, 0.29208E+09, 0.40582E+09, 0.56124E+09, 0.77259E+09,
0.10586E+10, 0.14439E+10, 0.19605E+10, 0.26500E+10, 0.35662E+10,
0.47781E+10, 0.63747E+10, 0.84689E+10, 0.11205E+11, 0.14765E+11,
0.19378E+11, 0.25336E+11, 0.32998E+11, 0.42819E+11, 0.55361E+11,
0.71323E+11, 0.91569E+11, 0.11716E+12, 0.14941E+12, 0.18992E+12,
0.24065E+12, 0.30398E+12, 0.38283E+12, 0.48069E+12, 0.60182E+12,
0.75136E+12, 0.93546E+12, 0.11615E+13, 0.14384E+13, 0.17767E+13,
0.21890E+13, 0.26903E+13, 0.32984E+13, 0.40344E+13, 0.49232E+13,
0.59942E+13, 0.72819E+13, 0.88272E+13, 0.10678E+14, 0.12889E+14,
0.15527E+14, 0.18666E+14, 0.22397E+14, 0.26823E+14, 0.32062E+14,
0.38253E+14, 0.45558E+14, 0.54161E+14, 0.64277E+14, 0.76153E+14,
0.90072E+14, 0.10636E+15, 0.12539E+15, 0.14759E+15, 0.17345E+15,
0.20354E+15, 0.23848E+15, 0.27902E+15, 0.32597E+15, 0.38028E+15,
0.44303E+15, 0.51542E+15, 0.59883E+15, 0.69482E+15, 0.80516E+15,
0.93182E+15, 0.10770E+16, 0.12434E+16, 0.14336E+16, 0.16511E+16,
0.18992E+16, 0.21821E+16, 0.25043E+16, 0.28709E+16, 0.32875E+16,
0.37604E+16, 0.42968E+16, 0.49046E+16, 0.55925E+16, 0.63704E+16,
0.72492E+16, 0.82411E+16, 0.93596E+16, 0.10620E+17, 0.12038E+17,
0.13633E+17, 0.15425E+17, 0.17438E+17, 0.19694E+17, 0.22224E+17,
0.25057E+17])
# --------------- H2S 121: M = 31, I = 1 ---------------------
M = 31
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.47192E+02, 0.78671E+02, 0.11510E+03,
0.15589E+03, 0.20061E+03, 0.24896E+03, 0.30070E+03, 0.35571E+03,
0.41386E+03, 0.47513E+03, 0.53951E+03, 0.60703E+03, 0.67772E+03,
0.75167E+03, 0.82896E+03, 0.90969E+03, 0.99396E+03, 0.10819E+04,
0.11736E+04, 0.12692E+04, 0.13689E+04, 0.14727E+04, 0.15809E+04,
0.16937E+04, 0.18111E+04, 0.19333E+04, 0.20606E+04, 0.21931E+04,
0.23309E+04, 0.24744E+04, 0.26236E+04, 0.27788E+04, 0.29403E+04,
0.31081E+04, 0.32825E+04, 0.34638E+04, 0.36522E+04, 0.38478E+04,
0.40510E+04, 0.42619E+04, 0.44808E+04, 0.47080E+04, 0.49437E+04,
0.51881E+04, 0.54415E+04, 0.57042E+04, 0.59764E+04, 0.62584E+04,
0.65505E+04, 0.68529E+04, 0.71660E+04, 0.74899E+04, 0.78251E+04,
0.81718E+04, 0.85303E+04, 0.89008E+04, 0.92838E+04, 0.96795E+04,
0.10088E+05, 0.10510E+05, 0.10946E+05, 0.11396E+05, 0.11860E+05,
0.12339E+05, 0.12833E+05, 0.13342E+05, 0.13867E+05, 0.14408E+05,
0.14966E+05, 0.15540E+05, 0.16132E+05, 0.16741E+05, 0.17368E+05,
0.18013E+05, 0.18677E+05, 0.19361E+05, 0.20064E+05, 0.20786E+05,
0.21529E+05, 0.22293E+05, 0.23078E+05, 0.23885E+05, 0.24714E+05,
0.25565E+05, 0.26439E+05, 0.27337E+05, 0.28258E+05, 0.29204E+05,
0.30174E+05, 0.31170E+05, 0.32191E+05, 0.33239E+05, 0.34313E+05,
0.35414E+05, 0.36543E+05, 0.37700E+05, 0.38886E+05, 0.40101E+05,
0.41346E+05, 0.42621E+05, 0.43926E+05, 0.45263E+05, 0.46631E+05,
0.48033E+05, 0.49466E+05, 0.50934E+05, 0.52435E+05, 0.53971E+05,
0.55542E+05, 0.57149E+05, 0.58792E+05, 0.60472E+05, 0.62190E+05,
0.63946E+05, 0.65740E+05, 0.67574E+05, 0.69448E+05, 0.71362E+05,
0.73318E+05])
# --------------- H2S 141: M = 31, I = 2 ---------------------
M = 31
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.47310E+02, 0.78869E+02, 0.11539E+03,
0.15628E+03, 0.20112E+03, 0.24959E+03, 0.30147E+03, 0.35661E+03,
0.41491E+03, 0.47634E+03, 0.54088E+03, 0.60857E+03, 0.67945E+03,
0.75359E+03, 0.83107E+03, 0.91201E+03, 0.99649E+03, 0.10846E+04,
0.11766E+04, 0.12724E+04, 0.13724E+04, 0.14765E+04, 0.15850E+04,
0.16980E+04, 0.18157E+04, 0.19382E+04, 0.20658E+04, 0.21987E+04,
0.23369E+04, 0.24807E+04, 0.26303E+04, 0.27859E+04, 0.29478E+04,
0.31160E+04, 0.32909E+04, 0.34727E+04, 0.36615E+04, 0.38576E+04,
0.40613E+04, 0.42728E+04, 0.44923E+04, 0.47200E+04, 0.49563E+04,
0.52013E+04, 0.54554E+04, 0.57188E+04, 0.59917E+04, 0.62744E+04,
0.65672E+04, 0.68704E+04, 0.71843E+04, 0.75090E+04, 0.78451E+04,
0.81926E+04, 0.85520E+04, 0.89236E+04, 0.93075E+04, 0.97042E+04,
0.10114E+05, 0.10537E+05, 0.10974E+05, 0.11425E+05, 0.11890E+05,
0.12370E+05, 0.12866E+05, 0.13376E+05, 0.13903E+05, 0.14445E+05,
0.15004E+05, 0.15580E+05, 0.16173E+05, 0.16784E+05, 0.17412E+05,
0.18059E+05, 0.18725E+05, 0.19410E+05, 0.20115E+05, 0.20839E+05,
0.21584E+05, 0.22350E+05, 0.23137E+05, 0.23946E+05, 0.24777E+05,
0.25630E+05, 0.26507E+05, 0.27407E+05, 0.28330E+05, 0.29278E+05,
0.30251E+05, 0.31249E+05, 0.32273E+05, 0.33324E+05, 0.34401E+05,
0.35505E+05, 0.36637E+05, 0.37797E+05, 0.38985E+05, 0.40204E+05,
0.41451E+05, 0.42729E+05, 0.44038E+05, 0.45379E+05, 0.46751E+05,
0.48155E+05, 0.49593E+05, 0.51064E+05, 0.52569E+05, 0.54109E+05,
0.55684E+05, 0.57295E+05, 0.58943E+05, 0.60627E+05, 0.62349E+05,
0.64109E+05, 0.65908E+05, 0.67747E+05, 0.69625E+05, 0.71544E+05,
0.73505E+05])
# --------------- H2S 131: M = 31, I = 3 ---------------------
M = 31
I = 3
TIPS_GSI_HASH[(M, I)] = __FloatType__(4.)
TIPS_ISO_HASH[(M, I)] = float32([0.18901E+03, 0.31509E+03, 0.46102E+03,
0.62437E+03, 0.80349E+03, 0.99713E+03, 0.12044E+04, 0.14247E+04,
0.16576E+04, 0.19030E+04, 0.21609E+04, 0.24313E+04, 0.27145E+04,
0.30106E+04, 0.33202E+04, 0.36436E+04, 0.39811E+04, 0.43332E+04,
0.47005E+04, 0.50835E+04, 0.54827E+04, 0.58987E+04, 0.63321E+04,
0.67836E+04, 0.72538E+04, 0.77434E+04, 0.82532E+04, 0.87838E+04,
0.93360E+04, 0.99106E+04, 0.10508E+05, 0.11130E+05, 0.11777E+05,
0.12449E+05, 0.13147E+05, 0.13874E+05, 0.14628E+05, 0.15412E+05,
0.16225E+05, 0.17070E+05, 0.17947E+05, 0.18857E+05, 0.19801E+05,
0.20780E+05, 0.21795E+05, 0.22847E+05, 0.23937E+05, 0.25067E+05,
0.26236E+05, 0.27448E+05, 0.28702E+05, 0.29999E+05, 0.31342E+05,
0.32730E+05, 0.34166E+05, 0.35650E+05, 0.37184E+05, 0.38769E+05,
0.40406E+05, 0.42097E+05, 0.43842E+05, 0.45644E+05, 0.47503E+05,
0.49421E+05, 0.51399E+05, 0.53439E+05, 0.55542E+05, 0.57709E+05,
0.59942E+05, 0.62242E+05, 0.64611E+05, 0.67051E+05, 0.69563E+05,
0.72148E+05, 0.74808E+05, 0.77545E+05, 0.80360E+05, 0.83255E+05,
0.86232E+05, 0.89291E+05, 0.92435E+05, 0.95667E+05, 0.98986E+05,
0.10240E+06, 0.10590E+06, 0.10949E+06, 0.11318E+06, 0.11697E+06,
0.12086E+06, 0.12484E+06, 0.12893E+06, 0.13313E+06, 0.13743E+06,
0.14184E+06, 0.14637E+06, 0.15100E+06, 0.15575E+06, 0.16062E+06,
0.16560E+06, 0.17071E+06, 0.17594E+06, 0.18129E+06, 0.18677E+06,
0.19238E+06, 0.19813E+06, 0.20400E+06, 0.21002E+06, 0.21617E+06,
0.22246E+06, 0.22890E+06, 0.23548E+06, 0.24221E+06, 0.24909E+06,
0.25612E+06, 0.26331E+06, 0.27065E+06, 0.27816E+06, 0.28583E+06,
0.29366E+06])
# --------------- HCOOH 126: M = 32, I = 1 ---------------------
M = 32
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(4.)
TIPS_ISO_HASH[(M, I)] = float32([0.31899E+04, 0.53773E+04, 0.79205E+04,
0.10792E+05, 0.13993E+05, 0.17550E+05, 0.21509E+05, 0.25930E+05,
0.30885E+05, 0.36460E+05, 0.42750E+05, 0.49864E+05, 0.57926E+05,
0.67071E+05, 0.77453E+05, 0.89243E+05, 0.10263E+06, 0.11783E+06,
0.13507E+06, 0.15462E+06, 0.17676E+06, 0.20183E+06, 0.23018E+06,
0.26221E+06, 0.29836E+06, 0.33911E+06, 0.38501E+06, 0.43664E+06,
0.49467E+06, 0.55981E+06, 0.63286E+06, 0.71470E+06, 0.80628E+06,
0.90865E+06, 0.10230E+07, 0.11505E+07, 0.12927E+07, 0.14509E+07,
0.16269E+07, 0.18225E+07, 0.20396E+07, 0.22804E+07, 0.25472E+07,
0.28425E+07, 0.31692E+07, 0.35301E+07, 0.39285E+07, 0.43681E+07,
0.48525E+07, 0.53858E+07, 0.59727E+07, 0.66178E+07, 0.73265E+07,
0.81042E+07, 0.89571E+07, 0.98918E+07, 0.10915E+08, 0.12035E+08,
0.13259E+08, 0.14597E+08, 0.16057E+08, 0.17650E+08, 0.19387E+08,
0.21279E+08, 0.23339E+08, 0.25579E+08, 0.28016E+08, 0.30663E+08,
0.33536E+08, 0.36655E+08, 0.40037E+08, 0.43701E+08, 0.47671E+08,
0.51967E+08, 0.56614E+08, 0.61639E+08, 0.67068E+08, 0.72930E+08,
0.79257E+08, 0.86082E+08, 0.93439E+08, 0.10137E+09, 0.10990E+09,
0.11909E+09, 0.12898E+09, 0.13960E+09, 0.15102E+09, 0.16329E+09,
0.17646E+09, 0.19059E+09, 0.20575E+09, 0.22200E+09, 0.23941E+09,
0.25806E+09, 0.27802E+09, 0.29938E+09, 0.32223E+09, 0.34666E+09,
0.37276E+09, 0.40064E+09, 0.43041E+09, 0.46218E+09, 0.49607E+09,
0.53221E+09, 0.57074E+09, 0.61179E+09, 0.65551E+09, 0.70206E+09,
0.75159E+09, 0.80430E+09, 0.86034E+09, 0.91992E+09, 0.98324E+09,
0.10505E+10, 0.11219E+10, 0.11977E+10, 0.12782E+10, 0.13635E+10,
0.14540E+10])
# --------------- HO2 166: M = 33, I = 1 ---------------------
M = 33
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M, I)] = float32([0.39277E+03, 0.66062E+03, 0.97123E+03,
0.13194E+04, 0.17014E+04, 0.21148E+04, 0.25578E+04, 0.30296E+04,
0.35297E+04, 0.40585E+04, 0.46167E+04, 0.52055E+04, 0.58264E+04,
0.64809E+04, 0.71707E+04, 0.78978E+04, 0.86641E+04, 0.94715E+04,
0.10322E+05, 0.11218E+05, 0.12161E+05, 0.13154E+05, 0.14198E+05,
0.15296E+05, 0.16449E+05, 0.17661E+05, 0.18933E+05, 0.20267E+05,
0.21666E+05, 0.23133E+05, 0.24669E+05, 0.26277E+05, 0.27960E+05,
0.29720E+05, 0.31560E+05, 0.33482E+05, 0.35489E+05, 0.37584E+05,
0.39769E+05, 0.42048E+05, 0.44423E+05, 0.46898E+05, 0.49475E+05,
0.52157E+05, 0.54948E+05, 0.57850E+05, 0.60868E+05, 0.64003E+05,
0.67261E+05, 0.70643E+05, 0.74154E+05, 0.77797E+05, 0.81575E+05,
0.85492E+05, 0.89553E+05, 0.93760E+05, 0.98118E+05, 0.10263E+06,
0.10730E+06, 0.11213E+06, 0.11713E+06, 0.12230E+06, 0.12765E+06,
0.13317E+06, 0.13888E+06, 0.14478E+06, 0.15086E+06, 0.15715E+06,
0.16363E+06, 0.17032E+06, 0.17723E+06, 0.18434E+06, 0.19168E+06,
0.19924E+06, 0.20704E+06, 0.21506E+06, 0.22333E+06, 0.23185E+06,
0.24061E+06, 0.24963E+06, 0.25891E+06, 0.26846E+06, 0.27828E+06,
0.28838E+06, 0.29876E+06, 0.30943E+06, 0.32039E+06, 0.33166E+06,
0.34323E+06, 0.35512E+06, 0.36732E+06, 0.37985E+06, 0.39271E+06,
0.40590E+06, 0.41944E+06, 0.43333E+06, 0.44758E+06, 0.46219E+06,
0.47717E+06, 0.49252E+06, 0.50826E+06, 0.52439E+06, 0.54091E+06,
0.55784E+06, 0.57518E+06, 0.59293E+06, 0.61112E+06, 0.62973E+06,
0.64878E+06, 0.66828E+06, 0.68824E+06, 0.70866E+06, 0.72955E+06,
0.75091E+06, 0.77276E+06, 0.79511E+06, 0.81795E+06, 0.84131E+06,
0.86518E+06])
# --------------- O 6: M = 34, I = 1 --------------------- not in TIPS-2011
M = 34
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(0.)
TIPS_ISO_HASH[(M, I)] = float32([0.])
# --------------- ClONO2 5646: M = 35, I = 1 ---------------------
M = 35
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(12.)
TIPS_ISO_HASH[(M, I)] = float32([0.11444E+06, 0.21121E+06, 0.34858E+06,
0.53934E+06, 0.80041E+06, 0.11539E+07, 0.16286E+07, 0.22614E+07,
0.30992E+07, 0.42015E+07, 0.56426E+07, 0.75152E+07, 0.99344E+07,
0.13042E+08, 0.17012E+08, 0.22058E+08, 0.28437E+08, 0.36463E+08,
0.46514E+08, 0.59042E+08, 0.74589E+08, 0.93801E+08, 0.11744E+09,
0.14643E+09, 0.18181E+09, 0.22486E+09, 0.27705E+09, 0.34009E+09,
0.41598E+09, 0.50705E+09, 0.61599E+09, 0.74590E+09, 0.90037E+09,
0.10835E+10, 0.13001E+10, 0.15554E+10, 0.18556E+10, 0.22079E+10,
0.26200E+10, 0.31012E+10, 0.36615E+10, 0.43126E+10, 0.50675E+10,
0.59409E+10, 0.69492E+10, 0.81110E+10, 0.94469E+10, 0.10980E+11,
0.12736E+11, 0.14745E+11, 0.17037E+11, 0.19649E+11, 0.22620E+11,
0.25994E+11, 0.29819E+11, 0.34150E+11, 0.39044E+11, 0.44568E+11,
0.50794E+11, 0.57799E+11, 0.65672E+11, 0.74506E+11, 0.84408E+11,
0.95490E+11, 0.10788E+12, 0.12171E+12, 0.13713E+12, 0.15431E+12,
0.17342E+12, 0.19465E+12, 0.21822E+12, 0.24435E+12, 0.27329E+12,
0.30530E+12, 0.34069E+12, 0.37976E+12, 0.42286E+12, 0.47034E+12,
0.52262E+12, 0.58012E+12, 0.64330E+12, 0.71267E+12, 0.78875E+12,
0.87214E+12, 0.96344E+12, 0.10633E+13, 0.11725E+13, 0.12918E+13,
0.14220E+13, 0.15640E+13, 0.17188E+13, 0.18873E+13, 0.20706E+13,
0.22700E+13, 0.24866E+13, 0.27218E+13, 0.29771E+13, 0.32538E+13,
0.35537E+13, 0.38784E+13, 0.42299E+13, 0.46100E+13, 0.50208E+13,
0.54645E+13, 0.59435E+13, 0.64603E+13, 0.70175E+13, 0.76180E+13,
0.82647E+13, 0.89608E+13, 0.97097E+13, 0.10515E+14, 0.11380E+14,
0.12310E+14, 0.13307E+14, 0.14378E+14, 0.15526E+14, 0.16756E+14,
0.18075E+14])
# --------------- ClONO2 7646: M = 35, I = 2 ---------------------
M = 35
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(12.)
TIPS_ISO_HASH[(M, I)] = float32([0.11735E+06, 0.21659E+06, 0.35745E+06,
0.55307E+06, 0.82078E+06, 0.11833E+07, 0.16700E+07, 0.23189E+07,
0.31781E+07, 0.43084E+07, 0.57862E+07, 0.77065E+07, 0.10187E+08,
0.13374E+08, 0.17445E+08, 0.22619E+08, 0.29161E+08, 0.37391E+08,
0.47698E+08, 0.60545E+08, 0.76487E+08, 0.96188E+08, 0.12043E+09,
0.15015E+09, 0.18644E+09, 0.23059E+09, 0.28410E+09, 0.34874E+09,
0.42657E+09, 0.51995E+09, 0.63167E+09, 0.76489E+09, 0.92329E+09,
0.11111E+10, 0.13331E+10, 0.15950E+10, 0.19029E+10, 0.22641E+10,
0.26867E+10, 0.31801E+10, 0.37547E+10, 0.44224E+10, 0.51965E+10,
0.60921E+10, 0.71261E+10, 0.83174E+10, 0.96873E+10, 0.11260E+11,
0.13061E+11, 0.15120E+11, 0.17471E+11, 0.20149E+11, 0.23196E+11,
0.26656E+11, 0.30578E+11, 0.35019E+11, 0.40038E+11, 0.45703E+11,
0.52087E+11, 0.59270E+11, 0.67343E+11, 0.76403E+11, 0.86556E+11,
0.97921E+11, 0.11062E+12, 0.12481E+12, 0.14062E+12, 0.15824E+12,
0.17783E+12, 0.19961E+12, 0.22377E+12, 0.25057E+12, 0.28024E+12,
0.31308E+12, 0.34936E+12, 0.38943E+12, 0.43362E+12, 0.48232E+12,
0.53593E+12, 0.59489E+12, 0.65968E+12, 0.73081E+12, 0.80883E+12,
0.89434E+12, 0.98797E+12, 0.10904E+13, 0.12024E+13, 0.13247E+13,
0.14582E+13, 0.16038E+13, 0.17625E+13, 0.19353E+13, 0.21233E+13,
0.23278E+13, 0.25499E+13, 0.27911E+13, 0.30528E+13, 0.33366E+13,
0.36442E+13, 0.39772E+13, 0.43376E+13, 0.47273E+13, 0.51486E+13,
0.56036E+13, 0.60948E+13, 0.66248E+13, 0.71962E+13, 0.78119E+13,
0.84751E+13, 0.91889E+13, 0.99569E+13, 0.10783E+14, 0.11670E+14,
0.12623E+14, 0.13646E+14, 0.14744E+14, 0.15921E+14, 0.17183E+14,
0.18535E+14])
# --------------- NOp 46: M = 36, I = 1 ---------------------
M = 36
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(3.)
TIPS_ISO_HASH[(M, I)] = float32([0.63956E+02, 0.90185E+02, 0.11642E+03,
0.14265E+03, 0.16889E+03, 0.19513E+03, 0.22138E+03, 0.24763E+03,
0.27388E+03, 0.30013E+03, 0.32639E+03, 0.35266E+03, 0.37894E+03,
0.40523E+03, 0.43155E+03, 0.45790E+03, 0.48429E+03, 0.51074E+03,
0.53725E+03, 0.56383E+03, 0.59052E+03, 0.61731E+03, 0.64422E+03,
0.67127E+03, 0.69846E+03, 0.72582E+03, 0.75335E+03, 0.78108E+03,
0.80901E+03, 0.83715E+03, 0.86552E+03, 0.89413E+03, 0.92298E+03,
0.95208E+03, 0.98144E+03, 0.10111E+04, 0.10410E+04, 0.10712E+04,
0.11017E+04, 0.11325E+04, 0.11636E+04, 0.11950E+04, 0.12268E+04,
0.12588E+04, 0.12912E+04, 0.13239E+04, 0.13570E+04, 0.13903E+04,
0.14241E+04, 0.14581E+04, 0.14926E+04, 0.15273E+04, 0.15624E+04,
0.15979E+04, 0.16337E+04, 0.16699E+04, 0.17065E+04, 0.17434E+04,
0.17806E+04, 0.18183E+04, 0.18563E+04, 0.18947E+04, 0.19334E+04,
0.19725E+04, 0.20120E+04, 0.20519E+04, 0.20921E+04, 0.21327E+04,
0.21737E+04, 0.22151E+04, 0.22568E+04, 0.22990E+04, 0.23415E+04,
0.23844E+04, 0.24276E+04, 0.24713E+04, 0.25153E+04, 0.25598E+04,
0.26046E+04, 0.26497E+04, 0.26953E+04, 0.27413E+04, 0.27876E+04,
0.28343E+04, 0.28815E+04, 0.29290E+04, 0.29769E+04, 0.30251E+04,
0.30738E+04, 0.31229E+04, 0.31723E+04, 0.32222E+04, 0.32724E+04,
0.33230E+04, 0.33740E+04, 0.34254E+04, 0.34772E+04, 0.35294E+04,
0.35819E+04, 0.36349E+04, 0.36883E+04, 0.37420E+04, 0.37961E+04,
0.38507E+04, 0.39056E+04, 0.39609E+04, 0.40166E+04, 0.40727E+04,
0.41292E+04, 0.41861E+04, 0.42434E+04, 0.43010E+04, 0.43591E+04,
0.44176E+04, 0.44764E+04, 0.45357E+04, 0.45953E+04, 0.46554E+04,
0.47158E+04])
# --------------- HOBr 169: M = 37, I = 1 ---------------------
M = 37
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(8.)
TIPS_ISO_HASH[(M, I)] = float32([0.24445E+04, 0.41206E+04, 0.60683E+04,
0.82610E+04, 0.10689E+05, 0.13352E+05, 0.16261E+05, 0.19427E+05,
0.22867E+05, 0.26600E+05, 0.30643E+05, 0.35018E+05, 0.39745E+05,
0.44844E+05, 0.50338E+05, 0.56249E+05, 0.62599E+05, 0.69410E+05,
0.76706E+05, 0.84509E+05, 0.92845E+05, 0.10174E+06, 0.11121E+06,
0.12128E+06, 0.13199E+06, 0.14335E+06, 0.15540E+06, 0.16815E+06,
0.18165E+06, 0.19591E+06, 0.21096E+06, 0.22684E+06, 0.24358E+06,
0.26120E+06, 0.27974E+06, 0.29922E+06, 0.31969E+06, 0.34118E+06,
0.36372E+06, 0.38735E+06, 0.41210E+06, 0.43800E+06, 0.46511E+06,
0.49345E+06, 0.52307E+06, 0.55400E+06, 0.58628E+06, 0.61997E+06,
0.65509E+06, 0.69170E+06, 0.72984E+06, 0.76954E+06, 0.81087E+06,
0.85386E+06, 0.89856E+06, 0.94502E+06, 0.99329E+06, 0.10434E+07,
0.10955E+07, 0.11495E+07, 0.12055E+07, 0.12636E+07, 0.13238E+07,
0.13862E+07, 0.14508E+07, 0.15177E+07, 0.15870E+07, 0.16587E+07,
0.17328E+07, 0.18095E+07, 0.18888E+07, 0.19707E+07, 0.20554E+07,
0.21428E+07, 0.22331E+07, 0.23263E+07, 0.24225E+07, 0.25217E+07,
0.26241E+07, 0.27296E+07, 0.28385E+07, 0.29506E+07, 0.30662E+07,
0.31853E+07, 0.33079E+07, 0.34341E+07, 0.35641E+07, 0.36979E+07,
0.38355E+07, 0.39771E+07, 0.41228E+07, 0.42725E+07, 0.44265E+07,
0.45848E+07, 0.47474E+07, 0.49145E+07, 0.50862E+07, 0.52624E+07,
0.54435E+07, 0.56293E+07, 0.58201E+07, 0.60159E+07, 0.62168E+07,
0.64229E+07, 0.66343E+07, 0.68511E+07, 0.70734E+07, 0.73013E+07,
0.75349E+07, 0.77742E+07, 0.80196E+07, 0.82709E+07, 0.85283E+07,
0.87920E+07, 0.90620E+07, 0.93385E+07, 0.96215E+07, 0.99112E+07,
0.10208E+08])
# --------------- HOBr 161: M = 37, I = 2 ---------------------
M = 37
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(8.)
TIPS_ISO_HASH[(M, I)] = float32([0.24350E+04, 0.41047E+04, 0.60448E+04,
0.82291E+04, 0.10648E+05, 0.13301E+05, 0.16200E+05, 0.19355E+05,
0.22784E+05, 0.26504E+05, 0.30534E+05, 0.34895E+05, 0.39607E+05,
0.44691E+05, 0.50169E+05, 0.56063E+05, 0.62394E+05, 0.69186E+05,
0.76461E+05, 0.84243E+05, 0.92555E+05, 0.10142E+06, 0.11087E+06,
0.12091E+06, 0.13159E+06, 0.14292E+06, 0.15494E+06, 0.16766E+06,
0.18112E+06, 0.19534E+06, 0.21036E+06, 0.22620E+06, 0.24289E+06,
0.26047E+06, 0.27896E+06, 0.29840E+06, 0.31882E+06, 0.34025E+06,
0.36274E+06, 0.38630E+06, 0.41099E+06, 0.43683E+06, 0.46387E+06,
0.49215E+06, 0.52169E+06, 0.55255E+06, 0.58475E+06, 0.61836E+06,
0.65340E+06, 0.68992E+06, 0.72796E+06, 0.76757E+06, 0.80880E+06,
0.85169E+06, 0.89628E+06, 0.94263E+06, 0.99079E+06, 0.10408E+07,
0.10927E+07, 0.11466E+07, 0.12025E+07, 0.12605E+07, 0.13205E+07,
0.13828E+07, 0.14472E+07, 0.15140E+07, 0.15831E+07, 0.16546E+07,
0.17286E+07, 0.18051E+07, 0.18842E+07, 0.19660E+07, 0.20504E+07,
0.21377E+07, 0.22277E+07, 0.23207E+07, 0.24167E+07, 0.25157E+07,
0.26178E+07, 0.27231E+07, 0.28317E+07, 0.29436E+07, 0.30589E+07,
0.31777E+07, 0.33001E+07, 0.34260E+07, 0.35557E+07, 0.36892E+07,
0.38265E+07, 0.39678E+07, 0.41131E+07, 0.42626E+07, 0.44162E+07,
0.45741E+07, 0.47364E+07, 0.49031E+07, 0.50744E+07, 0.52503E+07,
0.54309E+07, 0.56164E+07, 0.58067E+07, 0.60021E+07, 0.62025E+07,
0.64081E+07, 0.66191E+07, 0.68354E+07, 0.70572E+07, 0.72846E+07,
0.75177E+07, 0.77565E+07, 0.80013E+07, 0.82521E+07, 0.85090E+07,
0.87721E+07, 0.90415E+07, 0.93173E+07, 0.95997E+07, 0.98888E+07,
0.10185E+08])
# --------------- C2H4 221: M = 38, I = 1 ---------------------
M = 38
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.95843E+03, 0.16137E+04, 0.23744E+04,
0.32285E+04, 0.41694E+04, 0.51963E+04, 0.63143E+04, 0.75337E+04,
0.88702E+04, 0.10344E+05, 0.11978E+05, 0.13802E+05, 0.15846E+05,
0.18145E+05, 0.20740E+05, 0.23675E+05, 0.27000E+05, 0.30770E+05,
0.35048E+05, 0.39905E+05, 0.45420E+05, 0.51680E+05, 0.58786E+05,
0.66850E+05, 0.75997E+05, 0.86369E+05, 0.98123E+05, 0.11144E+06,
0.12651E+06, 0.14356E+06, 0.16284E+06, 0.18463E+06, 0.20923E+06,
0.23699E+06, 0.26831E+06, 0.30360E+06, 0.34334E+06, 0.38808E+06,
0.43840E+06, 0.49495E+06, 0.55847E+06, 0.62976E+06, 0.70973E+06,
0.79935E+06, 0.89973E+06, 0.10121E+07, 0.11378E+07, 0.12782E+07,
0.14351E+07, 0.16102E+07, 0.18055E+07, 0.20231E+07, 0.22656E+07,
0.25354E+07, 0.28356E+07, 0.31692E+07, 0.35398E+07, 0.39511E+07,
0.44074E+07, 0.49132E+07, 0.54736E+07, 0.60940E+07, 0.67803E+07,
0.75392E+07, 0.83776E+07, 0.93035E+07, 0.10325E+08, 0.11452E+08,
0.12694E+08, 0.14062E+08, 0.15567E+08, 0.17224E+08, 0.19045E+08,
0.21046E+08, 0.23243E+08, 0.25655E+08, 0.28300E+08, 0.31200E+08,
0.34377E+08, 0.37856E+08, 0.41662E+08, 0.45826E+08, 0.50378E+08,
0.55351E+08, 0.60781E+08, 0.66707E+08, 0.73172E+08, 0.80219E+08,
0.87899E+08, 0.96262E+08, 0.10537E+09, 0.11527E+09, 0.12604E+09,
0.13775E+09, 0.15047E+09, 0.16428E+09, 0.17927E+09, 0.19553E+09,
0.21316E+09, 0.23226E+09, 0.25296E+09, 0.27537E+09, 0.29963E+09,
0.32587E+09, 0.35425E+09, 0.38492E+09, 0.41805E+09, 0.45383E+09,
0.49246E+09, 0.53413E+09, 0.57908E+09, 0.62754E+09, 0.67977E+09,
0.73602E+09, 0.79660E+09, 0.86179E+09, 0.93194E+09, 0.10074E+10,
0.10885E+10])
# --------------- C2H4 231: M = 38, I = 2 ---------------------
M = 38
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M, I)] = float32([0.39228E+04, 0.66051E+04, 0.97190E+04,
0.13215E+05, 0.17066E+05, 0.21270E+05, 0.25846E+05, 0.30838E+05,
0.36309E+05, 0.42341E+05, 0.49032E+05, 0.56496E+05, 0.64862E+05,
0.74275E+05, 0.84897E+05, 0.96912E+05, 0.11052E+06, 0.12595E+06,
0.14347E+06, 0.16335E+06, 0.18592E+06, 0.21155E+06, 0.24064E+06,
0.27365E+06, 0.31109E+06, 0.35354E+06, 0.40166E+06, 0.45615E+06,
0.51785E+06, 0.58765E+06, 0.66657E+06, 0.75575E+06, 0.85646E+06,
0.97011E+06, 0.10983E+07, 0.12428E+07, 0.14055E+07, 0.15886E+07,
0.17945E+07, 0.20260E+07, 0.22861E+07, 0.25779E+07, 0.29052E+07,
0.32721E+07, 0.36830E+07, 0.41429E+07, 0.46573E+07, 0.52323E+07,
0.58744E+07, 0.65912E+07, 0.73906E+07, 0.82816E+07, 0.92740E+07,
0.10379E+08, 0.11607E+08, 0.12973E+08, 0.14490E+08, 0.16174E+08,
0.18042E+08, 0.20112E+08, 0.22406E+08, 0.24945E+08, 0.27755E+08,
0.30861E+08, 0.34293E+08, 0.38083E+08, 0.42266E+08, 0.46878E+08,
0.51961E+08, 0.57560E+08, 0.63724E+08, 0.70504E+08, 0.77959E+08,
0.86150E+08, 0.95145E+08, 0.10502E+09, 0.11585E+09, 0.12772E+09,
0.14072E+09, 0.15496E+09, 0.17054E+09, 0.18759E+09, 0.20622E+09,
0.22658E+09, 0.24880E+09, 0.27306E+09, 0.29952E+09, 0.32837E+09,
0.35981E+09, 0.39404E+09, 0.43131E+09, 0.47186E+09, 0.51595E+09,
0.56387E+09, 0.61594E+09, 0.67247E+09, 0.73382E+09, 0.80038E+09,
0.87255E+09, 0.95076E+09, 0.10355E+10, 0.11272E+10, 0.12265E+10,
0.13339E+10, 0.14501E+10, 0.15756E+10, 0.17113E+10, 0.18577E+10,
0.20159E+10, 0.21865E+10, 0.23705E+10, 0.25688E+10, 0.27826E+10,
0.30129E+10, 0.32608E+10, 0.35277E+10, 0.38149E+10, 0.41237E+10,
0.44557E+10])
# --------------- CH3OH 2161: M = 39, I = 1 --------------------- not in TIPS-2011
M = 39
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(0.)
TIPS_ISO_HASH[(M, I)] = float32([0.])
# --------------- CH3Br 219: M = 40, I = 1 ---------------------
M = 40
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(4.)
TIPS_ISO_HASH[(M, I)] = float32([0.70299E+04, 0.11847E+05, 0.17442E+05,
0.23741E+05, 0.30723E+05, 0.38408E+05, 0.46851E+05, 0.56138E+05,
0.66375E+05, 0.77692E+05, 0.90239E+05, 0.10418E+06, 0.11972E+06,
0.13704E+06, 0.15639E+06, 0.17801E+06, 0.20218E+06, 0.22920E+06,
0.25940E+06, 0.29316E+06, 0.33087E+06, 0.37296E+06, 0.41992E+06,
0.47229E+06, 0.53062E+06, 0.59557E+06, 0.66781E+06, 0.74812E+06,
0.83731E+06, 0.93629E+06, 0.10461E+07, 0.11677E+07, 0.13023E+07,
0.14513E+07, 0.16159E+07, 0.17978E+07, 0.19985E+07, 0.22199E+07,
0.24638E+07, 0.27324E+07, 0.30280E+07, 0.33529E+07, 0.37099E+07,
0.41019E+07, 0.45319E+07, 0.50034E+07, 0.55199E+07, 0.60853E+07,
0.67039E+07, 0.73801E+07, 0.81189E+07, 0.89255E+07, 0.98056E+07,
0.10765E+08, 0.11811E+08, 0.12949E+08, 0.14188E+08, 0.15535E+08,
0.17000E+08, 0.18590E+08, 0.20317E+08, 0.22190E+08, 0.24220E+08,
0.26421E+08, 0.28804E+08, 0.31383E+08, 0.34173E+08, 0.37189E+08,
0.40448E+08, 0.43967E+08, 0.47765E+08, 0.51862E+08, 0.56280E+08,
0.61040E+08, 0.66167E+08, 0.71686E+08, 0.77624E+08, 0.84009E+08,
0.90873E+08, 0.98247E+08, 0.10616E+09, 0.11466E+09, 0.12378E+09,
0.13356E+09, 0.14403E+09, 0.15526E+09, 0.16728E+09, 0.18014E+09,
0.19391E+09, 0.20863E+09, 0.22436E+09, 0.24117E+09, 0.25913E+09,
0.27830E+09, 0.29875E+09, 0.32057E+09, 0.34384E+09, 0.36864E+09,
0.39506E+09, 0.42320E+09, 0.45316E+09, 0.48504E+09, 0.51896E+09,
0.55502E+09, 0.59336E+09, 0.63410E+09, 0.67738E+09, 0.72334E+09,
0.77212E+09, 0.82388E+09, 0.87879E+09, 0.93701E+09, 0.99873E+09,
0.10641E+10, 0.11334E+10, 0.12068E+10, 0.12845E+10, 0.13667E+10,
0.14536E+10])
# --------------- CH3Br 211: M = 40, I = 2 ---------------------
M = 40
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(4.)
TIPS_ISO_HASH[(M, I)] = float32([0.70566E+04, 0.11892E+05, 0.17508E+05,
0.23832E+05, 0.30841E+05, 0.38557E+05, 0.47036E+05, 0.56362E+05,
0.66644E+05, 0.78011E+05, 0.90615E+05, 0.10462E+06, 0.12023E+06,
0.13763E+06, 0.15707E+06, 0.17880E+06, 0.20308E+06, 0.23023E+06,
0.26059E+06, 0.29451E+06, 0.33240E+06, 0.37471E+06, 0.42191E+06,
0.47453E+06, 0.53316E+06, 0.59843E+06, 0.67104E+06, 0.75176E+06,
0.84141E+06, 0.94090E+06, 0.10512E+07, 0.11735E+07, 0.13088E+07,
0.14585E+07, 0.16241E+07, 0.18069E+07, 0.20086E+07, 0.22312E+07,
0.24764E+07, 0.27464E+07, 0.30435E+07, 0.33702E+07, 0.37291E+07,
0.41231E+07, 0.45554E+07, 0.50294E+07, 0.55486E+07, 0.61171E+07,
0.67389E+07, 0.74188E+07, 0.81616E+07, 0.89725E+07, 0.98573E+07,
0.10822E+08, 0.11873E+08, 0.13018E+08, 0.14263E+08, 0.15618E+08,
0.17090E+08, 0.18689E+08, 0.20425E+08, 0.22308E+08, 0.24350E+08,
0.26563E+08, 0.28959E+08, 0.31552E+08, 0.34357E+08, 0.37389E+08,
0.40666E+08, 0.44204E+08, 0.48023E+08, 0.52143E+08, 0.56585E+08,
0.61371E+08, 0.66526E+08, 0.72076E+08, 0.78046E+08, 0.84467E+08,
0.91369E+08, 0.98783E+08, 0.10674E+09, 0.11529E+09, 0.12446E+09,
0.13429E+09, 0.14482E+09, 0.15611E+09, 0.16820E+09, 0.18113E+09,
0.19497E+09, 0.20978E+09, 0.22560E+09, 0.24250E+09, 0.26056E+09,
0.27983E+09, 0.30040E+09, 0.32234E+09, 0.34574E+09, 0.37068E+09,
0.39725E+09, 0.42555E+09, 0.45567E+09, 0.48773E+09, 0.52184E+09,
0.55811E+09, 0.59666E+09, 0.63763E+09, 0.68115E+09, 0.72736E+09,
0.77642E+09, 0.82847E+09, 0.88368E+09, 0.94223E+09, 0.10043E+10,
0.10701E+10, 0.11397E+10, 0.12135E+10, 0.12916E+10, 0.13743E+10,
0.14618E+10])
# --------------- CH3CN 2124: M = 41, I = 1 ---------------------
M = 41
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(3.)
TIPS_ISO_HASH[(M, I)] = float32([0.54361E+04, 0.91953E+04, 0.13708E+05,
0.19097E+05, 0.25531E+05, 0.33206E+05, 0.42337E+05, 0.53173E+05,
0.66002E+05, 0.81163E+05, 0.99053E+05, 0.12014E+06, 0.14496E+06,
0.17414E+06, 0.20843E+06, 0.24866E+06, 0.29580E+06, 0.35099E+06,
0.41551E+06, 0.49085E+06, 0.57871E+06, 0.68104E+06, 0.80008E+06,
0.93836E+06, 0.10988E+07, 0.12848E+07, 0.14999E+07, 0.17487E+07,
0.20359E+07, 0.23670E+07, 0.27484E+07, 0.31871E+07, 0.36912E+07,
0.42697E+07, 0.49328E+07, 0.56921E+07, 0.65605E+07, 0.75526E+07,
0.86847E+07, 0.99753E+07, 0.11445E+08, 0.13116E+08, 0.15016E+08,
0.17172E+08, 0.19617E+08, 0.22386E+08, 0.25520E+08, 0.29063E+08,
0.33064E+08, 0.37578E+08, 0.42667E+08, 0.48397E+08, 0.54844E+08,
0.62090E+08, 0.70228E+08, 0.79358E+08, 0.89592E+08, 0.10105E+09,
0.11388E+09, 0.12822E+09, 0.14424E+09, 0.16212E+09, 0.18205E+09,
0.20427E+09, 0.22900E+09, 0.25652E+09, 0.28710E+09, 0.32107E+09,
0.35877E+09, 0.40059E+09, 0.44692E+09, 0.49822E+09, 0.55500E+09,
0.61777E+09, 0.68712E+09, 0.76370E+09, 0.84819E+09, 0.94135E+09,
0.10440E+10, 0.11570E+10, 0.12814E+10, 0.14181E+10, 0.15684E+10,
0.17334E+10, 0.19145E+10, 0.21131E+10, 0.23308E+10, 0.25693E+10,
0.28304E+10, 0.31161E+10, 0.34285E+10, 0.37698E+10, 0.41426E+10,
0.45496E+10, 0.49935E+10, 0.54776E+10, 0.60051E+10, 0.65796E+10,
0.72049E+10, 0.78853E+10, 0.86251E+10, 0.94291E+10, 0.10303E+11,
0.11251E+11, 0.12280E+11, 0.13396E+11, 0.14606E+11, 0.15916E+11,
0.17336E+11, 0.18873E+11, 0.20536E+11, 0.22334E+11, 0.24278E+11,
0.26379E+11, 0.28647E+11, 0.31096E+11, 0.33739E+11, 0.36589E+11,
0.39661E+11])
# --------------- CH3CN 2134: M = 41, I = 2 --------------------- not in HITRAN-2012
M = 41
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M, I)] = float32([0.10906E+05, 0.18458E+05, 0.27552E+05,
0.38455E+05, 0.51523E+05, 0.67161E+05, 0.85818E+05, 0.10801E+06,
0.13434E+06, 0.16550E+06, 0.20234E+06, 0.24581E+06, 0.29705E+06,
0.35737E+06, 0.42831E+06, 0.51162E+06, 0.60936E+06, 0.72387E+06,
0.85786E+06, 0.10145E+07, 0.11972E+07, 0.14102E+07, 0.16582E+07,
0.19465E+07, 0.22813E+07, 0.26695E+07, 0.31190E+07, 0.36390E+07,
0.42397E+07, 0.49328E+07, 0.57314E+07, 0.66507E+07, 0.77076E+07,
0.89211E+07, 0.10313E+08, 0.11907E+08, 0.13732E+08, 0.15817E+08,
0.18198E+08, 0.20914E+08, 0.24007E+08, 0.27527E+08, 0.31529E+08,
0.36073E+08, 0.41228E+08, 0.47070E+08, 0.53683E+08, 0.61162E+08,
0.69612E+08, 0.79149E+08, 0.89903E+08, 0.10202E+09, 0.11565E+09,
0.13098E+09, 0.14820E+09, 0.16753E+09, 0.18921E+09, 0.21349E+09,
0.24066E+09, 0.27106E+09, 0.30502E+09, 0.34293E+09, 0.38523E+09,
0.43237E+09, 0.48486E+09, 0.54328E+09, 0.60823E+09, 0.68039E+09,
0.76049E+09, 0.84935E+09, 0.94784E+09, 0.10569E+10, 0.11777E+10,
0.13112E+10, 0.14588E+10, 0.16217E+10, 0.18016E+10, 0.19999E+10,
0.22185E+10, 0.24592E+10, 0.27241E+10, 0.30155E+10, 0.33357E+10,
0.36875E+10, 0.40736E+10, 0.44971E+10, 0.49615E+10, 0.54702E+10,
0.60273E+10, 0.66369E+10, 0.73035E+10, 0.80322E+10, 0.88282E+10,
0.96972E+10, 0.10645E+11, 0.11679E+11, 0.12806E+11, 0.14034E+11,
0.15370E+11, 0.16824E+11, 0.18406E+11, 0.20125E+11, 0.21992E+11,
0.24020E+11, 0.26221E+11, 0.28608E+11, 0.31197E+11, 0.34002E+11,
0.37040E+11, 0.40330E+11, 0.43889E+11, 0.47739E+11, 0.51902E+11,
0.56400E+11, 0.61259E+11, 0.66504E+11, 0.72165E+11, 0.78272E+11,
0.84856E+11])
# --------------- CH3CN 3124: M = 41, I = 3 --------------------- not in HITRAN-2012
M = 41
I = 3
TIPS_GSI_HASH[(M, I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M, I)] = float32([0.11223E+05, 0.18985E+05, 0.28307E+05,
0.39441E+05, 0.52744E+05, 0.68620E+05, 0.87523E+05, 0.10997E+06,
0.13658E+06, 0.16806E+06, 0.20524E+06, 0.24910E+06, 0.30080E+06,
0.36165E+06, 0.43319E+06, 0.51722E+06, 0.61579E+06, 0.73127E+06,
0.86640E+06, 0.10243E+07, 0.12086E+07, 0.14234E+07, 0.16735E+07,
0.19642E+07, 0.23017E+07, 0.26931E+07, 0.31464E+07, 0.36706E+07,
0.42762E+07, 0.49749E+07, 0.57801E+07, 0.67069E+07, 0.77722E+07,
0.89955E+07, 0.10398E+08, 0.12006E+08, 0.13845E+08, 0.15947E+08,
0.18346E+08, 0.21083E+08, 0.24201E+08, 0.27748E+08, 0.31781E+08,
0.36361E+08, 0.41556E+08, 0.47442E+08, 0.54106E+08, 0.61643E+08,
0.70157E+08, 0.79767E+08, 0.90604E+08, 0.10281E+09, 0.11655E+09,
0.13199E+09, 0.14935E+09, 0.16882E+09, 0.19065E+09, 0.21512E+09,
0.24250E+09, 0.27312E+09, 0.30733E+09, 0.34553E+09, 0.38814E+09,
0.43562E+09, 0.48851E+09, 0.54736E+09, 0.61279E+09, 0.68548E+09,
0.76617E+09, 0.85568E+09, 0.95489E+09, 0.10648E+10, 0.11864E+10,
0.13209E+10, 0.14695E+10, 0.16337E+10, 0.18148E+10, 0.20146E+10,
0.22348E+10, 0.24772E+10, 0.27441E+10, 0.30375E+10, 0.33601E+10,
0.37143E+10, 0.41032E+10, 0.45298E+10, 0.49975E+10, 0.55099E+10,
0.60709E+10, 0.66849E+10, 0.73563E+10, 0.80902E+10, 0.88918E+10,
0.97670E+10, 0.10722E+11, 0.11763E+11, 0.12898E+11, 0.14134E+11,
0.15480E+11, 0.16945E+11, 0.18537E+11, 0.20269E+11, 0.22149E+11,
0.24191E+11, 0.26408E+11, 0.28812E+11, 0.31419E+11, 0.34244E+11,
0.37303E+11, 0.40616E+11, 0.44201E+11, 0.48078E+11, 0.52269E+11,
0.56799E+11, 0.61692E+11, 0.66974E+11, 0.72675E+11, 0.78824E+11,
0.85454E+11])
# --------------- CH3CN 3134: M = 41, I = 4 --------------------- not in HITRAN-2012
M = 41
I = 4
TIPS_GSI_HASH[(M, I)] = __FloatType__(12.)
TIPS_ISO_HASH[(M, I)] = float32([0.22522E+05, 0.38117E+05, 0.56899E+05,
0.79412E+05, 0.10640E+06, 0.13870E+06, 0.17726E+06, 0.22314E+06,
0.27761E+06, 0.34214E+06, 0.41847E+06, 0.50862E+06, 0.61497E+06,
0.74028E+06, 0.88774E+06, 0.10611E+07, 0.12646E+07, 0.15031E+07,
0.17825E+07, 0.21092E+07, 0.24908E+07, 0.29358E+07, 0.34541E+07,
0.40571E+07, 0.47576E+07, 0.55703E+07, 0.65120E+07, 0.76018E+07,
0.88614E+07, 0.10315E+08, 0.11992E+08, 0.13922E+08, 0.16142E+08,
0.18693E+08, 0.21619E+08, 0.24973E+08, 0.28812E+08, 0.33202E+08,
0.38216E+08, 0.43936E+08, 0.50455E+08, 0.57876E+08, 0.66315E+08,
0.75901E+08, 0.86779E+08, 0.99110E+08, 0.11307E+09, 0.12887E+09,
0.14672E+09, 0.16688E+09, 0.18961E+09, 0.21523E+09, 0.24407E+09,
0.27651E+09, 0.31295E+09, 0.35387E+09, 0.39975E+09, 0.45118E+09,
0.50875E+09, 0.57315E+09, 0.64512E+09, 0.72549E+09, 0.81517E+09,
0.91514E+09, 0.10265E+10, 0.11504E+10, 0.12883E+10, 0.14414E+10,
0.16115E+10, 0.18001E+10, 0.20093E+10, 0.22410E+10, 0.24975E+10,
0.27812E+10, 0.30948E+10, 0.34412E+10, 0.38235E+10, 0.42452E+10,
0.47101E+10, 0.52220E+10, 0.57856E+10, 0.64055E+10, 0.70869E+10,
0.78355E+10, 0.86574E+10, 0.95591E+10, 0.10548E+11, 0.11631E+11,
0.12817E+11, 0.14116E+11, 0.15536E+11, 0.17088E+11, 0.18785E+11,
0.20636E+11, 0.22657E+11, 0.24861E+11, 0.27264E+11, 0.29881E+11,
0.32730E+11, 0.35832E+11, 0.39205E+11, 0.42871E+11, 0.46855E+11,
0.51182E+11, 0.55878E+11, 0.60973E+11, 0.66497E+11, 0.72484E+11,
0.78970E+11, 0.85992E+11, 0.93592E+11, 0.10181E+12, 0.11070E+12,
0.12031E+12, 0.13069E+12, 0.14189E+12, 0.15398E+12, 0.16703E+12,
0.18110E+12])
# --------------- CF4 29: M = 42, I = 1 ---------------------
M = 42
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.76233E+04, 0.12867E+05, 0.19059E+05,
0.26316E+05, 0.34895E+05, 0.45145E+05, 0.57461E+05, 0.72259E+05,
0.89950E+05, 0.11092E+06, 0.13550E+06, 0.16399E+06, 0.19658E+06,
0.23341E+06, 0.27457E+06, 0.32004E+06, 0.36978E+06, 0.42369E+06,
0.48161E+06, 0.54338E+06, 0.60880E+06, 0.67764E+06, 0.55684E+07,
0.71250E+07, 0.90615E+07, 0.11458E+08, 0.14407E+08, 0.18021E+08,
0.22428E+08, 0.27778E+08, 0.34247E+08, 0.42038E+08, 0.51386E+08,
0.62559E+08, 0.75869E+08, 0.91670E+08, 0.11037E+09, 0.13242E+09,
0.15836E+09, 0.18878E+09, 0.22436E+09, 0.26584E+09, 0.31410E+09,
0.37008E+09, 0.43488E+09, 0.50970E+09, 0.59589E+09, 0.69496E+09,
0.80858E+09, 0.93863E+09, 0.10872E+10, 0.12565E+10, 0.14491E+10,
0.16679E+10, 0.19159E+10, 0.21966E+10, 0.25136E+10, 0.28711E+10,
0.32740E+10, 0.37260E+10, 0.42340E+10, 0.48030E+10, 0.54400E+10,
0.61520E+10, 0.69470E+10, 0.78320E+10, 0.88170E+10, 0.99120E+10,
0.11130E+11, 0.12470E+11, 0.13970E+11, 0.15620E+11, 0.17440E+11,
0.19450E+11, 0.21670E+11, 0.24100E+11, 0.26790E+11, 0.29730E+11,
0.33000E+11, 0.36500E+11, 0.40400E+11, 0.44600E+11, 0.49300E+11,
0.54300E+11, 0.59800E+11, 0.65800E+11, 0.72400E+11, 0.79500E+11,
0.87200E+11, 0.95500E+11, 0.10500E+12, 0.11400E+12, 0.12500E+12,
0.13600E+12, 0.14900E+12, 0.16200E+12, 0.17700E+12, 0.19200E+12,
0.21000E+12, 0.23000E+12, 0.25000E+12, 0.27000E+12, 0.29000E+12,
0.31000E+12, 0.34000E+12, 0.36000E+12, 0.39000E+12, 0.42000E+12,
0.46000E+12, 0.49000E+12, 0.53000E+12, 0.57000E+12, 0.61000E+12,
0.66000E+12, 0.70000E+12, 0.75000E+12, 0.81000E+12, 0.86000E+12,
0.93000E+12])
# --------------- C4H2 1221: M = 43, I = 1 ---------------------
M = 43
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.57628E+03, 0.84874E+03, 0.11789E+04,
0.15952E+04, 0.21317E+04, 0.28324E+04, 0.37543E+04, 0.49705E+04,
0.65754E+04, 0.86894E+04, 0.11466E+05, 0.15099E+05, 0.19834E+05,
0.25980E+05, 0.33920E+05, 0.44132E+05, 0.57210E+05, 0.73884E+05,
0.95049E+05, 0.12180E+06, 0.15548E+06, 0.19771E+06, 0.25045E+06,
0.31606E+06, 0.39739E+06, 0.49786E+06, 0.62152E+06, 0.77324E+06,
0.95878E+06, 0.11850E+07, 0.14599E+07, 0.17930E+07, 0.21956E+07,
0.26807E+07, 0.32637E+07, 0.39626E+07, 0.47983E+07, 0.57951E+07,
0.69813E+07, 0.83896E+07, 0.10058E+08, 0.12030E+08, 0.14356E+08,
0.17093E+08, 0.20309E+08, 0.24079E+08, 0.28491E+08, 0.33644E+08,
0.39651E+08, 0.46642E+08, 0.54764E+08, 0.64184E+08, 0.75091E+08,
0.87699E+08, 0.10225E+09, 0.11902E+09, 0.13832E+09, 0.16049E+09,
0.18593E+09, 0.21507E+09, 0.24841E+09, 0.28650E+09, 0.32996E+09,
0.37949E+09, 0.43586E+09, 0.49993E+09, 0.57266E+09, 0.65513E+09,
0.74852E+09, 0.85418E+09, 0.97356E+09, 0.11083E+10, 0.12602E+10,
0.14313E+10, 0.16238E+10, 0.18401E+10, 0.20829E+10, 0.23553E+10,
0.26605E+10, 0.30021E+10, 0.33841E+10, 0.38109E+10, 0.42874E+10,
0.48187E+10, 0.54107E+10, 0.60698E+10, 0.68029E+10, 0.76176E+10,
0.85223E+10, 0.95260E+10, 0.10639E+11, 0.11871E+11, 0.13236E+11,
0.14744E+11, 0.16412E+11, 0.18253E+11, 0.20285E+11, 0.22526E+11,
0.24995E+11, 0.27714E+11, 0.30705E+11, 0.33995E+11, 0.37609E+11,
0.41579E+11, 0.45934E+11, 0.50711E+11, 0.55947E+11, 0.61681E+11,
0.67957E+11, 0.74824E+11, 0.82330E+11, 0.90532E+11, 0.99487E+11,
0.10926E+12, 0.11992E+12, 0.13154E+12, 0.14420E+12, 0.15799E+12,
0.17299E+12])
# --------------- HC3N 12224: M = 44, I = 1 --------------------- 1224 in HITRAN, 12224 in TIPS
M = 44
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M, I)] = float32([0.16683E+04, 0.24538E+04, 0.33995E+04,
0.45769E+04, 0.60637E+04, 0.79533E+04, 0.10360E+05, 0.13422E+05,
0.17311E+05, 0.22232E+05, 0.28434E+05, 0.36215E+05, 0.45932E+05,
0.58011E+05, 0.72958E+05, 0.91370E+05, 0.11395E+06, 0.14153E+06,
0.17507E+06, 0.21570E+06, 0.26475E+06, 0.32372E+06, 0.39440E+06,
0.47881E+06, 0.57930E+06, 0.69856E+06, 0.83968E+06, 0.10062E+07,
0.12021E+07, 0.14320E+07, 0.17011E+07, 0.20153E+07, 0.23812E+07,
0.28065E+07, 0.32996E+07, 0.38701E+07, 0.45287E+07, 0.52876E+07,
0.61602E+07, 0.71616E+07, 0.83088E+07, 0.96206E+07, 0.11118E+08,
0.12824E+08, 0.14765E+08, 0.16969E+08, 0.19469E+08, 0.22299E+08,
0.25498E+08, 0.29110E+08, 0.33181E+08, 0.37763E+08, 0.42914E+08,
0.48697E+08, 0.55180E+08, 0.62440E+08, 0.70558E+08, 0.79627E+08,
0.89743E+08, 0.10102E+09, 0.11356E+09, 0.12752E+09, 0.14301E+09,
0.16020E+09, 0.17925E+09, 0.20035E+09, 0.22367E+09, 0.24945E+09,
0.27790E+09, 0.30928E+09, 0.34385E+09, 0.38191E+09, 0.42376E+09,
0.46975E+09, 0.52023E+09, 0.57562E+09, 0.63632E+09, 0.70279E+09,
0.77553E+09, 0.85506E+09, 0.94195E+09, 0.10368E+10, 0.11403E+10,
0.12531E+10, 0.13759E+10, 0.15097E+10, 0.16552E+10, 0.18133E+10,
0.19851E+10, 0.21715E+10, 0.23738E+10, 0.25931E+10, 0.28307E+10,
0.30879E+10, 0.33662E+10, 0.36672E+10, 0.39926E+10, 0.43439E+10,
0.47233E+10, 0.51325E+10, 0.55738E+10, 0.60493E+10, 0.65615E+10,
0.71129E+10, 0.77061E+10, 0.83441E+10, 0.90298E+10, 0.97664E+10,
0.10557E+11, 0.11406E+11, 0.12317E+11, 0.13293E+11, 0.14339E+11,
0.15459E+11, 0.16659E+11, 0.17942E+11, 0.19316E+11, 0.20784E+11,
0.22353E+11])
# --------------- HC3N 12234: M = 44, I = 2 --------------------- see above
M = 44
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(12.)
TIPS_ISO_HASH[(M, I)] = float32([0.33507E+04, 0.49290E+04, 0.68293E+04,
0.91959E+04, 0.12185E+05, 0.15986E+05, 0.20828E+05, 0.26993E+05,
0.34824E+05, 0.44739E+05, 0.57239E+05, 0.72931E+05, 0.92539E+05,
0.11693E+06, 0.14713E+06, 0.18435E+06, 0.23004E+06, 0.28588E+06,
0.35384E+06, 0.43625E+06, 0.53580E+06, 0.65562E+06, 0.79933E+06,
0.97115E+06, 0.11759E+07, 0.14191E+07, 0.17073E+07, 0.20476E+07,
0.24486E+07, 0.29196E+07, 0.34716E+07, 0.41169E+07, 0.48696E+07,
0.57453E+07, 0.67621E+07, 0.79402E+07, 0.93022E+07, 0.10874E+08,
0.12684E+08, 0.14764E+08, 0.17150E+08, 0.19884E+08, 0.23009E+08,
0.26576E+08, 0.30641E+08, 0.35265E+08, 0.40518E+08, 0.46477E+08,
0.53225E+08, 0.60856E+08, 0.69475E+08, 0.79195E+08, 0.90143E+08,
0.10246E+09, 0.11629E+09, 0.13182E+09, 0.14921E+09, 0.16868E+09,
0.19045E+09, 0.21477E+09, 0.24189E+09, 0.27211E+09, 0.30575E+09,
0.34316E+09, 0.38471E+09, 0.43083E+09, 0.48196E+09, 0.53858E+09,
0.60125E+09, 0.67052E+09, 0.74704E+09, 0.83148E+09, 0.92459E+09,
0.10272E+10, 0.11401E+10, 0.12643E+10, 0.14007E+10, 0.15506E+10,
0.17150E+10, 0.18953E+10, 0.20928E+10, 0.23090E+10, 0.25456E+10,
0.28042E+10, 0.30867E+10, 0.33951E+10, 0.37316E+10, 0.40984E+10,
0.44981E+10, 0.49332E+10, 0.54067E+10, 0.59216E+10, 0.64812E+10,
0.70890E+10, 0.77488E+10, 0.84645E+10, 0.92405E+10, 0.10081E+11,
0.10992E+11, 0.11978E+11, 0.13044E+11, 0.14197E+11, 0.15443E+11,
0.16789E+11, 0.18243E+11, 0.19810E+11, 0.21501E+11, 0.23324E+11,
0.25288E+11, 0.27403E+11, 0.29680E+11, 0.32130E+11, 0.34764E+11,
0.37596E+11, 0.40639E+11, 0.43907E+11, 0.47416E+11, 0.51181E+11,
0.55220E+11])
# --------------- HC3N 12324: M = 44, I = 3 --------------------- see above
M = 44
I = 3
TIPS_GSI_HASH[(M, I)] = __FloatType__(12.)
TIPS_ISO_HASH[(M, I)] = float32([0.33506E+04, 0.49280E+04, 0.68267E+04,
0.91901E+04, 0.12174E+05, 0.15966E+05, 0.20793E+05, 0.26936E+05,
0.34734E+05, 0.44598E+05, 0.57026E+05, 0.72612E+05, 0.92071E+05,
0.11625E+06, 0.14616E+06, 0.18298E+06, 0.22813E+06, 0.28323E+06,
0.35022E+06, 0.43133E+06, 0.52918E+06, 0.64677E+06, 0.78761E+06,
0.95571E+06, 0.11557E+07, 0.13929E+07, 0.16734E+07, 0.20041E+07,
0.23929E+07, 0.28488E+07, 0.33820E+07, 0.40040E+07, 0.47280E+07,
0.55686E+07, 0.65423E+07, 0.76678E+07, 0.89661E+07, 0.10460E+08,
0.12177E+08, 0.14145E+08, 0.16397E+08, 0.18970E+08, 0.21903E+08,
0.25242E+08, 0.29036E+08, 0.33339E+08, 0.38214E+08, 0.43726E+08,
0.49949E+08, 0.56965E+08, 0.64864E+08, 0.73743E+08, 0.83711E+08,
0.94886E+08, 0.10740E+09, 0.12139E+09, 0.13701E+09, 0.15443E+09,
0.17384E+09, 0.19543E+09, 0.21943E+09, 0.24607E+09, 0.27561E+09,
0.30832E+09, 0.34452E+09, 0.38453E+09, 0.42870E+09, 0.47742E+09,
0.53110E+09, 0.59020E+09, 0.65518E+09, 0.72659E+09, 0.80496E+09,
0.89092E+09, 0.98510E+09, 0.10882E+10, 0.12010E+10, 0.13242E+10,
0.14588E+10, 0.16056E+10, 0.17657E+10, 0.19401E+10, 0.21299E+10,
0.23363E+10, 0.25606E+10, 0.28043E+10, 0.30687E+10, 0.33553E+10,
0.36660E+10, 0.40024E+10, 0.43665E+10, 0.47601E+10, 0.51856E+10,
0.56450E+10, 0.61408E+10, 0.66756E+10, 0.72520E+10, 0.78729E+10,
0.85413E+10, 0.92604E+10, 0.10034E+11, 0.10864E+11, 0.11757E+11,
0.12714E+11, 0.13742E+11, 0.14843E+11, 0.16023E+11, 0.17287E+11,
0.18640E+11, 0.20087E+11, 0.21634E+11, 0.23288E+11, 0.25054E+11,
0.26939E+11, 0.28950E+11, 0.31096E+11, 0.33382E+11, 0.35819E+11,
0.38413E+11])
# --------------- HC3N 13224: M = 44, I = 4 --------------------- see above
M = 44
I = 4
TIPS_GSI_HASH[(M, I)] = __FloatType__(12.)
TIPS_ISO_HASH[(M, I)] = float32([0.34439E+04, 0.50672E+04, 0.70230E+04,
0.94603E+04, 0.12542E+05, 0.16462E+05, 0.21461E+05, 0.27833E+05,
0.35935E+05, 0.46204E+05, 0.59168E+05, 0.75463E+05, 0.95854E+05,
0.12126E+06, 0.15276E+06, 0.19165E+06, 0.23947E+06, 0.29802E+06,
0.36943E+06, 0.45619E+06, 0.56121E+06, 0.68789E+06, 0.84018E+06,
0.10227E+07, 0.12407E+07, 0.15003E+07, 0.18086E+07, 0.21738E+07,
0.26052E+07, 0.31134E+07, 0.37106E+07, 0.44109E+07, 0.52300E+07,
0.61861E+07, 0.72996E+07, 0.85939E+07, 0.10095E+08, 0.11833E+08,
0.13841E+08, 0.16158E+08, 0.18825E+08, 0.21890E+08, 0.25407E+08,
0.29436E+08, 0.34045E+08, 0.39308E+08, 0.45309E+08, 0.52143E+08,
0.59912E+08, 0.68734E+08, 0.78737E+08, 0.90065E+08, 0.10288E+09,
0.11735E+09, 0.13367E+09, 0.15206E+09, 0.17277E+09, 0.19604E+09,
0.22217E+09, 0.25148E+09, 0.28432E+09, 0.32108E+09, 0.36218E+09,
0.40809E+09, 0.45932E+09, 0.51644E+09, 0.58004E+09, 0.65082E+09,
0.72950E+09, 0.81690E+09, 0.91388E+09, 0.10214E+10, 0.11405E+10,
0.12724E+10, 0.14182E+10, 0.15794E+10, 0.17573E+10, 0.19536E+10,
0.21701E+10, 0.24086E+10, 0.26711E+10, 0.29599E+10, 0.32774E+10,
0.36262E+10, 0.40090E+10, 0.44290E+10, 0.48895E+10, 0.53939E+10,
0.59462E+10, 0.65504E+10, 0.72111E+10, 0.79332E+10, 0.87217E+10,
0.95823E+10, 0.10521E+11, 0.11544E+11, 0.12659E+11, 0.13874E+11,
0.15195E+11, 0.16632E+11, 0.18194E+11, 0.19892E+11, 0.21735E+11,
0.23736E+11, 0.25907E+11, 0.28260E+11, 0.30810E+11, 0.33572E+11,
0.36563E+11, 0.39799E+11, 0.43299E+11, 0.47083E+11, 0.51172E+11,
0.55588E+11, 0.60355E+11, 0.65500E+11, 0.71049E+11, 0.77031E+11,
0.83478E+11])
# --------------- HC3N 12225: M = 44, I = 5 --------------------- see above
M = 44
I = 5
TIPS_GSI_HASH[(M, I)] = __FloatType__(4.)
TIPS_ISO_HASH[(M, I)] = float32([0.11455E+04, 0.16850E+04, 0.23345E+04,
0.31432E+04, 0.41647E+04, 0.54630E+04, 0.71168E+04, 0.92219E+04,
0.11895E+05, 0.15279E+05, 0.19545E+05, 0.24897E+05, 0.31584E+05,
0.39899E+05, 0.50190E+05, 0.62871E+05, 0.78428E+05, 0.97434E+05,
0.12056E+06, 0.14859E+06, 0.18243E+06, 0.22314E+06, 0.27194E+06,
0.33026E+06, 0.39972E+06, 0.48219E+06, 0.57983E+06, 0.69509E+06,
0.83077E+06, 0.99009E+06, 0.11767E+07, 0.13946E+07, 0.16487E+07,
0.19441E+07, 0.22868E+07, 0.26836E+07, 0.31420E+07, 0.36704E+07,
0.42786E+07, 0.49770E+07, 0.57776E+07, 0.66938E+07, 0.77404E+07,
0.89339E+07, 0.10293E+08, 0.11837E+08, 0.13590E+08, 0.15576E+08,
0.17823E+08, 0.20362E+08, 0.23227E+08, 0.26454E+08, 0.30085E+08,
0.34166E+08, 0.38745E+08, 0.43877E+08, 0.49622E+08, 0.56046E+08,
0.63219E+08, 0.71222E+08, 0.80138E+08, 0.90062E+08, 0.10110E+09,
0.11335E+09, 0.12695E+09, 0.14202E+09, 0.15870E+09, 0.17716E+09,
0.19756E+09, 0.22009E+09, 0.24493E+09, 0.27232E+09, 0.30247E+09,
0.33565E+09, 0.37211E+09, 0.41217E+09, 0.45613E+09, 0.50433E+09,
0.55714E+09, 0.61497E+09, 0.67823E+09, 0.74739E+09, 0.82293E+09,
0.90540E+09, 0.99536E+09, 0.10934E+10, 0.12002E+10, 0.13165E+10,
0.14430E+10, 0.15805E+10, 0.17299E+10, 0.18922E+10, 0.20682E+10,
0.22591E+10, 0.24660E+10, 0.26901E+10, 0.29326E+10, 0.31951E+10,
0.34788E+10, 0.37854E+10, 0.41166E+10, 0.44741E+10, 0.48598E+10,
0.52758E+10, 0.57240E+10, 0.62069E+10, 0.67269E+10, 0.72864E+10,
0.78882E+10, 0.85352E+10, 0.92305E+10, 0.99773E+10, 0.10779E+11,
0.11639E+11, 0.12562E+11, 0.13552E+11, 0.14612E+11, 0.15748E+11,
0.16964E+11])
# --------------- HC3N 22224: M = 44, I = 6 --------------------- see above
M = 44
I = 6
TIPS_GSI_HASH[(M, I)] = __FloatType__(9.)
TIPS_ISO_HASH[(M, I)] = float32([0.27029E+04, 0.39999E+04, 0.55894E+04,
0.76092E+04, 0.10219E+05, 0.13616E+05, 0.18042E+05, 0.23798E+05,
0.31255E+05, 0.40867E+05, 0.53189E+05, 0.68897E+05, 0.88807E+05,
0.11390E+06, 0.14537E+06, 0.18461E+06, 0.23330E+06, 0.29342E+06,
0.36733E+06, 0.45779E+06, 0.56802E+06, 0.70182E+06, 0.86361E+06,
0.10585E+07, 0.12925E+07, 0.15725E+07, 0.19064E+07, 0.23034E+07,
0.27739E+07, 0.33302E+07, 0.39858E+07, 0.47566E+07, 0.56604E+07,
0.67176E+07, 0.79511E+07, 0.93872E+07, 0.11055E+08, 0.12989E+08,
0.15225E+08, 0.17806E+08, 0.20779E+08, 0.24197E+08, 0.28119E+08,
0.32612E+08, 0.37749E+08, 0.43612E+08, 0.50294E+08, 0.57895E+08,
0.66528E+08, 0.76318E+08, 0.87403E+08, 0.99937E+08, 0.11409E+09,
0.13004E+09, 0.14800E+09, 0.16819E+09, 0.19086E+09, 0.21629E+09,
0.24476E+09, 0.27661E+09, 0.31219E+09, 0.35189E+09, 0.39615E+09,
0.44542E+09, 0.50021E+09, 0.56108E+09, 0.62862E+09, 0.70350E+09,
0.78641E+09, 0.87814E+09, 0.97952E+09, 0.10915E+10, 0.12149E+10,
0.13510E+10, 0.15008E+10, 0.16656E+10, 0.18468E+10, 0.20457E+10,
0.22640E+10, 0.25032E+10, 0.27653E+10, 0.30522E+10, 0.33659E+10,
0.37088E+10, 0.40832E+10, 0.44917E+10, 0.49371E+10, 0.54224E+10,
0.59508E+10, 0.65256E+10, 0.71507E+10, 0.78298E+10, 0.85671E+10,
0.93672E+10, 0.10235E+11, 0.11175E+11, 0.12193E+11, 0.13295E+11,
0.14487E+11, 0.15776E+11, 0.17168E+11, 0.18671E+11, 0.20293E+11,
0.22043E+11, 0.23929E+11, 0.25960E+11, 0.28148E+11, 0.30502E+11,
0.33034E+11, 0.35756E+11, 0.38681E+11, 0.41823E+11, 0.45195E+11,
0.48812E+11, 0.52692E+11, 0.56850E+11, 0.61306E+11, 0.66076E+11,
0.71183E+11])
# --------------- H2 11: M = 45, I = 1 ---------------------
M = 45
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.15265E+01, 0.22243E+01, 0.29619E+01,
0.36724E+01, 0.43456E+01, 0.49880E+01, 0.56090E+01, 0.62165E+01,
0.68161E+01, 0.74113E+01, 0.80044E+01, 0.85966E+01, 0.91887E+01,
0.97810E+01, 0.10374E+02, 0.10967E+02, 0.11561E+02, 0.12156E+02,
0.12751E+02, 0.13347E+02, 0.13944E+02, 0.14541E+02, 0.15139E+02,
0.15738E+02, 0.16337E+02, 0.16937E+02, 0.17538E+02, 0.18140E+02,
0.18743E+02, 0.19346E+02, 0.19951E+02, 0.20556E+02, 0.21163E+02,
0.21771E+02, 0.22379E+02, 0.22990E+02, 0.23601E+02, 0.24214E+02,
0.24829E+02, 0.25445E+02, 0.26063E+02, 0.26683E+02, 0.27304E+02,
0.27928E+02, 0.28553E+02, 0.29181E+02, 0.29811E+02, 0.30443E+02,
0.31078E+02, 0.31715E+02, 0.32355E+02, 0.32997E+02, 0.33643E+02,
0.34291E+02, 0.34942E+02, 0.35596E+02, 0.36253E+02, 0.36914E+02,
0.37578E+02, 0.38245E+02, 0.38916E+02, 0.39590E+02, 0.40268E+02,
0.40949E+02, 0.41635E+02, 0.42324E+02, 0.43017E+02, 0.43715E+02,
0.44416E+02, 0.45122E+02, 0.45831E+02, 0.46546E+02, 0.47264E+02,
0.47987E+02, 0.48714E+02, 0.49446E+02, 0.50183E+02, 0.50925E+02,
0.51671E+02, 0.52422E+02, 0.53178E+02, 0.53939E+02, 0.54705E+02,
0.55476E+02, 0.56252E+02, 0.57033E+02, 0.57820E+02, 0.58612E+02,
0.59409E+02, 0.60212E+02, 0.61020E+02, 0.61833E+02, 0.62652E+02,
0.63477E+02, 0.64308E+02, 0.65144E+02, 0.65986E+02, 0.66833E+02,
0.67687E+02, 0.68546E+02, 0.69411E+02, 0.70283E+02, 0.71160E+02,
0.72043E+02, 0.72933E+02, 0.73829E+02, 0.74730E+02, 0.75638E+02,
0.76553E+02, 0.77473E+02, 0.78400E+02, 0.79333E+02, 0.80273E+02,
0.81219E+02, 0.82172E+02, 0.83131E+02, 0.84097E+02, 0.85069E+02,
0.86048E+02])
# --------------- H2 12: M = 45, I = 2 ---------------------
M = 45
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(6.)
TIPS_ISO_HASH[(M, I)] = float32([0.81692E+01, 0.10308E+02, 0.12557E+02,
0.14848E+02, 0.17159E+02, 0.19482E+02, 0.21815E+02, 0.24153E+02,
0.26497E+02, 0.28845E+02, 0.31197E+02, 0.33552E+02, 0.35910E+02,
0.38272E+02, 0.40636E+02, 0.43002E+02, 0.45372E+02, 0.47744E+02,
0.50119E+02, 0.52496E+02, 0.54877E+02, 0.57261E+02, 0.59649E+02,
0.62040E+02, 0.64435E+02, 0.66835E+02, 0.69240E+02, 0.71650E+02,
0.74066E+02, 0.76489E+02, 0.78918E+02, 0.81354E+02, 0.83799E+02,
0.86252E+02, 0.88715E+02, 0.91187E+02, 0.93669E+02, 0.96163E+02,
0.98668E+02, 0.10118E+03, 0.10371E+03, 0.10626E+03, 0.10881E+03,
0.11138E+03, 0.11397E+03, 0.11657E+03, 0.11919E+03, 0.12182E+03,
0.12447E+03, 0.12714E+03, 0.12982E+03, 0.13252E+03, 0.13524E+03,
0.13798E+03, 0.14074E+03, 0.14352E+03, 0.14632E+03, 0.14914E+03,
0.15198E+03, 0.15484E+03, 0.15772E+03, 0.16062E+03, 0.16355E+03,
0.16649E+03, 0.16946E+03, 0.17246E+03, 0.17547E+03, 0.17851E+03,
0.18157E+03, 0.18466E+03, 0.18777E+03, 0.19090E+03, 0.19406E+03,
0.19725E+03, 0.20045E+03, 0.20369E+03, 0.20695E+03, 0.21023E+03,
0.21354E+03, 0.21687E+03, 0.22024E+03, 0.22362E+03, 0.22704E+03,
0.23048E+03, 0.23394E+03, 0.23744E+03, 0.24096E+03, 0.24451E+03,
0.24808E+03, 0.25169E+03, 0.25532E+03, 0.25897E+03, 0.26266E+03,
0.26638E+03, 0.27012E+03, 0.27389E+03, 0.27769E+03, 0.28152E+03,
0.28537E+03, 0.28926E+03, 0.29317E+03, 0.29712E+03, 0.30109E+03,
0.30509E+03, 0.30913E+03, 0.31319E+03, 0.31728E+03, 0.32140E+03,
0.32555E+03, 0.32974E+03, 0.33395E+03, 0.33819E+03, 0.34246E+03,
0.34677E+03, 0.35110E+03, 0.35547E+03, 0.35987E+03, 0.36429E+03,
0.36875E+03])
# --------------- CS 22: M = 46, I = 1 ---------------------
M = 46
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.51416E+02, 0.72723E+02, 0.94044E+02,
0.11538E+03, 0.13673E+03, 0.15810E+03, 0.17949E+03, 0.20093E+03,
0.22245E+03, 0.24407E+03, 0.26582E+03, 0.28776E+03, 0.30992E+03,
0.33233E+03, 0.35504E+03, 0.37807E+03, 0.40147E+03, 0.42525E+03,
0.44944E+03, 0.47406E+03, 0.49914E+03, 0.52468E+03, 0.55071E+03,
0.57723E+03, 0.60427E+03, 0.63183E+03, 0.65991E+03, 0.68854E+03,
0.71771E+03, 0.74743E+03, 0.77771E+03, 0.80855E+03, 0.83996E+03,
0.87193E+03, 0.90449E+03, 0.93762E+03, 0.97134E+03, 0.10056E+04,
0.10405E+04, 0.10760E+04, 0.11121E+04, 0.11487E+04, 0.11860E+04,
0.12239E+04, 0.12623E+04, 0.13014E+04, 0.13410E+04, 0.13813E+04,
0.14222E+04, 0.14637E+04, 0.15057E+04, 0.15484E+04, 0.15917E+04,
0.16357E+04, 0.16802E+04, 0.17253E+04, 0.17711E+04, 0.18175E+04,
0.18645E+04, 0.19121E+04, 0.19603E+04, 0.20091E+04, 0.20586E+04,
0.21087E+04, 0.21594E+04, 0.22107E+04, 0.22626E+04, 0.23152E+04,
0.23684E+04, 0.24222E+04, 0.24767E+04, 0.25317E+04, 0.25874E+04,
0.26438E+04, 0.27007E+04, 0.27583E+04, 0.28165E+04, 0.28754E+04,
0.29348E+04, 0.29949E+04, 0.30557E+04, 0.31170E+04, 0.31790E+04,
0.32417E+04, 0.33049E+04, 0.33688E+04, 0.34334E+04, 0.34986E+04,
0.35644E+04, 0.36308E+04, 0.36979E+04, 0.37656E+04, 0.38340E+04,
0.39030E+04, 0.39727E+04, 0.40430E+04, 0.41139E+04, 0.41855E+04,
0.42577E+04, 0.43306E+04, 0.44041E+04, 0.44782E+04, 0.45530E+04,
0.46284E+04, 0.47045E+04, 0.47813E+04, 0.48587E+04, 0.49367E+04,
0.50154E+04, 0.50947E+04, 0.51747E+04, 0.52553E+04, 0.53366E+04,
0.54185E+04, 0.55011E+04, 0.55844E+04, 0.56683E+04, 0.57528E+04,
0.58380E+04])
# --------------- CS 24: M = 46, I = 2 ---------------------
M = 46
I = 2
TIPS_GSI_HASH[(M, I)] = __FloatType__(1.)
TIPS_ISO_HASH[(M, I)] = float32([0.52247E+02, 0.73900E+02, 0.95568E+02,
0.11725E+03, 0.13895E+03, 0.16066E+03, 0.18241E+03, 0.20420E+03,
0.22607E+03, 0.24805E+03, 0.27018E+03, 0.29249E+03, 0.31503E+03,
0.33784E+03, 0.36096E+03, 0.38442E+03, 0.40824E+03, 0.43247E+03,
0.45712E+03, 0.48221E+03, 0.50778E+03, 0.53382E+03, 0.56037E+03,
0.58743E+03, 0.61501E+03, 0.64312E+03, 0.67179E+03, 0.70100E+03,
0.73077E+03, 0.76111E+03, 0.79202E+03, 0.82351E+03, 0.85559E+03,
0.88824E+03, 0.92149E+03, 0.95533E+03, 0.98977E+03, 0.10248E+04,
0.10605E+04, 0.10967E+04, 0.11336E+04, 0.11710E+04, 0.12091E+04,
0.12478E+04, 0.12871E+04, 0.13270E+04, 0.13675E+04, 0.14087E+04,
0.14505E+04, 0.14929E+04, 0.15359E+04, 0.15795E+04, 0.16238E+04,
0.16687E+04, 0.17142E+04, 0.17604E+04, 0.18071E+04, 0.18546E+04,
0.19026E+04, 0.19513E+04, 0.20006E+04, 0.20505E+04, 0.21011E+04,
0.21523E+04, 0.22042E+04, 0.22566E+04, 0.23098E+04, 0.23635E+04,
0.24179E+04, 0.24730E+04, 0.25286E+04, 0.25850E+04, 0.26419E+04,
0.26995E+04, 0.27578E+04, 0.28167E+04, 0.28762E+04, 0.29364E+04,
0.29972E+04, 0.30587E+04, 0.31208E+04, 0.31836E+04, 0.32470E+04,
0.33111E+04, 0.33758E+04, 0.34412E+04, 0.35072E+04, 0.35739E+04,
0.36412E+04, 0.37092E+04, 0.37778E+04, 0.38471E+04, 0.39171E+04,
0.39877E+04, 0.40589E+04, 0.41309E+04, 0.42034E+04, 0.42767E+04,
0.43505E+04, 0.44251E+04, 0.45003E+04, 0.45762E+04, 0.46527E+04,
0.47299E+04, 0.48077E+04, 0.48863E+04, 0.49654E+04, 0.50453E+04,
0.51258E+04, 0.52070E+04, 0.52888E+04, 0.53713E+04, 0.54545E+04,
0.55383E+04, 0.56229E+04, 0.57080E+04, 0.57939E+04, 0.58804E+04,
0.59676E+04])
# --------------- CS 32: M = 46, I = 3 ---------------------
M = 46
I = 3
TIPS_GSI_HASH[(M, I)] = __FloatType__(2.)
TIPS_ISO_HASH[(M, I)] = float32([0.10889E+03, 0.15403E+03, 0.19920E+03,
0.24440E+03, 0.28964E+03, 0.33491E+03, 0.38026E+03, 0.42571E+03,
0.47134E+03, 0.51722E+03, 0.56342E+03, 0.61005E+03, 0.65719E+03,
0.70493E+03, 0.75334E+03, 0.80249E+03, 0.85245E+03, 0.90329E+03,
0.95504E+03, 0.10078E+04, 0.10615E+04, 0.11163E+04, 0.11721E+04,
0.12291E+04, 0.12872E+04, 0.13464E+04, 0.14068E+04, 0.14684E+04,
0.15311E+04, 0.15951E+04, 0.16604E+04, 0.17268E+04, 0.17945E+04,
0.18635E+04, 0.19337E+04, 0.20051E+04, 0.20779E+04, 0.21519E+04,
0.22272E+04, 0.23038E+04, 0.23817E+04, 0.24609E+04, 0.25414E+04,
0.26232E+04, 0.27064E+04, 0.27908E+04, 0.28765E+04, 0.29636E+04,
0.30520E+04, 0.31417E+04, 0.32327E+04, 0.33251E+04, 0.34188E+04,
0.35138E+04, 0.36102E+04, 0.37079E+04, 0.38070E+04, 0.39074E+04,
0.40091E+04, 0.41122E+04, 0.42166E+04, 0.43224E+04, 0.44295E+04,
0.45380E+04, 0.46478E+04, 0.47590E+04, 0.48715E+04, 0.49854E+04,
0.51007E+04, 0.52173E+04, 0.53353E+04, 0.54547E+04, 0.55754E+04,
0.56975E+04, 0.58210E+04, 0.59458E+04, 0.60720E+04, 0.61996E+04,
0.63285E+04, 0.64589E+04, 0.65906E+04, 0.67236E+04, 0.68581E+04,
0.69940E+04, 0.71312E+04, 0.72698E+04, 0.74098E+04, 0.75512E+04,
0.76940E+04, 0.78381E+04, 0.79837E+04, 0.81307E+04, 0.82790E+04,
0.84287E+04, 0.85799E+04, 0.87324E+04, 0.88864E+04, 0.90417E+04,
0.91984E+04, 0.93566E+04, 0.95161E+04, 0.96771E+04, 0.98394E+04,
0.10003E+05, 0.10168E+05, 0.10335E+05, 0.10503E+05, 0.10672E+05,
0.10843E+05, 0.11015E+05, 0.11189E+05, 0.11364E+05, 0.11541E+05,
0.11719E+05, 0.11898E+05, 0.12079E+05, 0.12261E+05, 0.12444E+05,
0.12630E+05])
# --------------- CS 23: M = 46, I = 4 ---------------------
M = 46
I = 4
TIPS_GSI_HASH[(M, I)] = __FloatType__(4.)
TIPS_ISO_HASH[(M, I)] = float32([0.20737E+03, 0.29330E+03, 0.37930E+03,
0.46535E+03, 0.55145E+03, 0.63764E+03, 0.72394E+03, 0.81043E+03,
0.89722E+03, 0.98443E+03, 0.10722E+04, 0.11607E+04, 0.12501E+04,
0.13406E+04, 0.14323E+04, 0.15253E+04, 0.16197E+04, 0.17158E+04,
0.18135E+04, 0.19129E+04, 0.20142E+04, 0.21174E+04, 0.22226E+04,
0.23298E+04, 0.24391E+04, 0.25504E+04, 0.26639E+04, 0.27796E+04,
0.28976E+04, 0.30177E+04, 0.31401E+04, 0.32648E+04, 0.33918E+04,
0.35211E+04, 0.36527E+04, 0.37867E+04, 0.39231E+04, 0.40618E+04,
0.42029E+04, 0.43463E+04, 0.44922E+04, 0.46405E+04, 0.47912E+04,
0.49443E+04, 0.50999E+04, 0.52579E+04, 0.54183E+04, 0.55812E+04,
0.57465E+04, 0.59143E+04, 0.60846E+04, 0.62573E+04, 0.64325E+04,
0.66102E+04, 0.67903E+04, 0.69729E+04, 0.71581E+04, 0.73457E+04,
0.75358E+04, 0.77284E+04, 0.79235E+04, 0.81211E+04, 0.83212E+04,
0.85239E+04, 0.87290E+04, 0.89367E+04, 0.91469E+04, 0.93596E+04,
0.95748E+04, 0.97926E+04, 0.10013E+05, 0.10236E+05, 0.10461E+05,
0.10689E+05, 0.10920E+05, 0.11153E+05, 0.11388E+05, 0.11626E+05,
0.11867E+05, 0.12110E+05, 0.12356E+05, 0.12604E+05, 0.12855E+05,
0.13109E+05, 0.13365E+05, 0.13623E+05, 0.13884E+05, 0.14148E+05,
0.14415E+05, 0.14683E+05, 0.14955E+05, 0.15229E+05, 0.15506E+05,
0.15785E+05, 0.16067E+05, 0.16351E+05, 0.16638E+05, 0.16928E+05,
0.17220E+05, 0.17515E+05, 0.17813E+05, 0.18113E+05, 0.18416E+05,
0.18721E+05, 0.19029E+05, 0.19340E+05, 0.19653E+05, 0.19969E+05,
0.20287E+05, 0.20608E+05, 0.20932E+05, 0.21258E+05, 0.21587E+05,
0.21919E+05, 0.22253E+05, 0.22590E+05, 0.22930E+05, 0.23272E+05,
0.23617E+05])
# --------------- SO3 26: M = 47, I = 1 --------------------- not in TIPS-2011
M = 47
I = 1
TIPS_GSI_HASH[(M, I)] = __FloatType__(0.)
TIPS_ISO_HASH[(M, I)] = float32([0.])
# NOT IN HITRAN, BUT PRESENT IN TIPS-2011
# ... extracted from iso_comparison
#
# id M I COMMENT TIPS_M TIPS_I iso_name abundance mass mol_name
# 101 1001 1 not in HITRAN 45 H \N \N H
#
# 102 1002 1 not in HITRAN 45 He \N \N He
#
# 104 1018 1 not in HITRAN 45 Ar \N \N Ar
#
# not in HITRAN 45 4224 C2N2
# not in HITRAN 45 5225 C2N2
#
# not in HITRAN 48 26 SO
# not in HITRAN 48 46 SO
# not in HITRAN 48 28 SO
#
# not in HITRAN 49 1221 C3H4
#
# not in HITRAN 50 2111 CH3
#
# not in HITRAN 51 222 CS2
# not in HITRAN 51 224 CS2
# not in HITRAN 51 223 CS2
# not in HITRAN 51 232 CS2
# --------------- TIPS IMPLEMENTATION ----------------------
def BD_TIPS_2011_PYTHON(M, I, T):
# out of temperature range
if T < 70. or T > 3000.:
# Qt = -1.
# gi = 0.
# return gi,Qt
raise Exception('TIPS: T must be between 70K and 3000K.')
try:
# get statistical weight for specified isotopologue
gi = TIPS_GSI_HASH[(M, I)]
# interpolate partition sum for specified isotopologue
Qt = AtoB(T, Tdat, TIPS_ISO_HASH[(M, I)], TIPS_NPT)
except KeyError:
raise Exception('TIPS: no data for M,I = %d,%d.' % (M, I))
return gi, Qt
# Total internal partition sum
# M - molecule number
# I - isotopologue number
# T - temperature (K)
# returns (StatWeight,PartitionSum)
def partitionSum(M, I, T, step=None):
"""
INPUT PARAMETERS:
M: HITRAN molecule number (required)
I: HITRAN isotopologue number (required)
T: temperature conditions (required)
step: step to calculate temperatures (optional)
OUTPUT PARAMETERS:
TT: list of temperatures (present only if T is a list)
PartSum: partition sums calculated on a list of temperatures
---
DESCRIPTION:
Calculate range of partition sums at different temperatures.
This function uses a python implementation of TIPS-2011 code:
Reference:
A. L. Laraia, R. R. Gamache, J. Lamouroux, I. E. Gordon, L. S. Rothman.
Total internal partition sums to support planetary remote sensing.
Icarus, Volume 215, Issue 1, September 2011, Pages 391–400
http://dx.doi.org/10.1016/j.icarus.2011.06.004
Output depends on a structure of input parameter T so that:
1) If T is a scalar/list and step IS NOT provided,
then calculate partition sums over each value of T.
2) If T is a list and step parameter IS provided,
then calculate partition sums between T[0] and T[1]
with a given step.
---
EXAMPLE OF USAGE:
PartSum = partitionSum(1,1,[296,1000])
TT,PartSum = partitionSum(1,1,[296,1000],step=0.1)
---
"""
# partitionSum
if not step:
if type(T) not in set([list, tuple]):
return BD_TIPS_2011_PYTHON(M, I, T)[1]
else:
return [BD_TIPS_2011_PYTHON(M, I, temp)[1] for temp in T]
else:
# n = (T[1]-T[0])/step
# TT = linspace(T[0],T[1],n)
TT = arange(T[0], T[1], step)
return TT, array([BD_TIPS_2011_PYTHON(M, I, temp)[1] for temp in TT])
# ------------------ partition sum --------------------------------------
# ------------------ LINESHAPES -----------------------------------------
# ------------------ complex probability function -----------------------
# define static data
zone = __ComplexType__(1.0e0 + 0.0e0j)
zi = __ComplexType__(0.0e0 + 1.0e0j)
tt = __FloatType__(
[0.5e0, 1.5e0, 2.5e0, 3.5e0, 4.5e0, 5.5e0, 6.5e0, 7.5e0, 8.5e0, 9.5e0, 10.5e0, 11.5e0, 12.5e0, 13.5e0, 14.5e0])
pipwoeronehalf = __FloatType__(0.564189583547756e0)
# "naive" implementation for benchmarks
def cpf3(X, Y):
# X,Y,WR,WI - numpy arrays
if type(X) != ndarray:
if type(X) not in set([list, tuple]):
X = array([X])
else:
X = array(X)
if type(Y) != ndarray:
if type(Y) not in set([list, tuple]):
Y = array([Y])
else:
Y = array(Y)
zm1 = zone / __ComplexType__(X + zi * Y) # maybe redundant
zm2 = zm1 ** 2
zsum = zone
zterm = zone
for tt_i in tt:
zterm *= zm2 * tt_i
zsum += zterm
zsum *= zi * zm1 * pipwoeronehalf
return zsum.real, zsum.imag
T = __FloatType__([0.314240376e0, 0.947788391e0, 1.59768264e0, 2.27950708e0, 3.02063703e0, 3.8897249e0])
U = __FloatType__([1.01172805e0, -0.75197147e0, 1.2557727e-2, 1.00220082e-2, -2.42068135e-4, 5.00848061e-7])
S = __FloatType__([1.393237e0, 0.231152406e0, -0.155351466e0, 6.21836624e-3, 9.19082986e-5, -6.27525958e-7])
# Complex probability function implementation (Humlicek)
def cpf(X, Y):
# X,Y,WR,WI - numpy arrays
if type(X) != ndarray:
if type(X) not in set([list, tuple]):
X = array([X])
else:
X = array(X)
if type(Y) != ndarray:
if type(Y) not in set([list, tuple]):
Y = array([Y])
else:
Y = array(Y)
# REGION3
index_REGION3 = where(sqrt(X ** 2 + Y ** 2) > __FloatType__(8.0e0))
X_REGION3 = X[index_REGION3]
Y_REGION3 = Y[index_REGION3]
zm1 = zone / __ComplexType__(X_REGION3 + zi * Y_REGION3)
zm2 = zm1 ** 2
zsum_REGION3 = zone
zterm = zone
for tt_i in tt:
zterm *= zm2 * tt_i
zsum_REGION3 += zterm
zsum_REGION3 *= zi * zm1 * pipwoeronehalf
index_REGION12 = setdiff1d(array(arange(len(X))), array(index_REGION3))
X_REGION12 = X[index_REGION12]
Y_REGION12 = Y[index_REGION12]
WR = __FloatType__(0.0e0)
WI = __FloatType__(0.0e0)
# REGION12
Y1_REGION12 = Y_REGION12 + __FloatType__(1.5e0)
Y2_REGION12 = Y1_REGION12 ** 2
# REGION2
subindex_REGION2 = where((Y_REGION12 <= 0.85e0) &
(abs(X_REGION12) >= (18.1e0 * Y_REGION12 + 1.65e0)))
index_REGION2 = index_REGION12[subindex_REGION2]
X_REGION2 = X[index_REGION2]
Y_REGION2 = Y[index_REGION2]
Y1_REGION2 = Y1_REGION12[subindex_REGION2]
Y2_REGION2 = Y2_REGION12[subindex_REGION2]
Y3_REGION2 = Y_REGION2 + __FloatType__(3.0e0)
WR_REGION2 = WR
WI_REGION2 = WI
WR_REGION2 = zeros(len(X_REGION2))
ii = abs(X_REGION2) < __FloatType__(12.0e0)
WR_REGION2[ii] = exp(-X_REGION2[ii] ** 2)
WR_REGION2[~ii] = WR
for I in range(6):
R_REGION2 = X_REGION2 - T[I]
R2_REGION2 = R_REGION2 ** 2
D_REGION2 = __FloatType__(1.0e0) / (R2_REGION2 + Y2_REGION2)
D1_REGION2 = Y1_REGION2 * D_REGION2
D2_REGION2 = R_REGION2 * D_REGION2
WR_REGION2 = WR_REGION2 + Y_REGION2 * (U[I] * (R_REGION2 * D2_REGION2 - 1.5e0 * D1_REGION2) +
S[I] * Y3_REGION2 * D2_REGION2) / (R2_REGION2 + 2.25e0)
R_REGION2 = X_REGION2 + T[I]
R2_REGION2 = R_REGION2 ** 2
D_REGION2 = __FloatType__(1.0e0) / (R2_REGION2 + Y2_REGION2)
D3_REGION2 = Y1_REGION2 * D_REGION2
D4_REGION2 = R_REGION2 * D_REGION2
WR_REGION2 = WR_REGION2 + Y_REGION2 * (U[I] * (R_REGION2 * D4_REGION2 - 1.5e0 * D3_REGION2) -
S[I] * Y3_REGION2 * D4_REGION2) / (R2_REGION2 + 2.25e0)
WI_REGION2 = WI_REGION2 + U[I] * (D2_REGION2 + D4_REGION2) + S[I] * (D1_REGION2 - D3_REGION2)
    # REGION1
index_REGION1 = setdiff1d(array(index_REGION12), array(index_REGION2))
X_REGION1 = X[index_REGION1]
    Y_REGION1 = Y[index_REGION1]
subindex_REGION1 = setdiff1d(array(arange(len(index_REGION12))), array(subindex_REGION2))
Y1_REGION1 = Y1_REGION12[subindex_REGION1]
Y2_REGION1 = Y2_REGION12[subindex_REGION1]
WR_REGION1 = WR
WI_REGION1 = WI
for I in range(6):
R_REGION1 = X_REGION1 - T[I]
D_REGION1 = __FloatType__(1.0e0) / (R_REGION1 ** 2 + Y2_REGION1)
D1_REGION1 = Y1_REGION1 * D_REGION1
D2_REGION1 = R_REGION1 * D_REGION1
R_REGION1 = X_REGION1 + T[I]
D_REGION1 = __FloatType__(1.0e0) / (R_REGION1 ** 2 + Y2_REGION1)
D3_REGION1 = Y1_REGION1 * D_REGION1
D4_REGION1 = R_REGION1 * D_REGION1
WR_REGION1 = WR_REGION1 + U[I] * (D1_REGION1 + D3_REGION1) - S[I] * (D2_REGION1 - D4_REGION1)
WI_REGION1 = WI_REGION1 + U[I] * (D2_REGION1 + D4_REGION1) + S[I] * (D1_REGION1 - D3_REGION1)
# total result
WR_TOTAL = zeros(len(X))
WI_TOTAL = zeros(len(X))
# REGION3
WR_TOTAL[index_REGION3] = zsum_REGION3.real
WI_TOTAL[index_REGION3] = zsum_REGION3.imag
# REGION2
WR_TOTAL[index_REGION2] = WR_REGION2
WI_TOTAL[index_REGION2] = WI_REGION2
# REGION1
WR_TOTAL[index_REGION1] = WR_REGION1
WI_TOTAL[index_REGION1] = WI_REGION1
return WR_TOTAL, WI_TOTAL
hcpf = cpf # stub for initial cpf
# ------------------ Schreier CPF ------------------------
# "Optimized implementations of rational approximations
# for the Voigt and complex error function".
# Franz Schreier. JQSRT 112 (2011) 1010-1025
# doi:10.1016/j.jqsrt.2010.12.010
# Enable this if numpy.polyval doesn't perform well.
"""
def polyval(p, x):
y = zeros(x.shape, dtype=float)
for i, v in enumerate(p):
y *= x
y += v
return y
"""
def cef(x, y, N):
    # Computes the function w(z) = exp(-z**2)*erfc(-iz) using a rational
# series with N terms. It is assumed that Im(z) > 0 or Im(z) = 0.
z = x + 1.0j * y
M = 2 * N;
M2 = 2 * M;
k = arange(-M + 1, M) # '; # M2 = no. of sampling points.
L = sqrt(N / sqrt(2)); # Optimal choice of L.
theta = k * pi / M;
t = L * tan(theta / 2); # Variables theta and t.
    # f = exp(-t.^2).*(L^2+t.^2); f = [0; f]; # Function to be transformed (MATLAB original).
f = zeros(len(t) + 1);
f[0] = 0
f[1:] = exp(-t ** 2) * (L ** 2 + t ** 2)
# f = insert(exp(-t**2)*(L**2+t**2),0,0)
a = real(fft(fftshift(f))) / M2; # Coefficients of transform.
a = flipud(a[1:N + 1]); # Reorder coefficients.
Z = (L + 1.0j * z) / (L - 1.0j * z);
p = polyval(a, Z); # Polynomial evaluation.
w = 2 * p / (L - 1.0j * z) ** 2 + (1 / sqrt(pi)) / (L - 1.0j * z); # Evaluate w(z).
return w
# weideman24 by default
# weideman24 = lambda x,y: cef(x,y,24)
weideman = lambda x, y, n: cef(x, y, n)
def hum1_wei(x, y, n=24):
t = y - 1.0j * x
cerf = 1 / sqrt(pi) * t / (0.5 + t ** 2)
"""
z = x+1j*y
cerf = 1j*z/sqrt(pi)/(z**2-0.5)
"""
mask = abs(x) + y < 15.0
if any(mask):
w24 = weideman(x[mask], y[mask], n)
place(cerf, mask, w24)
return cerf.real, cerf.imag
VARIABLES['CPF'] = hum1_wei
# VARIABLES['CPF'] = cpf
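# Illustrative sketch (added comment, not part of the original module): any CPF
# implementation registered in VARIABLES['CPF'] returns the real and imaginary parts
# of w(z), and a normalized Voigt shape follows directly from the real part via
# K(x,y) = Re w(x+iy). The helper name below is hypothetical and purely demonstrative.
def _voigt_from_cpf(sg0, GamD, Gam0, sg):
    # sg0: line center (cm-1), GamD: Doppler HWHM (cm-1), Gam0: Lorentz HWHM (cm-1)
    # sg: numpy array of wavenumbers (cm-1)
    cte_ = sqrt(log(2.0)) / GamD
    x_ = (sg - sg0) * cte_
    y_ = Gam0 * cte_ * ones(len(sg))
    wr_ = VARIABLES['CPF'](x_, y_)[0]
    return sqrt(log(2.0) / pi) / GamD * wr_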
# ------------------ Hartmann-Tran Profile (HTP) ------------------------
def pcqsdhc(sg0, GamD, Gam0, Gam2, Shift0, Shift2, anuVC, eta, sg):
# -------------------------------------------------
# "pCqSDHC": partially-Correlated quadratic-Speed-Dependent Hard-Collision
# Subroutine to Compute the complex normalized spectral shape of an
# isolated line by the pCqSDHC model
#
# Reference:
# H. Tran, N.H. Ngo, J.-M. Hartmann.
# Efficient computation of some speed-dependent isolated line profiles.
# JQSRT, Volume 129, November 2013, Pages 199–203
# http://dx.doi.org/10.1016/j.jqsrt.2013.06.015
#
# Input/Output Parameters of Routine (Arguments or Common)
# ---------------------------------
# T : Temperature in Kelvin (Input).
# amM1 : Molar mass of the absorber in g/mol(Input).
# sg0 : Unperturbed line position in cm-1 (Input).
# GamD : Doppler HWHM in cm-1 (Input)
# Gam0 : Speed-averaged line-width in cm-1 (Input).
# Gam2 : Speed dependence of the line-width in cm-1 (Input).
# anuVC : Velocity-changing frequency in cm-1 (Input).
# eta : Correlation parameter, No unit (Input).
# Shift0 : Speed-averaged line-shift in cm-1 (Input).
# Shift2 : Speed dependence of the line-shift in cm-1 (Input)
# sg : Current WaveNumber of the Computation in cm-1 (Input).
#
# Output Quantities (through Common Statements)
# -----------------
# LS_pCqSDHC_R: Real part of the normalized spectral shape (cm)
# LS_pCqSDHC_I: Imaginary part of the normalized spectral shape (cm)
#
# Called Routines: 'CPF' (Complex Probability Function)
# --------------- 'CPF3' (Complex Probability Function for the region 3)
#
# Called By: Main Program
# ---------
#
# Double Precision Version
#
# -------------------------------------------------
    # sg is the only vector argument which is passed to this function
if type(sg) not in set([array, ndarray, list, tuple]):
sg = array([sg])
number_of_points = len(sg)
Aterm_GLOBAL = zeros(number_of_points, dtype=__ComplexType__)
Bterm_GLOBAL = zeros(number_of_points, dtype=__ComplexType__)
cte = sqrt(log(2.0e0)) / GamD
rpi = sqrt(pi)
iz = __ComplexType__(0.0e0 + 1.0e0j)
c0 = __ComplexType__(Gam0 + 1.0e0j * Shift0)
c2 = __ComplexType__(Gam2 + 1.0e0j * Shift2)
c0t = __ComplexType__((1.0e0 - eta) * (c0 - 1.5e0 * c2) + anuVC)
c2t = __ComplexType__((1.0e0 - eta) * c2)
# PART1
if abs(c2t) == 0.0e0:
# print('PART1') # DEBUG
Z1 = (iz * (sg0 - sg) + c0t) * cte
xZ1 = -Z1.imag
yZ1 = Z1.real
# WR1,WI1 = cpf(xZ1,yZ1)
WR1, WI1 = VARIABLES['CPF'](xZ1, yZ1)
Aterm_GLOBAL = rpi * cte * __ComplexType__(WR1 + 1.0e0j * WI1)
index_Z1 = abs(Z1) <= 4.0e3
index_NOT_Z1 = ~index_Z1
if any(index_Z1):
# print('PART1/Z1') # DEBUG
Bterm_GLOBAL = rpi * cte * ((1.0e0 - Z1 ** 2) * __ComplexType__(WR1 + 1.0e0j * WI1) + Z1 / rpi)
if any(index_NOT_Z1):
# print('PART1/~Z1') # DEBUG
Bterm_GLOBAL = cte * (rpi * __ComplexType__(WR1 + 1.0e0j * WI1) + 0.5e0 / Z1 - 0.75e0 / (Z1 ** 3))
else:
# PART2, PART3 AND PART4 (PART4 IS A MAIN PART)
# X - vector, Y - scalar
X = (iz * (sg0 - sg) + c0t) / c2t
Y = __ComplexType__(1.0e0 / ((2.0e0 * cte * c2t)) ** 2)
csqrtY = (Gam2 - iz * Shift2) / (2.0e0 * cte * (1.0e0 - eta) * (Gam2 ** 2 + Shift2 ** 2))
index_PART2 = abs(X) <= 3.0e-8 * abs(Y)
index_PART3 = (abs(Y) <= 1.0e-15 * abs(X)) & ~index_PART2
index_PART4 = ~ (index_PART2 | index_PART3)
# PART4
if any(index_PART4):
# print('PART4') # DEBUG
X_TMP = X[index_PART4]
Z1 = sqrt(X_TMP + Y) - csqrtY
Z2 = Z1 + __FloatType__(2.0e0) * csqrtY
xZ1 = -Z1.imag
yZ1 = Z1.real
xZ2 = -Z2.imag
yZ2 = Z2.real
SZ1 = sqrt(xZ1 ** 2 + yZ1 ** 2)
SZ2 = sqrt(xZ2 ** 2 + yZ2 ** 2)
DSZ = abs(SZ1 - SZ2)
SZmx = maximum(SZ1, SZ2)
SZmn = minimum(SZ1, SZ2)
length_PART4 = len(index_PART4)
WR1_PART4 = zeros(length_PART4)
WI1_PART4 = zeros(length_PART4)
WR2_PART4 = zeros(length_PART4)
WI2_PART4 = zeros(length_PART4)
index_CPF3 = (DSZ <= 1.0e0) & (SZmx > 8.0e0) & (SZmn <= 8.0e0)
index_CPF = ~index_CPF3 # can be removed
if any(index_CPF3):
# print('PART4/CPF3') # DEBUG
WR1, WI1 = cpf3(xZ1[index_CPF3], yZ1[index_CPF3])
WR2, WI2 = cpf3(xZ2[index_CPF3], yZ2[index_CPF3])
WR1_PART4[index_CPF3] = WR1
WI1_PART4[index_CPF3] = WI1
WR2_PART4[index_CPF3] = WR2
WI2_PART4[index_CPF3] = WI2
if any(index_CPF):
# print('PART4/CPF') # DEBUG
# print(VARIABLES['CPF'])
# WR1,WI1 = cpf(xZ1[index_CPF],yZ1[index_CPF])
# WR2,WI2 = cpf(xZ2[index_CPF],yZ2[index_CPF])
WR1, WI1 = VARIABLES['CPF'](xZ1[index_CPF], yZ1[index_CPF])
WR2, WI2 = VARIABLES['CPF'](xZ2[index_CPF], yZ2[index_CPF])
WR1_PART4[index_CPF] = WR1
WI1_PART4[index_CPF] = WI1
WR2_PART4[index_CPF] = WR2
WI2_PART4[index_CPF] = WI2
Aterm = rpi * cte * (
__ComplexType__(WR1_PART4 + 1.0e0j * WI1_PART4) - __ComplexType__(WR2_PART4 + 1.0e0j * WI2_PART4))
Bterm = (-1.0e0 +
rpi / (2.0e0 * csqrtY) * (1.0e0 - Z1 ** 2) * __ComplexType__(WR1_PART4 + 1.0e0j * WI1_PART4) -
rpi / (2.0e0 * csqrtY) * (1.0e0 - Z2 ** 2) * __ComplexType__(WR2_PART4 + 1.0e0j * WI2_PART4)) / c2t
Aterm_GLOBAL[index_PART4] = Aterm
Bterm_GLOBAL[index_PART4] = Bterm
# PART2
if any(index_PART2):
# print('PART2') # DEBUG
X_TMP = X[index_PART2]
Z1 = (iz * (sg0 - sg[index_PART2]) + c0t) * cte
Z2 = sqrt(X_TMP + Y) + csqrtY
xZ1 = -Z1.imag
yZ1 = Z1.real
xZ2 = -Z2.imag
yZ2 = Z2.real
# WR1_PART2,WI1_PART2 = cpf(xZ1,yZ1)
# WR2_PART2,WI2_PART2 = cpf(xZ2,yZ2)
WR1_PART2, WI1_PART2 = VARIABLES['CPF'](xZ1, yZ1)
WR2_PART2, WI2_PART2 = VARIABLES['CPF'](xZ2, yZ2)
Aterm = rpi * cte * (
__ComplexType__(WR1_PART2 + 1.0e0j * WI1_PART2) - __ComplexType__(WR2_PART2 + 1.0e0j * WI2_PART2))
Bterm = (-1.0e0 +
rpi / (2.0e0 * csqrtY) * (1.0e0 - Z1 ** 2) * __ComplexType__(WR1_PART2 + 1.0e0j * WI1_PART2) -
rpi / (2.0e0 * csqrtY) * (1.0e0 - Z2 ** 2) * __ComplexType__(WR2_PART2 + 1.0e0j * WI2_PART2)) / c2t
Aterm_GLOBAL[index_PART2] = Aterm
Bterm_GLOBAL[index_PART2] = Bterm
# PART3
if any(index_PART3):
# print('PART3') # DEBUG
X_TMP = X[index_PART3]
xZ1 = -sqrt(X_TMP + Y).imag
yZ1 = sqrt(X_TMP + Y).real
# WR1_PART3,WI1_PART3 = cpf(xZ1,yZ1)
WR1_PART3, WI1_PART3 = VARIABLES['CPF'](xZ1, yZ1)
index_ABS = abs(sqrt(X_TMP)) <= 4.0e3
index_NOT_ABS = ~index_ABS
Aterm = zeros(len(index_PART3), dtype=__ComplexType__)
Bterm = zeros(len(index_PART3), dtype=__ComplexType__)
if any(index_ABS):
xXb = -sqrt(X).imag
yXb = sqrt(X).real
# WRb,WIb = cpf(xXb,yXb)
WRb, WIb = VARIABLES['CPF'](xXb, yXb)
Aterm[index_ABS] = (2.0e0 * rpi / c2t) * (
1.0e0 / rpi - sqrt(X_TMP[index_ABS]) * __ComplexType__(WRb + 1.0e0j * WIb))
Bterm[index_ABS] = (1.0e0 / c2t) * (-1.0e0 +
2.0e0 * rpi * (1.0e0 - X_TMP[index_ABS] - 2.0e0 * Y) * (
1.0e0 / rpi - sqrt(X_TMP[index_ABS]) * __ComplexType__(
WRb + 1.0e0j * WIb)) +
2.0e0 * rpi * sqrt(X_TMP[index_ABS] + Y) * __ComplexType__(
WR1_PART3 + 1.0e0j * WI1_PART3))
if any(index_NOT_ABS):
Aterm[index_NOT_ABS] = (1.0e0 / c2t) * (
1.0e0 / X_TMP[index_NOT_ABS] - 1.5e0 / (X_TMP[index_NOT_ABS] ** 2))
Bterm[index_NOT_ABS] = (1.0e0 / c2t) * (-1.0e0 + (1.0e0 - X_TMP[index_NOT_ABS] - 2.0e0 * Y) *
(1.0e0 / X_TMP[index_NOT_ABS] - 1.5e0 / (
X_TMP[index_NOT_ABS] ** 2)) +
2.0e0 * rpi * sqrt(X_TMP[index_NOT_ABS] + Y) * __ComplexType__(
WR1 + 1.0e0j * WI1))
Aterm_GLOBAL[index_PART3] = Aterm
Bterm_GLOBAL[index_PART3] = Bterm
# common part
LS_pCqSDHC = (1.0e0 / pi) * (
Aterm_GLOBAL / (1.0e0 - (anuVC - eta * (c0 - 1.5e0 * c2)) * Aterm_GLOBAL + eta * c2 * Bterm_GLOBAL))
# print('pcqsdc_end',sum(LS_pCqSDHC.real),sum(LS_pCqSDHC.imag))
return LS_pCqSDHC.real, LS_pCqSDHC.imag
# ------------------ CROSS-SECTIONS, XSECT.PY --------------------------------
# set interfaces for TIPS(M,I,T)
PYTIPS = lambda M, I, T: BD_TIPS_2011_PYTHON(M, I, T)[1]
# set interfaces for profiles
# PYHTP = pcqsdhc
# PROFILE_HTP = PYHTP
# PROFILE_VOIGT = lambda sg0,GamD,Gam0,sg: PROFILE_HTP(sg0,GamD,Gam0,cZero,cZero,cZero,cZero,cZero,sg)
# PROFILE_LORENTZ = lambda sg0,Gam0,sg: Gam0/(pi*(Gam0**2+(sg-sg0)**2))
# PROFILE_DOPPLER = lambda sg0,GamD,sg: cSqrtLn2divSqrtPi*exp(-cLn2*((sg-sg0)/GamD)**2)/GamD
def PROFILE_HT(sg0, GamD, Gam0, Gam2, Shift0, Shift2, anuVC, eta, sg):
"""
#-------------------------------------------------
# "pCqSDHC": partially-Correlated quadratic-Speed-Dependent Hard-Collision
# Subroutine to Compute the complex normalized spectral shape of an
# isolated line by the pCqSDHC model
#
# References:
#
# 1) N.H. Ngo, D. Lisak, H. Tran, J.-M. Hartmann.
# An isolated line-shape model to go beyond the Voigt profile in
# spectroscopic databases and radiative transfer codes.
# JQSRT, Volume 129, November 2013, Pages 89–100
# http://dx.doi.org/10.1016/j.jqsrt.2013.05.034
#
# 2) H. Tran, N.H. Ngo, J.-M. Hartmann.
# Efficient computation of some speed-dependent isolated line profiles.
# JQSRT, Volume 129, November 2013, Pages 199–203
# http://dx.doi.org/10.1016/j.jqsrt.2013.06.015
#
# 3) H. Tran, N.H. Ngo, J.-M. Hartmann.
    # Erratum to "Efficient computation of some speed-dependent isolated line profiles".
# JQSRT, Volume 134, February 2014, Pages 104
# http://dx.doi.org/10.1016/j.jqsrt.2013.10.015
#
# Input/Output Parameters of Routine (Arguments or Common)
# ---------------------------------
# T : Temperature in Kelvin (Input).
# amM1 : Molar mass of the absorber in g/mol(Input).
# sg0 : Unperturbed line position in cm-1 (Input).
# GamD : Doppler HWHM in cm-1 (Input)
# Gam0 : Speed-averaged line-width in cm-1 (Input).
# Gam2 : Speed dependence of the line-width in cm-1 (Input).
# anuVC : Velocity-changing frequency in cm-1 (Input).
# eta : Correlation parameter, No unit (Input).
# Shift0 : Speed-averaged line-shift in cm-1 (Input).
# Shift2 : Speed dependence of the line-shift in cm-1 (Input)
# sg : Current WaveNumber of the Computation in cm-1 (Input).
#
# The function has two outputs:
# -----------------
# (1): Real part of the normalized spectral shape (cm)
# (2): Imaginary part of the normalized spectral shape (cm)
#
# Called Routines: 'CPF' (Complex Probability Function)
# --------------- 'CPF3' (Complex Probability Function for the region 3)
#
# Based on a double precision Fortran version
#
#-------------------------------------------------
"""
return pcqsdhc(sg0, GamD, Gam0, Gam2, Shift0, Shift2, anuVC, eta, sg)
PROFILE_HTP = PROFILE_HT # stub for backwards compatibility
def PROFILE_SDRAUTIAN(sg0, GamD, Gam0, Gam2, Shift0, Shift2, anuVC, sg):
"""
# Speed dependent Rautian profile based on HTP.
# Input parameters:
# sg0 : Unperturbed line position in cm-1 (Input).
# GamD : Doppler HWHM in cm-1 (Input)
# Gam0 : Speed-averaged line-width in cm-1 (Input).
# Gam2 : Speed dependence of the line-width in cm-1 (Input).
# anuVC : Velocity-changing frequency in cm-1 (Input).
# Shift0 : Speed-averaged line-shift in cm-1 (Input).
# Shift2 : Speed dependence of the line-shift in cm-1 (Input)
# sg : Current WaveNumber of the Computation in cm-1 (Input).
"""
return pcqsdhc(sg0, GamD, Gam0, Gam2, Shift0, Shift2, anuVC, cZero, sg)
def PROFILE_RAUTIAN(sg0, GamD, Gam0, Shift0, anuVC, eta, sg):
"""
# Rautian profile based on HTP.
# Input parameters:
# sg0 : Unperturbed line position in cm-1 (Input).
# GamD : Doppler HWHM in cm-1 (Input)
# Gam0 : Speed-averaged line-width in cm-1 (Input).
# anuVC : Velocity-changing frequency in cm-1 (Input).
# Shift0 : Speed-averaged line-shift in cm-1 (Input).
# sg : Current WaveNumber of the Computation in cm-1 (Input).
"""
return pcqsdhc(sg0, GamD, Gam0, cZero, Shift0, cZero, anuVC, cZero, sg)
def PROFILE_SDVOIGT(sg0, GamD, Gam0, Gam2, Shift0, Shift2, sg):
"""
# Speed dependent Voigt profile based on HTP.
# Input parameters:
# sg0 : Unperturbed line position in cm-1 (Input).
# GamD : Doppler HWHM in cm-1 (Input)
# Gam0 : Speed-averaged line-width in cm-1 (Input).
# Gam2 : Speed dependence of the line-width in cm-1 (Input).
# Shift0 : Speed-averaged line-shift in cm-1 (Input).
# Shift2 : Speed dependence of the line-shift in cm-1 (Input)
# sg : Current WaveNumber of the Computation in cm-1 (Input).
"""
return pcqsdhc(sg0, GamD, Gam0, Gam2, Shift0, Shift2, cZero, cZero, sg)
def PROFILE_VOIGT(sg0, GamD, Gam0, sg):
"""
# Voigt profile based on HTP.
# Input parameters:
# sg0: Unperturbed line position in cm-1 (Input).
# GamD: Doppler HWHM in cm-1 (Input)
# Gam0: Speed-averaged line-width in cm-1 (Input).
# sg: Current WaveNumber of the Computation in cm-1 (Input).
"""
return PROFILE_HTP(sg0, GamD, Gam0, cZero, cZero, cZero, cZero, cZero, sg)
def PROFILE_LORENTZ(sg0, Gam0, sg):
"""
# Lorentz profile.
# Input parameters:
# sg0: Unperturbed line position in cm-1 (Input).
# Gam0: Speed-averaged line-width in cm-1 (Input).
# sg: Current WaveNumber of the Computation in cm-1 (Input).
"""
return Gam0 / (pi * (Gam0 ** 2 + (sg - sg0) ** 2))
def PROFILE_DOPPLER(sg0, GamD, sg):
"""
# Doppler profile.
# Input parameters:
# sg0: Unperturbed line position in cm-1 (Input).
# GamD: Doppler HWHM in cm-1 (Input)
# sg: Current WaveNumber of the Computation in cm-1 (Input).
"""
return cSqrtLn2divSqrtPi * exp(-cLn2 * ((sg - sg0) / GamD) ** 2) / GamD
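# Minimal usage sketch (illustrative numbers only, not part of the original module):
# compare the profile shapes on a small wavenumber grid around an assumed line center
# of 1000 cm-1 with GammaD = 0.005 cm-1 and Gamma0 = 0.05 cm-1.
def _example_profile_comparison():
    sg = arange(999.0, 1001.0, 0.001)
    voigt = PROFILE_VOIGT(1000.0, 0.005, 0.05, sg)[0]  # real part of the normalized shape
    lorentz = PROFILE_LORENTZ(1000.0, 0.05, sg)
    doppler = PROFILE_DOPPLER(1000.0, 0.005, sg)
    return sg, voigt, lorentz, doppler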
# Volume concentration of all gas molecules at the pressure p and temperature T
def volumeConcentration(p, T):
return (p / 9.869233e-7) / (cBolts * T) # CGS
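# For orientation (approximate): with p in atm and T in K as above,
# volumeConcentration(1., 296.) is roughly 2.48e19 molecules/cm3.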
# ------------------------------- PARAMETER DEPENDENCIES --------------------------------
# temperature dependence for intensities (HITRAN)
def EnvironmentDependency_Intensity(LineIntensityRef, T, Tref, SigmaT, SigmaTref,
LowerStateEnergy, LineCenter):
const = __FloatType__(1.4388028496642257)
ch = exp(-const * LowerStateEnergy / T) * (1 - exp(-const * LineCenter / T))
zn = exp(-const * LowerStateEnergy / Tref) * (1 - exp(-const * LineCenter / Tref))
LineIntensity = LineIntensityRef * SigmaTref / SigmaT * ch / zn
return LineIntensity
# environmental dependence for GammaD (HTP, Voigt) # Tref/T ????
def EnvironmentDependency_GammaD(GammaD_ref, T, Tref):
# Doppler parameters do not depend on pressure!
return GammaD_ref * sqrt(T / Tref)
# environmental dependence for Gamma0 (HTP, Voigt)
def EnvironmentDependency_Gamma0(Gamma0_ref, T, Tref, p, pref, TempRatioPower):
return Gamma0_ref * p / pref * (Tref / T) ** TempRatioPower
# environmental dependence for Gamma2 (HTP)
def EnvironmentDependency_Gamma2(Gamma2_ref, T, Tref, p, pref, TempRatioPower):
return Gamma2_ref * p / pref * (Tref / T) ** TempRatioPower
# environmental dependence for Delta0 (HTP)
def EnvironmentDependency_Delta0(Delta0_ref, p, pref):
return Delta0_ref * p / pref
# environmental dependence for Delta2 (HTP)
def EnvironmentDependency_Delta2(Delta2_ref, p, pref):
return Delta2_ref * p / pref
# environmental dependence for anuVC (HTP)
def EnvironmentDependency_anuVC(anuVC_ref, T, Tref, p, pref):
return anuVC_ref * Tref / T * p / pref
# ------------------------------- /PARAMETER DEPENDENCIES --------------------------------
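# Illustrative sketch (not part of the original module): a user-supplied EnvDependences
# callback receives the environment dictionary (with 'T', 'p', 'Tref', 'pref') and the
# current line record, and may return overrides keyed by parameter name; returned values
# replace the default environment-adjusted ones. The temperature exponent 0.75 below is
# an arbitrary assumption used only for demonstration.
def _exampleEnvDependences(ENV, LINE):
    gamma_air = LINE['gamma_air'] * ENV['p'] / ENV['pref'] * (ENV['Tref'] / ENV['T']) ** 0.75
    return {'gamma_air': gamma_air}
# usage sketch: absorptionCoefficient_Voigt(..., EnvDependences=_exampleEnvDependences)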
# ------------------------------- BINDINGS --------------------------------
# default parameter bindings
DefaultParameterBindings = {}
# default temperature dependencies
DefaultEnvironmentDependencyBindings = {}
# ------------------------------- /BINDINGS --------------------------------
# default values for intensity threshold
DefaultIntensityThreshold = 0. # cm*molec
# default value for omega wing in halfwidths (from center)
DefaultOmegaWingHW = 50. # cm-1 HOTW default
# check an argument for being a tuple or list
# this is connected with a "bug" that in Python
# (val) is not a tuple, but (val,) is a tuple
def listOfTuples(a):
if type(a) not in set([list, tuple]):
a = [a]
return a
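# e.g. listOfTuples('co2') -> ['co2'], while a list or tuple is returned unchanged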
# determine default parameters from those which are passed to absorptionCoefficient_...
def getDefaultValuesForXsect(Components, SourceTables, Environment, OmegaRange,
OmegaStep, OmegaWing, IntensityThreshold, Format):
if SourceTables[0] == None:
SourceTables = ['__BUFFER__', ]
if Environment == None:
Environment = {'T': 296., 'p': 1.}
if Components == [None]:
CompDict = {}
for TableName in SourceTables:
# check table existance
if TableName not in LOCAL_TABLE_CACHE.keys():
raise Exception('%s: no such table. Check tableList() for more info.' % TableName)
mol_ids = LOCAL_TABLE_CACHE[TableName]['data']['molec_id']
iso_ids = LOCAL_TABLE_CACHE[TableName]['data']['local_iso_id']
if len(mol_ids) != len(iso_ids):
                raise Exception('Lengths of mol_ids and iso_ids differ!')
MI_zip = zip(mol_ids, iso_ids)
MI_zip = set(MI_zip)
for mol_id, iso_id in MI_zip:
CompDict[(mol_id, iso_id)] = None
Components = CompDict.keys()
if OmegaRange == None:
omega_min = float('inf')
omega_max = float('-inf')
for TableName in SourceTables:
nu = LOCAL_TABLE_CACHE[TableName]['data']['nu']
numin = min(nu)
numax = max(nu)
if omega_min > numin:
omega_min = numin
if omega_max < numax:
omega_max = numax
OmegaRange = (omega_min, omega_max)
OmegaDelta = OmegaRange[1] - OmegaRange[0]
if OmegaStep == None:
# OmegaStep = OmegaDelta/100.
OmegaStep = 0.01 # cm-1
if OmegaWing == None:
# OmegaWing = OmegaDelta/10.
OmegaWing = 0.0 # cm-1
if not Format:
"""
Infinitesimal = 1e-14 # put this to header in next version!
min_number_of_digits = 4 # minimal number of digits after dec. pnt.
last_digit_pos = 0
while modf(OmegaStep * 10**last_digit_pos)[0] > Infinitesimal:
last_digit_pos += 1
actual_number_of_digits = max(min_number_of_digits,last_digit_pos)
Format = '%%.%df %%e' % actual_number_of_digits
"""
Format = '%.12f %e'
return Components, SourceTables, Environment, OmegaRange, \
OmegaStep, OmegaWing, IntensityThreshold, Format
# save numpy arrays to file
# arrays must have same dimensions
def save_to_file(fname, fformat, *arg):
    with open(fname, 'w') as f:
        for argline in zip(*arg):
            f.write((fformat + '\n') % argline)
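# Usage sketch (hypothetical file name): save_to_file('xsect.txt', '%.12f %e', nu, coef)
# writes one "wavenumber cross-section" pair per line using the given C-style format.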
# ==========================================================================================
# =========================== NEW ABSORPTION COEFFICIENT ===================================
# ==========================================================================================
def absorptionCoefficient_HT(Components=None, SourceTables=None, partitionFunction=PYTIPS,
Environment=None, OmegaRange=None, OmegaStep=None, OmegaWing=None,
IntensityThreshold=DefaultIntensityThreshold,
OmegaWingHW=DefaultOmegaWingHW,
GammaL='gamma_air', HITRAN_units=True, LineShift=True,
File=None, Format=None, OmegaGrid=None,
WavenumberRange=None, WavenumberStep=None, WavenumberWing=None,
WavenumberWingHW=None, WavenumberGrid=None,
Diluent={}, EnvDependences=None):
"""
INPUT PARAMETERS:
Components: list of tuples [(M,I,D)], where
M - HITRAN molecule number,
I - HITRAN isotopologue number,
D - relative abundance (optional)
SourceTables: list of tables from which to calculate cross-section (optional)
partitionFunction: pointer to partition function (default is PYTIPS) (optional)
Environment: dictionary containing thermodynamic parameters.
'p' - pressure in atmospheres,
'T' - temperature in Kelvin
Default={'p':1.,'T':296.}
WavenumberRange: wavenumber range to consider.
WavenumberStep: wavenumber step to consider.
WavenumberWing: absolute wing for calculating a lineshape (in cm-1)
WavenumberWingHW: relative wing for calculating a lineshape (in halfwidths)
IntensityThreshold: threshold for intensities
GammaL: specifies broadening parameter ('gamma_air' or 'gamma_self')
HITRAN_units: use cm2/molecule (True) or cm-1 (False) for absorption coefficient
File: write output to file (if specified)
Format: c-format of file output (accounts for significant digits in WavenumberStep)
OUTPUT PARAMETERS:
Wavenum: wavenumber grid with respect to parameters WavenumberRange and WavenumberStep
Xsect: absorption coefficient calculated on the grid
---
DESCRIPTION:
Calculate absorption coefficient using HT profile.
Absorption coefficient is calculated at arbitrary temperature and pressure.
User can vary a wide range of parameters to control a process of calculation.
    The choice of these parameters depends on the properties of a particular linelist.
Default values are a sort of guess which gives a decent precision (on average)
for a reasonable amount of cpu time. To increase calculation accuracy,
user should use a trial and error method.
---
EXAMPLE OF USAGE:
nu,coef = absorptionCoefficient_HT(((2,1),),'co2',WavenumberStep=0.01,
HITRAN_units=False,GammaL='gamma_self')
---
"""
warn('To get the most up-to-date version please check http://hitran.org/hapi')
    # Parameters OmegaRange, OmegaStep, OmegaWing, OmegaWingHW, and OmegaGrid
# are deprecated and given for backward compatibility with the older versions.
if WavenumberRange: OmegaRange = WavenumberRange
if WavenumberStep: OmegaStep = WavenumberStep
if WavenumberWing: OmegaWing = WavenumberWing
if WavenumberWingHW: OmegaWingHW = WavenumberWingHW
if WavenumberGrid: OmegaGrid = WavenumberGrid
# "bug" with 1-element list
Components = listOfTuples(Components)
SourceTables = listOfTuples(SourceTables)
# determine final input values
Components, SourceTables, Environment, OmegaRange, OmegaStep, OmegaWing, \
IntensityThreshold, Format = \
getDefaultValuesForXsect(Components, SourceTables, Environment, OmegaRange,
OmegaStep, OmegaWing, IntensityThreshold, Format)
# warn user about too large omega step
if OmegaStep > 0.1: warn('Big wavenumber step: possible accuracy decline')
    # get a uniform wavenumber grid for the cross-section
# number_of_points = (OmegaRange[1]-OmegaRange[0])/OmegaStep + 1
# Omegas = linspace(OmegaRange[0],OmegaRange[1],number_of_points)
if OmegaGrid is not None:
Omegas = npsort(OmegaGrid)
else:
# Omegas = arange(OmegaRange[0],OmegaRange[1],OmegaStep)
Omegas = arange_(OmegaRange[0], OmegaRange[1], OmegaStep) # fix
number_of_points = len(Omegas)
Xsect = zeros(number_of_points)
# reference temperature and pressure
Tref = __FloatType__(296.) # K
pref = __FloatType__(1.) # atm
# actual temperature and pressure
T = Environment['T'] # K
p = Environment['p'] # atm
# Find reference temperature
TRanges = [(0, 100), (100, 200), (200, 400), (400, float('inf'))]
Trefs = [50., 150., 296., 700.]
for TRange, TrefHT in zip(TRanges, Trefs):
if T >= TRange[0] and T < TRange[1]:
break
# create dictionary from Components
ABUNDANCES = {}
NATURAL_ABUNDANCES = {}
for Component in Components:
M = Component[0]
I = Component[1]
if len(Component) >= 3:
ni = Component[2]
else:
try:
ni = ISO[(M, I)][ISO_INDEX['abundance']]
except KeyError:
raise Exception('cannot find component M,I = %d,%d.' % (M, I))
ABUNDANCES[(M, I)] = ni
NATURAL_ABUNDANCES[(M, I)] = ISO[(M, I)][ISO_INDEX['abundance']]
# precalculation of volume concentration
if HITRAN_units:
factor = __FloatType__(1.0)
else:
factor = volumeConcentration(p, T)
# setup the default empty environment dependence function
if not EnvDependences:
EnvDependences = lambda ENV, LINE: {}
Env = Environment.copy()
Env['Tref'] = Tref
Env['pref'] = pref
# setup the Diluent variable
GammaL = GammaL.lower()
if not Diluent:
if GammaL == 'gamma_air':
Diluent = {'air': 1.}
elif GammaL == 'gamma_self':
Diluent = {'self': 1.}
else:
raise Exception('Unknown GammaL value: %s' % GammaL)
# Simple check
for key in Diluent:
val = Diluent[key]
        if val < 0 or val > 1:
raise Exception('Diluent fraction must be in [0,1]')
# SourceTables contain multiple tables
for TableName in SourceTables:
# get the number of rows
nline = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
# get parameter names for each table
parnames = LOCAL_TABLE_CACHE[TableName]['data'].keys()
# loop through line centers (single stream)
for RowID in range(nline):
# Get the custom environment dependences
Line = {parname: LOCAL_TABLE_CACHE[TableName]['data'][parname][RowID] for parname in parnames}
CustomEnvDependences = EnvDependences(Env, Line)
# get basic line parameters (lower level)
LineCenterDB = LOCAL_TABLE_CACHE[TableName]['data']['nu'][RowID]
LineIntensityDB = LOCAL_TABLE_CACHE[TableName]['data']['sw'][RowID]
LowerStateEnergyDB = LOCAL_TABLE_CACHE[TableName]['data']['elower'][RowID]
MoleculeNumberDB = LOCAL_TABLE_CACHE[TableName]['data']['molec_id'][RowID]
IsoNumberDB = LOCAL_TABLE_CACHE[TableName]['data']['local_iso_id'][RowID]
# filter by molecule and isotopologue
if (MoleculeNumberDB, IsoNumberDB) not in ABUNDANCES: continue
# partition functions for T and Tref
SigmaT = partitionFunction(MoleculeNumberDB, IsoNumberDB, T)
SigmaTref = partitionFunction(MoleculeNumberDB, IsoNumberDB, Tref)
# get all environment dependences from voigt parameters
# intensity
if 'sw' in CustomEnvDependences:
LineIntensity = CustomEnvDependences['sw']
else:
LineIntensity = EnvironmentDependency_Intensity(LineIntensityDB, T, Tref, SigmaT, SigmaTref,
LowerStateEnergyDB, LineCenterDB)
            # FILTER by LineIntensity: compare it with IntensityThreshold
if LineIntensity < IntensityThreshold: continue
# doppler broadening coefficient (GammaD)
cMassMol = 1.66053873e-27 # hapi
m = molecularMass(MoleculeNumberDB, IsoNumberDB) * cMassMol * 1000
GammaD = sqrt(2 * cBolts * T * log(2) / m / cc ** 2) * LineCenterDB
# pressure broadening coefficient
Gamma0 = 0.;
Shift0 = 0.;
Gamma2 = 0.;
Shift2 = 0.;
Delta2 = 0.;
NuVC = 0.;
EtaNumer = 0.;
for species in Diluent:
species_lower = species.lower()
abun = Diluent[species]
# Search for broadening HWHM.
try:
# search for HT-style name
Gamma0DB = LOCAL_TABLE_CACHE[TableName]['data']['gamma_HT_0_%s_%d' % (species_lower, TrefHT)][RowID]
if Gamma0DB == 0.: raise Exception
except:
try:
# search for Voigt-style name
Gamma0DB = LOCAL_TABLE_CACHE[TableName]['data']['gamma_%s' % species_lower][RowID]
except:
Gamma0DB = 0.0
# Search for temperature exponent for broadening HWHM.
try:
# search for HT-style name
TempRatioPowerDB = LOCAL_TABLE_CACHE[TableName]['data']['n_HT_%s_%d' % (species_lower, TrefHT)][
RowID]
if TempRatioPowerDB == 0.: raise Exception
Tref = TrefHT
except:
Tref = 296.
try:
# search for Voigt-style name
TempRatioPowerDB = LOCAL_TABLE_CACHE[TableName]['data']['n_%s' % species_lower][RowID]
if species_lower == 'self' and TempRatioPowerDB == 0.:
TempRatioPowerDB = LOCAL_TABLE_CACHE[TableName]['data']['n_air'][
RowID] # same for self as for air
except:
# print('TempRatioPowerDB is set to zero')
# TempRatioPowerDB = 0
TempRatioPowerDB = LOCAL_TABLE_CACHE[TableName]['data']['n_air'][RowID]
# Add to the final Gamma0
Gamma0T = CustomEnvDependences.get('gamma_HT_0_%s_%d' % (species_lower, TrefHT),
CustomEnvDependences.get('gamma_%s' % species_lower,
EnvironmentDependency_Gamma0(Gamma0DB, T,
Tref, p, pref,
TempRatioPowerDB)))
Gamma0 += abun * Gamma0T
# Search for shift.
try:
# search for HT-style name
Shift0DB = LOCAL_TABLE_CACHE[TableName]['data']['delta_HT_0_%s_%d' % (species_lower, TrefHT)][RowID]
if Shift0DB == 0.: raise Exception
except:
try:
# search for Voigt-style name
Shift0DB = LOCAL_TABLE_CACHE[TableName]['data']['delta_%s' % species_lower][RowID]
except:
Shift0DB = 0.0
# Search for temperature dependence for shift.
try:
# search for HT-style name
deltap = LOCAL_TABLE_CACHE[TableName]['data']['deltap_HT_%s_%d' % (species_lower, TrefHT)][RowID]
if deltap == 0.: raise Exception
Tref = TrefHT
except:
Tref = 296.
try:
# search for Voigt-style name
deltap = LOCAL_TABLE_CACHE[TableName]['data']['deltap_%s' % species_lower][RowID]
except:
deltap = 0.0
Shift0T = CustomEnvDependences.get('deltap_HT_%s_%d' % (species_lower, TrefHT),
CustomEnvDependences.get('deltap_%s' % species_lower,
((Shift0DB + deltap * (
T - Tref)) * p / pref)))
Shift0 += abun * Shift0T
# Search for speed dependence for HWHM.
try:
Gamma2DB = LOCAL_TABLE_CACHE[TableName]['data']['gamma_HT_2_%s_%d' % (species_lower, TrefHT)][RowID]
if Gamma2DB == 0.: raise Exception
except:
try:
SDDB = LOCAL_TABLE_CACHE[TableName]['data']['SD_%s' % species_lower][RowID]
Gamma2DB = SDDB * Gamma0DB
except:
Gamma2DB = 0.0
Gamma2 += abun * CustomEnvDependences.get('gamma_HT_2_%s_%d' % (species_lower, TrefHT),
Gamma2DB * (p / pref))
# Search for speed dependence for shift.
try:
Delta2DB = LOCAL_TABLE_CACHE[TableName]['data']['delta_HT_2_%s_%d' % (species_lower, TrefHT)][RowID]
except:
Delta2DB = 0.
Delta2 += abun * CustomEnvDependences.get('delta_HT_2_%s_%d' % (species_lower, TrefHT),
Delta2DB * p / pref)
# Search for frequency of VC
try:
NuVCDB = LOCAL_TABLE_CACHE[TableName]['data']['nu_HT_%s' % species_lower][RowID]
except:
NuVCDB = 0.
# Search for temperature exponent for frequency of VC
try:
KappaDB = LOCAL_TABLE_CACHE[TableName]['data']['kappa_HT_%s' % species_lower][RowID]
except:
KappaDB = 0.
NuVC += abun * CustomEnvDependences.get('nu_HT_%s' % species_lower,
NuVCDB * (Tref / T) ** KappaDB * p)
# Setup correlation parameter
try:
EtaDB = LOCAL_TABLE_CACHE[TableName]['data']['eta_HT_%s' % species_lower][RowID]
except:
EtaDB = 0.
EtaNumer += EtaDB * abun * (Gamma0T + 1j * Shift0T)
Eta = EtaNumer / (Gamma0 + 1j * Shift0)
# get final wing of the line according to Gamma0, OmegaWingHW and OmegaWing
# XXX min or max?
OmegaWingF = max(OmegaWing, OmegaWingHW * Gamma0, OmegaWingHW * GammaD)
# shift coefficient
# Shift0 = Shift0DB*p/pref
# XXX other parameter (such as Delta0, Delta2, anuVC etc.) will be included in HTP version
# PROFILE_VOIGT(sg0,GamD,Gam0,sg)
# sg0 : Unperturbed line position in cm-1 (Input).
# GamD : Doppler HWHM in cm-1 (Input)
# Gam0 : Speed-averaged line-width in cm-1 (Input).
# sg : Current WaveNumber of the Computation in cm-1 (Input).
# XXX time?
BoundIndexLower = bisect(Omegas, LineCenterDB - OmegaWingF)
BoundIndexUpper = bisect(Omegas, LineCenterDB + OmegaWingF)
lineshape_vals = PROFILE_HT(LineCenterDB, GammaD, Gamma0, Gamma2, Shift0, Shift2, NuVC, Eta,
Omegas[BoundIndexLower:BoundIndexUpper])[0]
Xsect[BoundIndexLower:BoundIndexUpper] += factor / NATURAL_ABUNDANCES[(MoleculeNumberDB, IsoNumberDB)] * \
ABUNDANCES[(MoleculeNumberDB, IsoNumberDB)] * \
LineIntensity * lineshape_vals
# print(LineCenterDB,GammaD,Gamma0,Gamma2,Shift0,sum(lineshape_vals),sum(Xsect))
# raise Exception
if File: save_to_file(File, Format, Omegas, Xsect)
return Omegas, Xsect
def absorptionCoefficient_SDVoigt(Components=None, SourceTables=None, partitionFunction=PYTIPS,
Environment=None, OmegaRange=None, OmegaStep=None, OmegaWing=None,
IntensityThreshold=DefaultIntensityThreshold,
OmegaWingHW=DefaultOmegaWingHW,
GammaL='gamma_air', HITRAN_units=True, LineShift=True,
File=None, Format=None, OmegaGrid=None,
WavenumberRange=None, WavenumberStep=None, WavenumberWing=None,
WavenumberWingHW=None, WavenumberGrid=None,
Diluent={}, EnvDependences=None):
"""
INPUT PARAMETERS:
Components: list of tuples [(M,I,D)], where
M - HITRAN molecule number,
I - HITRAN isotopologue number,
D - relative abundance (optional)
SourceTables: list of tables from which to calculate cross-section (optional)
partitionFunction: pointer to partition function (default is PYTIPS) (optional)
Environment: dictionary containing thermodynamic parameters.
'p' - pressure in atmospheres,
'T' - temperature in Kelvin
Default={'p':1.,'T':296.}
WavenumberRange: wavenumber range to consider.
WavenumberStep: wavenumber step to consider.
WavenumberWing: absolute wing for calculating a lineshape (in cm-1)
WavenumberWingHW: relative wing for calculating a lineshape (in halfwidths)
IntensityThreshold: threshold for intensities
GammaL: specifies broadening parameter ('gamma_air' or 'gamma_self')
HITRAN_units: use cm2/molecule (True) or cm-1 (False) for absorption coefficient
File: write output to file (if specified)
Format: c-format of file output (accounts for significant digits in WavenumberStep)
OUTPUT PARAMETERS:
Wavenum: wavenumber grid with respect to parameters WavenumberRange and WavenumberStep
Xsect: absorption coefficient calculated on the grid
---
DESCRIPTION:
Calculate absorption coefficient using SDVoigt profile.
Absorption coefficient is calculated at arbitrary temperature and pressure.
User can vary a wide range of parameters to control a process of calculation.
    The choice of these parameters depends on the properties of a particular linelist.
Default values are a sort of guess which gives a decent precision (on average)
for a reasonable amount of cpu time. To increase calculation accuracy,
user should use a trial and error method.
---
EXAMPLE OF USAGE:
nu,coef = absorptionCoefficient_SDVoigt(((2,1),),'co2',WavenumberStep=0.01,
HITRAN_units=False,GammaL='gamma_self')
---
"""
warn('To get the most up-to-date version please check http://hitran.org/hapi')
    # Parameters OmegaRange, OmegaStep, OmegaWing, OmegaWingHW, and OmegaGrid
# are deprecated and given for backward compatibility with the older versions.
if WavenumberRange: OmegaRange = WavenumberRange
if WavenumberStep: OmegaStep = WavenumberStep
if WavenumberWing: OmegaWing = WavenumberWing
if WavenumberWingHW: OmegaWingHW = WavenumberWingHW
if WavenumberGrid: OmegaGrid = WavenumberGrid
# "bug" with 1-element list
Components = listOfTuples(Components)
SourceTables = listOfTuples(SourceTables)
# determine final input values
Components, SourceTables, Environment, OmegaRange, OmegaStep, OmegaWing, \
IntensityThreshold, Format = \
getDefaultValuesForXsect(Components, SourceTables, Environment, OmegaRange,
OmegaStep, OmegaWing, IntensityThreshold, Format)
# warn user about too large omega step
if OmegaStep > 0.1: warn('Big wavenumber step: possible accuracy decline')
    # get a uniform wavenumber grid for the cross-section
# number_of_points = (OmegaRange[1]-OmegaRange[0])/OmegaStep + 1
# Omegas = linspace(OmegaRange[0],OmegaRange[1],number_of_points)
if OmegaGrid is not None:
Omegas = npsort(OmegaGrid)
else:
# Omegas = arange(OmegaRange[0],OmegaRange[1],OmegaStep)
Omegas = arange_(OmegaRange[0], OmegaRange[1], OmegaStep) # fix
number_of_points = len(Omegas)
Xsect = zeros(number_of_points)
# reference temperature and pressure
Tref = __FloatType__(296.) # K
pref = __FloatType__(1.) # atm
# actual temperature and pressure
T = Environment['T'] # K
p = Environment['p'] # atm
# create dictionary from Components
ABUNDANCES = {}
NATURAL_ABUNDANCES = {}
for Component in Components:
M = Component[0]
I = Component[1]
if len(Component) >= 3:
ni = Component[2]
else:
try:
ni = ISO[(M, I)][ISO_INDEX['abundance']]
except KeyError:
raise Exception('cannot find component M,I = %d,%d.' % (M, I))
ABUNDANCES[(M, I)] = ni
NATURAL_ABUNDANCES[(M, I)] = ISO[(M, I)][ISO_INDEX['abundance']]
# precalculation of volume concentration
if HITRAN_units:
factor = __FloatType__(1.0)
else:
factor = volumeConcentration(p, T)
# setup the default empty environment dependence function
if not EnvDependences:
EnvDependences = lambda ENV, LINE: {}
Env = Environment.copy()
Env['Tref'] = Tref
Env['pref'] = pref
# setup the Diluent variable
GammaL = GammaL.lower()
if not Diluent:
if GammaL == 'gamma_air':
Diluent = {'air': 1.}
elif GammaL == 'gamma_self':
Diluent = {'self': 1.}
else:
raise Exception('Unknown GammaL value: %s' % GammaL)
# Simple check
for key in Diluent:
val = Diluent[key]
        if val < 0 or val > 1:
raise Exception('Diluent fraction must be in [0,1]')
# SourceTables contain multiple tables
for TableName in SourceTables:
# get the number of rows
nline = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
# get parameter names for each table
parnames = LOCAL_TABLE_CACHE[TableName]['data'].keys()
# loop through line centers (single stream)
for RowID in range(nline):
# Get the custom environment dependences
Line = {parname: LOCAL_TABLE_CACHE[TableName]['data'][parname][RowID] for parname in parnames}
CustomEnvDependences = EnvDependences(Env, Line)
# get basic line parameters (lower level)
LineCenterDB = LOCAL_TABLE_CACHE[TableName]['data']['nu'][RowID]
LineIntensityDB = LOCAL_TABLE_CACHE[TableName]['data']['sw'][RowID]
LowerStateEnergyDB = LOCAL_TABLE_CACHE[TableName]['data']['elower'][RowID]
MoleculeNumberDB = LOCAL_TABLE_CACHE[TableName]['data']['molec_id'][RowID]
IsoNumberDB = LOCAL_TABLE_CACHE[TableName]['data']['local_iso_id'][RowID]
# filter by molecule and isotopologue
if (MoleculeNumberDB, IsoNumberDB) not in ABUNDANCES: continue
# partition functions for T and Tref
SigmaT = partitionFunction(MoleculeNumberDB, IsoNumberDB, T)
SigmaTref = partitionFunction(MoleculeNumberDB, IsoNumberDB, Tref)
# get all environment dependences from voigt parameters
# intensity
if 'sw' in CustomEnvDependences:
LineIntensity = CustomEnvDependences['sw']
else:
LineIntensity = EnvironmentDependency_Intensity(LineIntensityDB, T, Tref, SigmaT, SigmaTref,
LowerStateEnergyDB, LineCenterDB)
            # FILTER by LineIntensity: compare it with IntensityThreshold
if LineIntensity < IntensityThreshold: continue
# doppler broadening coefficient (GammaD)
cMassMol = 1.66053873e-27 # hapi
m = molecularMass(MoleculeNumberDB, IsoNumberDB) * cMassMol * 1000
GammaD = sqrt(2 * cBolts * T * log(2) / m / cc ** 2) * LineCenterDB
# pressure broadening coefficient
Gamma0 = 0.;
Shift0 = 0.;
Gamma2 = 0.;
Shift2 = 0.
for species in Diluent:
species_lower = species.lower()
abun = Diluent[species]
gamma_name = 'gamma_' + species_lower
try:
Gamma0DB = LOCAL_TABLE_CACHE[TableName]['data'][gamma_name][RowID]
except:
Gamma0DB = 0.0
n_name = 'n_' + species_lower
try:
TempRatioPowerDB = LOCAL_TABLE_CACHE[TableName]['data'][n_name][RowID]
if species_lower == 'self' and TempRatioPowerDB == 0.:
TempRatioPowerDB = LOCAL_TABLE_CACHE[TableName]['data']['n_air'][
RowID] # same for self as for air
except:
# TempRatioPowerDB = 0
TempRatioPowerDB = LOCAL_TABLE_CACHE[TableName]['data']['n_air'][RowID]
# Add to the final Gamma0
Gamma0 += abun * CustomEnvDependences.get(gamma_name, # default ->
EnvironmentDependency_Gamma0(Gamma0DB, T, Tref, p, pref,
TempRatioPowerDB))
delta_name = 'delta_' + species_lower
try:
Shift0DB = LOCAL_TABLE_CACHE[TableName]['data'][delta_name][RowID]
except:
Shift0DB = 0.0
deltap_name = 'deltap_' + species_lower
try:
deltap = LOCAL_TABLE_CACHE[TableName]['data'][deltap_name][RowID]
except:
deltap = 0.0
Shift0 += abun * CustomEnvDependences.get(delta_name, # default ->
((Shift0DB + deltap * (T - Tref)) * p / pref))
SD_name = 'SD_' + species_lower
try:
SDDB = LOCAL_TABLE_CACHE[TableName]['data'][SD_name][RowID]
except:
SDDB = 0.0
Gamma2 += abun * CustomEnvDependences.get(SD_name, # default ->
SDDB * p / pref) * Gamma0DB
# get final wing of the line according to Gamma0, OmegaWingHW and OmegaWing
# XXX min or max?
OmegaWingF = max(OmegaWing, OmegaWingHW * Gamma0, OmegaWingHW * GammaD)
# shift coefficient
# Shift0 = Shift0DB*p/pref
# XXX other parameter (such as Delta0, Delta2, anuVC etc.) will be included in HTP version
# PROFILE_VOIGT(sg0,GamD,Gam0,sg)
# sg0 : Unperturbed line position in cm-1 (Input).
# GamD : Doppler HWHM in cm-1 (Input)
# Gam0 : Speed-averaged line-width in cm-1 (Input).
# sg : Current WaveNumber of the Computation in cm-1 (Input).
# XXX time?
BoundIndexLower = bisect(Omegas, LineCenterDB - OmegaWingF)
BoundIndexUpper = bisect(Omegas, LineCenterDB + OmegaWingF)
lineshape_vals = PROFILE_SDVOIGT(LineCenterDB, GammaD, Gamma0, Gamma2, Shift0, Shift2,
Omegas[BoundIndexLower:BoundIndexUpper])[0]
Xsect[BoundIndexLower:BoundIndexUpper] += factor / NATURAL_ABUNDANCES[(MoleculeNumberDB, IsoNumberDB)] * \
ABUNDANCES[(MoleculeNumberDB, IsoNumberDB)] * \
LineIntensity * lineshape_vals
# print(LineCenterDB,GammaD,Gamma0,Gamma2,Shift0,sum(lineshape_vals),sum(Xsect))
# raise Exception
if File: save_to_file(File, Format, Omegas, Xsect)
return Omegas, Xsect
def absorptionCoefficient_Voigt(Components=None, SourceTables=None, partitionFunction=PYTIPS,
Environment=None, OmegaRange=None, OmegaStep=None, OmegaWing=None,
IntensityThreshold=DefaultIntensityThreshold,
OmegaWingHW=DefaultOmegaWingHW,
GammaL='gamma_air', HITRAN_units=True, LineShift=True,
File=None, Format=None, OmegaGrid=None,
WavenumberRange=None, WavenumberStep=None, WavenumberWing=None,
WavenumberWingHW=None, WavenumberGrid=None,
Diluent={}, EnvDependences=None):
"""
INPUT PARAMETERS:
Components: list of tuples [(M,I,D)], where
M - HITRAN molecule number,
I - HITRAN isotopologue number,
D - relative abundance (optional)
SourceTables: list of tables from which to calculate cross-section (optional)
partitionFunction: pointer to partition function (default is PYTIPS) (optional)
Environment: dictionary containing thermodynamic parameters.
'p' - pressure in atmospheres,
'T' - temperature in Kelvin
Default={'p':1.,'T':296.}
WavenumberRange: wavenumber range to consider.
WavenumberStep: wavenumber step to consider.
WavenumberWing: absolute wing for calculating a lineshape (in cm-1)
WavenumberWingHW: relative wing for calculating a lineshape (in halfwidths)
IntensityThreshold: threshold for intensities
GammaL: specifies broadening parameter ('gamma_air' or 'gamma_self')
HITRAN_units: use cm2/molecule (True) or cm-1 (False) for absorption coefficient
File: write output to file (if specified)
Format: c-format of file output (accounts for significant digits in WavenumberStep)
OUTPUT PARAMETERS:
Wavenum: wavenumber grid with respect to parameters WavenumberRange and WavenumberStep
Xsect: absorption coefficient calculated on the grid
---
DESCRIPTION:
Calculate absorption coefficient using Voigt profile.
Absorption coefficient is calculated at arbitrary temperature and pressure.
User can vary a wide range of parameters to control a process of calculation.
    The choice of these parameters depends on the properties of a particular linelist.
Default values are a sort of guess which gives a decent precision (on average)
for a reasonable amount of cpu time. To increase calculation accuracy,
user should use a trial and error method.
---
EXAMPLE OF USAGE:
nu,coef = absorptionCoefficient_Voigt(((2,1),),'co2',WavenumberStep=0.01,
HITRAN_units=False,GammaL='gamma_self')
---
"""
    # Parameters OmegaRange, OmegaStep, OmegaWing, OmegaWingHW, and OmegaGrid
# are deprecated and given for backward compatibility with the older versions.
if WavenumberRange: OmegaRange = WavenumberRange
if WavenumberStep: OmegaStep = WavenumberStep
if WavenumberWing: OmegaWing = WavenumberWing
if WavenumberWingHW: OmegaWingHW = WavenumberWingHW
if WavenumberGrid: OmegaGrid = WavenumberGrid
# "bug" with 1-element list
Components = listOfTuples(Components)
SourceTables = listOfTuples(SourceTables)
# determine final input values
Components, SourceTables, Environment, OmegaRange, OmegaStep, OmegaWing, \
IntensityThreshold, Format = \
getDefaultValuesForXsect(Components, SourceTables, Environment, OmegaRange,
OmegaStep, OmegaWing, IntensityThreshold, Format)
# warn user about too large omega step
if OmegaStep > 0.1: warn('Big wavenumber step: possible accuracy decline')
    # get a uniform wavenumber grid for the cross-section
# number_of_points = (OmegaRange[1]-OmegaRange[0])/OmegaStep + 1
# Omegas = linspace(OmegaRange[0],OmegaRange[1],number_of_points)
if OmegaGrid is not None:
Omegas = npsort(OmegaGrid)
else:
# Omegas = arange(OmegaRange[0],OmegaRange[1],OmegaStep)
Omegas = arange_(OmegaRange[0], OmegaRange[1], OmegaStep) # fix
number_of_points = len(Omegas)
Xsect = zeros(number_of_points)
# reference temperature and pressure
Tref = __FloatType__(296.) # K
pref = __FloatType__(1.) # atm
# actual temperature and pressure
T = Environment['T'] # K
p = Environment['p'] # atm
# create dictionary from Components
ABUNDANCES = {}
NATURAL_ABUNDANCES = {}
for Component in Components:
M = Component[0]
I = Component[1]
if len(Component) >= 3:
ni = Component[2]
else:
try:
ni = ISO[(M, I)][ISO_INDEX['abundance']]
except KeyError:
raise Exception('cannot find component M,I = %d,%d.' % (M, I))
ABUNDANCES[(M, I)] = ni
NATURAL_ABUNDANCES[(M, I)] = ISO[(M, I)][ISO_INDEX['abundance']]
# precalculation of volume concentration
if HITRAN_units:
factor = __FloatType__(1.0)
else:
factor = volumeConcentration(p, T)
# setup the default empty environment dependence function
if not EnvDependences:
EnvDependences = lambda ENV, LINE: {}
Env = Environment.copy()
Env['Tref'] = Tref
Env['pref'] = pref
# setup the Diluent variable
GammaL = GammaL.lower()
if not Diluent:
if GammaL == 'gamma_air':
Diluent = {'air': 1.}
elif GammaL == 'gamma_self':
Diluent = {'self': 1.}
else:
raise Exception('Unknown GammaL value: %s' % GammaL)
# Simple check
for key in Diluent:
val = Diluent[key]
if val < 0 or val > 1:
raise Exception('Diluent fraction must be in [0,1]')
# SourceTables contain multiple tables
for TableName in SourceTables:
# get the number of rows
nline = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
# get parameter names for each table
parnames = LOCAL_TABLE_CACHE[TableName]['data'].keys()
# loop through line centers (single stream)
for RowID in range(nline):
# Get the custom environment dependences
Line = {parname: LOCAL_TABLE_CACHE[TableName]['data'][parname][RowID] for parname in parnames}
CustomEnvDependences = EnvDependences(Env, Line)
# get basic line parameters (lower level)
LineCenterDB = LOCAL_TABLE_CACHE[TableName]['data']['nu'][RowID]
LineIntensityDB = LOCAL_TABLE_CACHE[TableName]['data']['sw'][RowID]
LowerStateEnergyDB = LOCAL_TABLE_CACHE[TableName]['data']['elower'][RowID]
MoleculeNumberDB = LOCAL_TABLE_CACHE[TableName]['data']['molec_id'][RowID]
IsoNumberDB = LOCAL_TABLE_CACHE[TableName]['data']['local_iso_id'][RowID]
# filter by molecule and isotopologue
if (MoleculeNumberDB, IsoNumberDB) not in ABUNDANCES: continue
# partition functions for T and Tref
SigmaT = partitionFunction(MoleculeNumberDB, IsoNumberDB, T)
SigmaTref = partitionFunction(MoleculeNumberDB, IsoNumberDB, Tref)
# get all environment dependences from voigt parameters
# intensity
if 'sw' in CustomEnvDependences:
LineIntensity = CustomEnvDependences['sw']
else:
LineIntensity = EnvironmentDependency_Intensity(LineIntensityDB, T, Tref, SigmaT, SigmaTref,
LowerStateEnergyDB, LineCenterDB)
# FILTER by LineIntensity: compare it with IntensityThreshold
if LineIntensity < IntensityThreshold: continue
# doppler broadening coefficient (GammaD)
cMassMol = 1.66053873e-27 # hapi
m = molecularMass(MoleculeNumberDB, IsoNumberDB) * cMassMol * 1000
GammaD = sqrt(2 * cBolts * T * log(2) / m / cc ** 2) * LineCenterDB
# pressure broadening coefficient
Gamma0 = 0.
Shift0 = 0.
for species in Diluent:
species_lower = species.lower()
abun = Diluent[species]
gamma_name = 'gamma_' + species_lower
try:
Gamma0DB = LOCAL_TABLE_CACHE[TableName]['data'][gamma_name][RowID]
except:
Gamma0DB = 0.0
n_name = 'n_' + species_lower
try:
TempRatioPowerDB = LOCAL_TABLE_CACHE[TableName]['data'][n_name][RowID]
if species_lower == 'self' and TempRatioPowerDB == 0.:
TempRatioPowerDB = LOCAL_TABLE_CACHE[TableName]['data']['n_air'][
RowID] # same for self as for air
except:
# TempRatioPowerDB = 0
TempRatioPowerDB = LOCAL_TABLE_CACHE[TableName]['data']['n_air'][RowID]
# Add to the final Gamma0
Gamma0 += abun * CustomEnvDependences.get(gamma_name, # default ->
EnvironmentDependency_Gamma0(Gamma0DB, T, Tref, p, pref,
TempRatioPowerDB))
delta_name = 'delta_' + species_lower
try:
Shift0DB = LOCAL_TABLE_CACHE[TableName]['data'][delta_name][RowID]
except:
Shift0DB = 0.0
deltap_name = 'deltap_' + species_lower
try:
deltap = LOCAL_TABLE_CACHE[TableName]['data'][deltap_name][RowID]
except:
deltap = 0.0
Shift0 += abun * CustomEnvDependences.get(delta_name, # default ->
((Shift0DB + deltap * (T - Tref)) * p / pref))
# get final wing of the line according to Gamma0, OmegaWingHW and OmegaWing
# XXX min or max?
OmegaWingF = max(OmegaWing, OmegaWingHW * Gamma0, OmegaWingHW * GammaD)
# shift coefficient
# Shift0 = Shift0DB*p/pref
# XXX other parameter (such as Delta0, Delta2, anuVC etc.) will be included in HTP version
# PROFILE_VOIGT(sg0,GamD,Gam0,sg)
# sg0 : Unperturbed line position in cm-1 (Input).
# GamD : Doppler HWHM in cm-1 (Input)
# Gam0 : Speed-averaged line-width in cm-1 (Input).
# sg : Current WaveNumber of the Computation in cm-1 (Input).
# XXX time?
BoundIndexLower = bisect(Omegas, LineCenterDB - OmegaWingF)
BoundIndexUpper = bisect(Omegas, LineCenterDB + OmegaWingF)
lineshape_vals = \
PROFILE_VOIGT(LineCenterDB + Shift0, GammaD, Gamma0, Omegas[BoundIndexLower:BoundIndexUpper])[0]
Xsect[BoundIndexLower:BoundIndexUpper] += factor / NATURAL_ABUNDANCES[(MoleculeNumberDB, IsoNumberDB)] * \
ABUNDANCES[(MoleculeNumberDB, IsoNumberDB)] * \
LineIntensity * lineshape_vals
if File: save_to_file(File, Format, Omegas, Xsect)
return Omegas, Xsect
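# --- Illustrative usage sketch (not part of the original HAPI source) ---
# Shows how the Environment and Diluent arguments handled above could be passed
# to absorptionCoefficient_Voigt(); the table name 'CO2' and the broadening
# fractions below are assumptions made only for this example.
def _example_voigt_diluent_usage():
    """Hedged sketch: Voigt absorption for an assumed 'CO2' table at 0.5 atm, 310 K."""
    nu, coef = absorptionCoefficient_Voigt(
        SourceTables='CO2',
        Environment={'p': 0.5, 'T': 310.},
        Diluent={'air': 0.8, 'self': 0.2},  # fractions must lie in [0,1]
        WavenumberStep=0.01,
        HITRAN_units=False)
    return nu, coef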
def absorptionCoefficient_Lorentz(Components=None, SourceTables=None, partitionFunction=PYTIPS,
Environment=None, OmegaRange=None, OmegaStep=None, OmegaWing=None,
IntensityThreshold=DefaultIntensityThreshold,
OmegaWingHW=DefaultOmegaWingHW,
GammaL='gamma_air', HITRAN_units=True, LineShift=True,
File=None, Format=None, OmegaGrid=None,
WavenumberRange=None, WavenumberStep=None, WavenumberWing=None,
WavenumberWingHW=None, WavenumberGrid=None,
Diluent={}, EnvDependences=None):
"""
INPUT PARAMETERS:
Components: list of tuples [(M,I,D)], where
M - HITRAN molecule number,
I - HITRAN isotopologue number,
D - relative abundance (optional)
SourceTables: list of tables from which to calculate cross-section (optional)
partitionFunction: pointer to partition function (default is PYTIPS) (optional)
Environment: dictionary containing thermodynamic parameters.
'p' - pressure in atmospheres,
'T' - temperature in Kelvin
Default={'p':1.,'T':296.}
WavenumberRange: wavenumber range to consider.
WavenumberStep: wavenumber step to consider.
WavenumberWing: absolute wing for calculating a lineshape (in cm-1)
WavenumberWingHW: relative wing for calculating a lineshape (in halfwidths)
IntensityThreshold: threshold for intensities
GammaL: specifies broadening parameter ('gamma_air' or 'gamma_self')
HITRAN_units: use cm2/molecule (True) or cm-1 (False) for absorption coefficient
File: write output to file (if specified)
Format: c-format of file output (accounts for significant digits in WavenumberStep)
OUTPUT PARAMETERS:
Wavenum: wavenumber grid with respect to parameters WavenumberRange and WavenumberStep
Xsect: absorption coefficient calculated on the grid
---
DESCRIPTION:
Calculate absorption coefficient using Lorentz profile.
Absorption coefficient is calculated at arbitrary temperature and pressure.
User can vary a wide range of parameters to control a process of calculation.
The choice of these parameters depends on the properties of a particular linelist.
Default values are a reasonable guess which gives decent precision (on average)
for a reasonable amount of CPU time. To increase calculation accuracy,
the user should adjust them by trial and error.
---
EXAMPLE OF USAGE:
nu,coef = absorptionCoefficient_Lorentz(((2,1),),'co2',WavenumberStep=0.01,
HITRAN_units=False,GammaL='gamma_self')
---
"""
# Parameters OmegaRange, OmegaStep, OmegaWing, OmegaWingHW, and OmegaGrid
# are deprecated and kept for backward compatibility with older versions.
if WavenumberRange: OmegaRange = WavenumberRange
if WavenumberStep: OmegaStep = WavenumberStep
if WavenumberWing: OmegaWing = WavenumberWing
if WavenumberWingHW: OmegaWingHW = WavenumberWingHW
if WavenumberGrid: OmegaGrid = WavenumberGrid
# "bug" with 1-element list
Components = listOfTuples(Components)
SourceTables = listOfTuples(SourceTables)
# determine final input values
Components, SourceTables, Environment, OmegaRange, OmegaStep, OmegaWing, \
IntensityThreshold, Format = \
getDefaultValuesForXsect(Components, SourceTables, Environment, OmegaRange,
OmegaStep, OmegaWing, IntensityThreshold, Format)
# warn user about too large omega step
if OmegaStep > 0.1: warn('Big wavenumber step: possible accuracy decline')
# get uniform linspace (wavenumber grid) for cross-section
# number_of_points = (OmegaRange[1]-OmegaRange[0])/OmegaStep + 1
# Omegas = linspace(OmegaRange[0],OmegaRange[1],number_of_points)
if OmegaGrid is not None:
Omegas = npsort(OmegaGrid)
else:
# Omegas = arange(OmegaRange[0],OmegaRange[1],OmegaStep)
Omegas = arange_(OmegaRange[0], OmegaRange[1], OmegaStep) # fix
number_of_points = len(Omegas)
Xsect = zeros(number_of_points)
# reference temperature and pressure
Tref = __FloatType__(296.) # K
pref = __FloatType__(1.) # atm
# actual temperature and pressure
T = Environment['T'] # K
p = Environment['p'] # atm
# create dictionary from Components
ABUNDANCES = {}
NATURAL_ABUNDANCES = {}
for Component in Components:
M = Component[0]
I = Component[1]
if len(Component) >= 3:
ni = Component[2]
else:
try:
ni = ISO[(M, I)][ISO_INDEX['abundance']]
except KeyError:
raise Exception('cannot find component M,I = %d,%d.' % (M, I))
ABUNDANCES[(M, I)] = ni
NATURAL_ABUNDANCES[(M, I)] = ISO[(M, I)][ISO_INDEX['abundance']]
# precalculation of volume concentration
if HITRAN_units:
factor = __FloatType__(1.0)
else:
factor = volumeConcentration(p, T)
# setup the default empty environment dependence function
if not EnvDependences:
EnvDependences = lambda ENV, LINE: {}
Env = Environment.copy()
Env['Tref'] = Tref
Env['pref'] = pref
# setup the Diluent variable
GammaL = GammaL.lower()
if not Diluent:
if GammaL == 'gamma_air':
Diluent = {'air': 1.}
elif GammaL == 'gamma_self':
Diluent = {'self': 1.}
else:
raise Exception('Unknown GammaL value: %s' % GammaL)
# Simple check
for key in Diluent:
val = Diluent[key]
if val < 0 or val > 1:
raise Exception('Diluent fraction must be in [0,1]')
# SourceTables contain multiple tables
for TableName in SourceTables:
# get the number of rows
nline = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
# get parameter names for each table
parnames = LOCAL_TABLE_CACHE[TableName]['data'].keys()
# loop through line centers (single stream)
for RowID in range(nline):
# Get the custom environment dependences
Line = {parname: LOCAL_TABLE_CACHE[TableName]['data'][parname][RowID] for parname in parnames}
CustomEnvDependences = EnvDependences(Env, Line)
# get basic line parameters (lower level)
LineCenterDB = LOCAL_TABLE_CACHE[TableName]['data']['nu'][RowID]
LineIntensityDB = LOCAL_TABLE_CACHE[TableName]['data']['sw'][RowID]
LowerStateEnergyDB = LOCAL_TABLE_CACHE[TableName]['data']['elower'][RowID]
MoleculeNumberDB = LOCAL_TABLE_CACHE[TableName]['data']['molec_id'][RowID]
IsoNumberDB = LOCAL_TABLE_CACHE[TableName]['data']['local_iso_id'][RowID]
# filter by molecule and isotopologue
if (MoleculeNumberDB, IsoNumberDB) not in ABUNDANCES: continue
# partition functions for T and Tref
SigmaT = partitionFunction(MoleculeNumberDB, IsoNumberDB, T)
SigmaTref = partitionFunction(MoleculeNumberDB, IsoNumberDB, Tref)
# get all environment dependences from voigt parameters
# intensity
if 'sw' in CustomEnvDependences:
LineIntensity = CustomEnvDependences['sw']
else:
LineIntensity = EnvironmentDependency_Intensity(LineIntensityDB, T, Tref, SigmaT, SigmaTref,
LowerStateEnergyDB, LineCenterDB)
# FILTER by LineIntensity: compare it with IntensityThreshold
if LineIntensity < IntensityThreshold: continue
# pressure broadening coefficient
Gamma0 = 0.
Shift0 = 0.
for species in Diluent:
species_lower = species.lower()
abun = Diluent[species]
gamma_name = 'gamma_' + species_lower
try:
Gamma0DB = LOCAL_TABLE_CACHE[TableName]['data'][gamma_name][RowID]
except:
Gamma0DB = 0.0
n_name = 'n_' + species_lower
try:
TempRatioPowerDB = LOCAL_TABLE_CACHE[TableName]['data'][n_name][RowID]
if species_lower == 'self' and TempRatioPowerDB == 0.:
TempRatioPowerDB = LOCAL_TABLE_CACHE[TableName]['data']['n_air'][
RowID] # same for self as for air
except:
# TempRatioPowerDB = 0
TempRatioPowerDB = LOCAL_TABLE_CACHE[TableName]['data']['n_air'][RowID]
# Add to the final Gamma0
Gamma0 += abun * CustomEnvDependences.get(gamma_name, # default ->
EnvironmentDependency_Gamma0(Gamma0DB, T, Tref, p, pref,
TempRatioPowerDB))
delta_name = 'delta_' + species_lower
try:
Shift0DB = LOCAL_TABLE_CACHE[TableName]['data'][delta_name][RowID]
except:
Shift0DB = 0.0
deltap_name = 'deltap_' + species_lower
try:
deltap = LOCAL_TABLE_CACHE[TableName]['data'][deltap_name][RowID]
except:
deltap = 0.0
Shift0 += abun * CustomEnvDependences.get(delta_name, # default ->
((Shift0DB + deltap * (T - Tref)) * p / pref))
# get final wing of the line according to Gamma0, OmegaWingHW and OmegaWing
# XXX min or max?
OmegaWingF = max(OmegaWing, OmegaWingHW * Gamma0)
# shift coefficient
# Shift0 = Shift0DB*p/pref
# XXX other parameter (such as Delta0, Delta2, anuVC etc.) will be included in HTP version
# PROFILE_VOIGT(sg0,GamD,Gam0,sg)
# sg0 : Unperturbed line position in cm-1 (Input).
# GamD : Doppler HWHM in cm-1 (Input)
# Gam0 : Speed-averaged line-width in cm-1 (Input).
# sg : Current WaveNumber of the Computation in cm-1 (Input).
# XXX time?
BoundIndexLower = bisect(Omegas, LineCenterDB - OmegaWingF)
BoundIndexUpper = bisect(Omegas, LineCenterDB + OmegaWingF)
lineshape_vals = PROFILE_LORENTZ(LineCenterDB + Shift0, Gamma0, Omegas[BoundIndexLower:BoundIndexUpper])
Xsect[BoundIndexLower:BoundIndexUpper] += factor / NATURAL_ABUNDANCES[(MoleculeNumberDB, IsoNumberDB)] * \
ABUNDANCES[(MoleculeNumberDB, IsoNumberDB)] * \
LineIntensity * lineshape_vals
if File: save_to_file(File, Format, Omegas, Xsect)
return Omegas, Xsect
# Alias for a profile selector
absorptionCoefficient = absorptionCoefficient_HT
# ==========================================================================================
# =========================== /NEW ABSORPTION COEFFICIENT ===================================
# ==========================================================================================
# calculate absorption for Doppler profile
def absorptionCoefficient_Doppler(Components=None, SourceTables=None, partitionFunction=PYTIPS,
Environment=None, OmegaRange=None, OmegaStep=None, OmegaWing=None,
IntensityThreshold=DefaultIntensityThreshold,
OmegaWingHW=DefaultOmegaWingHW,
ParameterBindings=DefaultParameterBindings,
EnvironmentDependencyBindings=DefaultEnvironmentDependencyBindings,
GammaL='dummy', HITRAN_units=True, LineShift=True,
File=None, Format=None, OmegaGrid=None,
WavenumberRange=None, WavenumberStep=None, WavenumberWing=None,
WavenumberWingHW=None, WavenumberGrid=None):
"""
INPUT PARAMETERS:
Components: list of tuples [(M,I,D)], where
M - HITRAN molecule number,
I - HITRAN isotopologue number,
D - abundance (optional)
SourceTables: list of tables from which to calculate cross-section (optional)
partitionFunction: pointer to partition function (default is PYTIPS) (optional)
Environment: dictionary containing thermodynamic parameters.
'p' - pressure in atmospheres,
'T' - temperature in Kelvin
Default={'p':1.,'T':296.}
WavenumberRange: wavenumber range to consider.
WavenumberStep: wavenumber step to consider.
WavenumberWing: absolute wing for calculating a lineshape (in cm-1)
WavenumberWingHW: relative wing for calculating a lineshape (in halfwidths)
IntensityThreshold: threshold for intensities
GammaL: specifies broadening parameter ('gamma_air' or 'gamma_self')
HITRAN_units: use cm2/molecule (True) or cm-1 (False) for absorption coefficient
File: write output to file (if specified)
Format: c-format of file output (accounts for significant digits in WavenumberStep)
OUTPUT PARAMETERS:
Wavenum: wavenumber grid with respect to parameters WavenumberRange and WavenumberStep
Xsect: absorption coefficient calculated on the grid
---
DESCRIPTION:
Calculate absorption coefficient using Doppler (Gauss) profile.
Absorption coefficient is calculated at arbitrary temperature and pressure.
User can vary a wide range of parameters to control a process of calculation.
The choice of these parameters depends on the properties of a particular linelist.
Default values are a reasonable guess which gives decent precision (on average)
for a reasonable amount of CPU time. To increase calculation accuracy,
the user should adjust them by trial and error.
---
EXAMPLE OF USAGE:
nu,coef = absorptionCoefficient_Doppler(((2,1),),'co2',WavenumberStep=0.01,
HITRAN_units=False,GammaL='gamma_self')
---
"""
if WavenumberRange: OmegaRange = WavenumberRange
if WavenumberStep: OmegaStep = WavenumberStep
if WavenumberWing: OmegaWing = WavenumberWing
if WavenumberWingHW: OmegaWingHW = WavenumberWingHW
if WavenumberGrid: OmegaGrid = WavenumberGrid
# "bug" with 1-element list
Components = listOfTuples(Components)
SourceTables = listOfTuples(SourceTables)
# determine final input values
Components, SourceTables, Environment, OmegaRange, OmegaStep, OmegaWing, \
IntensityThreshold, Format = \
getDefaultValuesForXsect(Components, SourceTables, Environment, OmegaRange,
OmegaStep, OmegaWing, IntensityThreshold, Format)
# special for Doppler case: set OmegaStep to a smaller value
if not OmegaStep: OmegaStep = 0.001
# warn user about too large omega step
if OmegaStep > 0.005: warn('Big wavenumber step: possible accuracy decline')
# get uniform linspace (wavenumber grid) for cross-section
# number_of_points = (OmegaRange[1]-OmegaRange[0])/OmegaStep + 1
# Omegas = linspace(OmegaRange[0],OmegaRange[1],number_of_points)
if OmegaGrid is not None:
Omegas = npsort(OmegaGrid)
else:
# Omegas = arange(OmegaRange[0],OmegaRange[1],OmegaStep)
Omegas = arange_(OmegaRange[0], OmegaRange[1], OmegaStep) # fix
number_of_points = len(Omegas)
Xsect = zeros(number_of_points)
# reference temperature and pressure
Tref = __FloatType__(296.) # K
pref = __FloatType__(1.) # atm
# actual temperature and pressure
T = Environment['T'] # K
p = Environment['p'] # atm
# create dictionary from Components
ABUNDANCES = {}
NATURAL_ABUNDANCES = {}
for Component in Components:
M = Component[0]
I = Component[1]
if len(Component) >= 3:
ni = Component[2]
else:
try:
ni = ISO[(M, I)][ISO_INDEX['abundance']]
except KeyError:
raise Exception('cannot find component M,I = %d,%d.' % (M, I))
ABUNDANCES[(M, I)] = ni
NATURAL_ABUNDANCES[(M, I)] = ISO[(M, I)][ISO_INDEX['abundance']]
# precalculation of volume concentration
if HITRAN_units:
factor = __FloatType__(1.0)
else:
factor = volumeConcentration(p, T)
# SourceTables contain multiple tables
for TableName in SourceTables:
# get line centers
nline = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
# loop through line centers (single stream)
for RowID in range(nline):
# get basic line parameters (lower level)
LineCenterDB = LOCAL_TABLE_CACHE[TableName]['data']['nu'][RowID]
LineIntensityDB = LOCAL_TABLE_CACHE[TableName]['data']['sw'][RowID]
LowerStateEnergyDB = LOCAL_TABLE_CACHE[TableName]['data']['elower'][RowID]
MoleculeNumberDB = LOCAL_TABLE_CACHE[TableName]['data']['molec_id'][RowID]
IsoNumberDB = LOCAL_TABLE_CACHE[TableName]['data']['local_iso_id'][RowID]
if LineShift:
Shift0DB = LOCAL_TABLE_CACHE[TableName]['data']['delta_air'][RowID]
else:
Shift0DB = 0
# filter by molecule and isotopologue
if (MoleculeNumberDB, IsoNumberDB) not in ABUNDANCES: continue
# partition functions for T and Tref
# TODO: optimize
SigmaT = partitionFunction(MoleculeNumberDB, IsoNumberDB, T)
SigmaTref = partitionFunction(MoleculeNumberDB, IsoNumberDB, Tref)
# get all environment dependences from voigt parameters
# intensity
LineIntensity = EnvironmentDependency_Intensity(LineIntensityDB, T, Tref, SigmaT, SigmaTref,
LowerStateEnergyDB, LineCenterDB)
# FILTER by LineIntensity: compare it with IntensityThreshold
# TODO: apply wing narrowing instead of filtering, this would be more appropriate
if LineIntensity < IntensityThreshold: continue
# doppler broadening coefficient (GammaD)
# GammaDDB = cSqrtLn2*LineCenterDB/cc*sqrt(2*cBolts*T/molecularMass(MoleculeNumberDB,IsoNumberDB))
# GammaD = EnvironmentDependency_GammaD(GammaDDB,T,Tref)
# print(GammaD)
cMassMol = 1.66053873e-27
# cSqrt2Ln2 = 1.1774100225
fSqrtMass = sqrt(molecularMass(MoleculeNumberDB, IsoNumberDB))
# fSqrtMass = sqrt(32831.2508809)
cc_ = 2.99792458e8
cBolts_ = 1.3806503e-23
# cBolts_ = 1.3806488E-23
GammaD = (cSqrt2Ln2 / cc_) * sqrt(cBolts_ / cMassMol) * sqrt(T) * LineCenterDB / fSqrtMass
# GammaD = 4.30140e-7*LineCenterDB*sqrt(T/molecularMass(MoleculeNumberDB,IsoNumberDB))
# cc_ = 2.99792458e8 # 2.99792458e10 # 2.99792458e8
# cBolts_ = 1.3806503e-23 #1.3806488E-16 # 1.380648813E-16 # 1.3806503e-23 # 1.3806488E-23
# GammaD = sqrt(log(2))*LineCenterDB*sqrt(2*cBolts_*T/(cMassMol*molecularMass(MoleculeNumberDB,IsoNumberDB)*cc_**2))
# print(GammaD)
# get final wing of the line according to GammaD, OmegaWingHW and OmegaWing
# XXX min or max?
OmegaWingF = max(OmegaWing, OmegaWingHW * GammaD)
# shift coefficient
Shift0 = Shift0DB * p / pref
# XXX other parameter (such as Delta0, Delta2, anuVC etc.) will be included in HTP version
# PROFILE_VOIGT(sg0,GamD,Gam0,sg)
# sg0 : Unperturbed line position in cm-1 (Input).
# GamD : Doppler HWHM in cm-1 (Input)
# Gam0 : Speed-averaged line-width in cm-1 (Input).
# sg : Current WaveNumber of the Computation in cm-1 (Input).
# XXX time?
BoundIndexLower = bisect(Omegas, LineCenterDB - OmegaWingF)
BoundIndexUpper = bisect(Omegas, LineCenterDB + OmegaWingF)
lineshape_vals = PROFILE_DOPPLER(LineCenterDB + Shift0, GammaD, Omegas[BoundIndexLower:BoundIndexUpper])
# lineshape_vals = PROFILE_VOIGT(LineCenterDB,GammaD,cZero,Omegas[BoundIndexLower:BoundIndexUpper])[0]
# Xsect[BoundIndexLower:BoundIndexUpper] += lineshape_vals # DEBUG
Xsect[BoundIndexLower:BoundIndexUpper] += factor / NATURAL_ABUNDANCES[(MoleculeNumberDB, IsoNumberDB)] * \
ABUNDANCES[(MoleculeNumberDB, IsoNumberDB)] * \
LineIntensity * lineshape_vals
if File: save_to_file(File, Format, Omegas, Xsect)
return Omegas, Xsect
# ---------------------------------------------------------------------------
# SHORTCUTS AND ALIASES FOR ABSORPTION COEFFICIENTS
# ---------------------------------------------------------------------------
absorptionCoefficient_Gauss = absorptionCoefficient_Doppler
def abscoef_HT(table=None, step=None, grid=None, env={'T': 296., 'p': 1.}, file=None):
return absorptionCoefficient_HT(SourceTables=table, OmegaStep=step, OmegaGrid=grid, Environment=env, File=file)
def abscoef_Voigt(table=None, step=None, grid=None, env={'T': 296., 'p': 1.}, file=None):
return absorptionCoefficient_Voigt(SourceTables=table, OmegaStep=step, OmegaGrid=grid, Environment=env, File=file)
def abscoef_Lorentz(table=None, step=None, grid=None, env={'T': 296., 'p': 1.}, file=None):
return absorptionCoefficient_Lorentz(SourceTables=table, OmegaStep=step, OmegaGrid=grid, Environment=env, File=file)
def abscoef_Doppler(table=None, step=None, grid=None, env={'T': 296., 'p': 1.}, file=None):
return absorptionCoefficient_Doppler(SourceTables=table, OmegaStep=step, OmegaGrid=grid, Environment=env, File=file)
abscoef_Gauss = abscoef_Doppler
def abscoef(table=None, step=None, grid=None, env={'T': 296., 'p': 1.}, file=None): # default
return absorptionCoefficient_Lorentz(SourceTables=table, OmegaStep=step, OmegaGrid=grid, Environment=env, File=file)
# ---------------------------------------------------------------------------
def transmittanceSpectrum(Omegas, AbsorptionCoefficient, Environment={'l': 100.},
File=None, Format='%e %e', Wavenumber=None):
"""
INPUT PARAMETERS:
Wavenumber/Omegas: wavenumber grid (required)
AbsorptionCoefficient: absorption coefficient on grid (required)
Environment: dictionary containing path length in cm.
Default={'l':100.}
File: name of the output file (optional)
Format: c format used in file output, default '%e %e' (optional)
OUTPUT PARAMETERS:
Wavenum: wavenumber grid
Xsect: transmittance spectrum calculated on the grid
---
DESCRIPTION:
Calculate a transmittance spectrum (dimensionless) based
on previously calculated absorption coefficient.
Transmittance spectrum is calculated at an arbitrary
optical path length 'l' (1 m by default)
---
EXAMPLE OF USAGE:
nu,trans = transmittanceSpectrum(nu,coef)
---
"""
# compatibility with older versions
if Wavenumber: Omegas = Wavenumber
l = Environment['l']
Xsect = exp(-AbsorptionCoefficient * l)
if File: save_to_file(File, Format, Omegas, Xsect)
return Omegas, Xsect
def absorptionSpectrum(Omegas, AbsorptionCoefficient, Environment={'l': 100.},
File=None, Format='%e %e', Wavenumber=None):
"""
INPUT PARAMETERS:
Wavenumber/Omegas: wavenumber grid (required)
AbsorptionCoefficient: absorption coefficient on grid (required)
Environment: dictionary containing path length in cm.
Default={'l':100.}
File: name of the output file (optional)
Format: c format used in file output, default '%e %e' (optional)
OUTPUT PARAMETERS:
Wavenum: wavenumber grid
Xsect: absorption spectrum calculated on the grid
---
DESCRIPTION:
Calculate an absorption spectrum (dimensionless) based
on previously calculated absorption coefficient.
Absorption spectrum is calculated at an arbitrary
optical path length 'l' (1 m by default)
---
EXAMPLE OF USAGE:
nu,absorp = absorptionSpectrum(nu,coef)
---
"""
# compatibility with older versions
if Wavenumber: Omegas = Wavenumber
l = Environment['l']
Xsect = 1 - exp(-AbsorptionCoefficient * l)
if File: save_to_file(File, Format, Omegas, Xsect)
return Omegas, Xsect
def radianceSpectrum(Omegas, AbsorptionCoefficient, Environment={'l': 100., 'T': 296.},
File=None, Format='%e %e', Wavenumber=None):
"""
INPUT PARAMETERS:
Wavenumber/Omegas: wavenumber grid (required)
AbsorptionCoefficient: absorption coefficient on grid (required)
Environment: dictionary containing path length in cm.
and temperature in Kelvin.
Default={'l':100.,'T':296.}
File: name of the output file (optional)
Format: c format used in file output, default '%e %e' (optional)
OUTPUT PARAMETERS:
Wavenum: wavenumber grid
Xsect: radiance spectrum calculated on the grid
---
DESCRIPTION:
Calculate a radiance spectrum (in W/sr/cm^2/cm-1) based
on previously calculated absorption coefficient.
Radiance spectrum is calculated at an arbitrary
optical path length 'l' (1 m by default) and
temperature 'T' (296 K by default). For obtaining a
physically meaningful result 'T' must be the same
as a temperature which was used in absorption coefficient.
---
EXAMPLE OF USAGE:
nu,radi = radianceSpectrum(nu,coef)
---
"""
# compatibility with older versions
if Wavenumber: Omegas = Wavenumber
l = Environment['l']
T = Environment['T']
Alw = 1 - exp(-AbsorptionCoefficient * l)
LBBTw = 2 * hh * cc ** 2 * Omegas ** 3 / (exp(hh * cc * Omegas / (cBolts * T)) - 1) * 1.0E-7
Xsect = Alw * LBBTw # W/sr/cm**2/cm**-1
if File: save_to_file(File, Format, Omegas, Xsect)
return Omegas, Xsect
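# --- Illustrative usage sketch (not part of the original HAPI source) ---
# Shows how the spectra helpers above are typically chained: an absorption
# coefficient is computed first and then converted to transmittance and
# radiance. The table name 'CO2' is an assumption for this example.
def _example_spectra_chain():
    """Hedged sketch: absorption coefficient -> transmittance -> radiance for a 100 cm path."""
    nu, coef = absorptionCoefficient_Lorentz(SourceTables='CO2', WavenumberStep=0.01,
                                             HITRAN_units=False, GammaL='gamma_self')
    nu, trans = transmittanceSpectrum(nu, coef, Environment={'l': 100.})
    nu, radi = radianceSpectrum(nu, coef, Environment={'l': 100., 'T': 296.})
    return nu, trans, radi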
# GET X,Y FOR FINE PLOTTING OF A STICK SPECTRUM
def getStickXY(TableName):
"""
Get X and Y for fine plotting of a stick spectrum.
Usage: X,Y = getStickXY(TableName).
"""
cent, intens = getColumns(TableName, ('nu', 'sw'))
n = len(cent)
cent_ = zeros(n * 3)
intens_ = zeros(n * 3)
for i in range(n):
intens_[3 * i] = 0
intens_[3 * i + 1] = intens[i]
intens_[3 * i + 2] = 0
cent_[(3 * i):(3 * i + 3)] = cent[i]
return cent_, intens_
# /GET X,Y FOR FINE PLOTTING OF A STICK SPECTRUM
# LOW-RES SPECTRA (CONVOLUTION WITH APPARATUS FUNCTION)
# /LOW-RES SPECTRA (CONVOLUTION WITH APPARATUS FUNCTION)
# /----------------------------------------------------------------------------
# ------------------ HITRAN-ON-THE-WEB COMPATIBILITY -------------------------
def read_hotw(filename):
"""
Read cross-section file fetched from HITRAN-on-the-Web.
The format of the file line must be as follows:
nu, coef
Other lines are omitted.
"""
import sys
f = open(filename, 'r')
nu = []
coef = []
for line in f:
pars = line.split()
try:
nu.append(float(pars[0]))
coef.append(float(pars[1]))
except:
if False:
print(sys.exc_info())
else:
pass
return array(nu), array(coef)
# alias for read_hotw for backwards compatibility
read_xsect = read_hotw
# /----------------------------------------------------------------------------
# ------------------ SPECTRAL CONVOLUTION -------------------------
# rectangular slit function
def SLIT_RECTANGULAR(x, g):
"""
Instrumental (slit) function.
B(x) = 1/γ, if |x| ≤ γ/2 & B(x) = 0, if |x| > γ/2,
where γ is the slit width or the instrumental resolution.
"""
index_inner = abs(x) <= g / 2
index_outer = ~index_inner
y = zeros(len(x))
y[index_inner] = 1 / g
y[index_outer] = 0
return y
# triangular slit function
def SLIT_TRIANGULAR(x, g):
"""
Instrumental (slit) function.
B(x) = 1/γ*(1-|x|/γ), if |x| ≤ γ & B(x) = 0, if |x| > γ,
where γ is the line width equal to the half base of the triangle.
"""
index_inner = abs(x) <= g
index_outer = ~index_inner
y = zeros(len(x))
y[index_inner] = 1 / g * (1 - abs(x[index_inner]) / g)
y[index_outer] = 0
return y
# gaussian slit function
def SLIT_GAUSSIAN(x, g):
"""
Instrumental (slit) function.
B(x) = sqrt(ln(2)/pi)/γ*exp(-ln(2)*(x/γ)**2),
where γ/2 is the Gaussian half-width at half-maximum.
"""
g /= 2
return sqrt(log(2)) / (sqrt(pi) * g) * exp(-log(2) * (x / g) ** 2)
# dispersion slit function
def SLIT_DISPERSION(x, g):
"""
Instrumental (slit) function.
B(x) = γ/pi/(x**2+γ**2),
where γ/2 is the Lorentzian half-width at half-maximum.
"""
g /= 2
return g / pi / (x ** 2 + g ** 2)
# cosinus slit function
def SLIT_COSINUS(x, g):
"""
Instrumental (slit) function (raised cosine), as implemented below:
B(x) = (cos(pi*x/γ)+1)/(2*γ), where γ is the slit width.
"""
return (cos(pi / g * x) + 1) / (2 * g)
# diffraction slit function
def SLIT_DIFFRACTION(x, g):
"""
Instrumental (slit) function.
"""
y = zeros(len(x))
index_zero = x == 0
index_nonzero = ~index_zero
dk_ = pi / g
x_ = dk_ * x[index_nonzero]
w_ = sin(x_)
r_ = w_ ** 2 / x_ ** 2
y[index_zero] = 1
y[index_nonzero] = r_ / g
return y
# apparatus function of the ideal Michelson interferometer
def SLIT_MICHELSON(x, g):
"""
Instrumental (slit) function.
B(x) = 2/γ*sin(2pi*x/γ)/(2pi*x/γ) if x!=0 else 1,
where 1/γ is the maximum optical path difference.
"""
y = zeros(len(x))
index_zero = x == 0
index_nonzero = ~index_zero
dk_ = 2 * pi / g
x_ = dk_ * x[index_nonzero]
y[index_zero] = 1
y[index_nonzero] = 2 / g * sin(x_) / x_
return y
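# --- Illustrative sketch (not part of the original HAPI source) ---
# The slit functions above integrate to approximately unity over the wavenumber
# axis; convolveSpectrum() below renormalizes them numerically anyway. The grid
# parameters used here are assumptions for the example.
def _example_slit_normalization(Resolution=0.5, AF_wing=10., step=0.01):
    """Hedged sketch: the numerical integral of a Gaussian slit should be close to 1."""
    x = arange_(-AF_wing, AF_wing + step, step)
    slit = SLIT_GAUSSIAN(x, Resolution)
    return sum(slit) * step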
# spectral convolution with an apparatus (slit) function
def convolveSpectrum(Omega, CrossSection, Resolution=0.1, AF_wing=10.,
SlitFunction=SLIT_RECTANGULAR, Wavenumber=None):
"""
INPUT PARAMETERS:
Wavenumber/Omega: wavenumber grid (required)
CrossSection: high-res cross section calculated on grid (required)
Resolution: instrumental resolution γ (optional)
AF_wing: instrumental function wing (optional)
SlitFunction: instrumental function for low-res spectra calculation (optional)
OUTPUT PARAMETERS:
Wavenum: wavenumber grid
CrossSection: low-res cross section calculated on grid
i1: lower index in Omega input
i2: higher index in Omega input
slit: slit function calculated over grid [-AF_wing; AF_wing]
with the step equal to instrumental resolution.
---
DESCRIPTION:
Produce a simulation of an experimental spectrum via the convolution
of a "dry" spectrum with an instrumental function.
The instrumental function is provided as a parameter and
is calculated on a grid with width=AF_wing and step=Resolution.
---
EXAMPLE OF USAGE:
nu_,radi_,i,j,slit = convolveSpectrum(nu,radi,Resolution=2.0,AF_wing=10.0,
SlitFunction=SLIT_MICHELSON)
---
"""
# compatibility with older versions
if Wavenumber: Omega = Wavenumber
step = Omega[1] - Omega[0]
if step >= Resolution: raise Exception('step must be less than resolution')
# x = arange(-AF_wing,AF_wing+step,step)
x = arange_(-AF_wing, AF_wing + step, step) # fix
slit = SlitFunction(x, Resolution)
# FIXING THE BUG: normalize slit function
slit /= sum(slit) * step # simple normalization
left_bnd = len(slit) // 2  # integer division so the result can be used as a slice index
right_bnd = len(Omega) - len(slit) // 2
# CrossSectionLowRes = convolve(CrossSection,slit,mode='valid')*step
CrossSectionLowRes = convolve(CrossSection, slit, mode='same') * step
# return Omega[left_bnd:right_bnd],CrossSectionLowRes,left_bnd,right_bnd,slit
return Omega[left_bnd:right_bnd], CrossSectionLowRes[left_bnd:right_bnd], left_bnd, right_bnd, slit
# DEBUG
# spectral convolution with an apparatus (slit) function
def convolveSpectrumSame(Omega, CrossSection, Resolution=0.1, AF_wing=10.,
SlitFunction=SLIT_RECTANGULAR):
"""
Convolves cross section with a slit function with given parameters.
"""
step = Omega[1] - Omega[0]
x = arange(-AF_wing, AF_wing + step, step)
slit = SlitFunction(x, Resolution)
print('step=')
print(step)
print('x=')
print(x)
print('slitfunc=')
print(SlitFunction)
CrossSectionLowRes = convolve(CrossSection, slit, mode='same') * step
return Omega, CrossSectionLowRes, None, None, slit
# DEBUG
def convolveSpectrumFull(Omega, CrossSection, Resolution=0.1, AF_wing=10., SlitFunction=SLIT_RECTANGULAR):
"""
Convolves cross section with a slit function with given parameters.
"""
step = Omega[1] - Omega[0]
x = arange(-AF_wing, AF_wing + step, step)
slit = SlitFunction(x, Resolution)
print('step=')
print(step)
print('x=')
print(x)
print('slitfunc=')
print(SlitFunction)
CrossSectionLowRes = convolve(CrossSection, slit, mode='full') * step
return Omega, CrossSectionLowRes, None, None
# ------------------------------------------------------------------
|
gpl-3.0
|
tomchor/pymicra
|
pymicra/algs/auxiliar.py
|
1
|
3396
|
from __future__ import absolute_import, print_function, division
"""
"""
def stripDown(str, final='', args=['_', '-']):
"""
Auxiliary function to strip keywords from symbols
"""
for arg in args:
str=str.replace(arg,final)
return str
def lenYear(year):
"""
Calculates the length of a year in days
Useful to figure out if a certain year is a leap year
"""
import calendar
feblen=calendar.monthrange(year,2)[1]
otherlens=365-28
return feblen + otherlens
def testValid(df_valid, testname='', failverbose=True, passverbose=True, filepath=None):
"""
Tests a boolean Series obtained from a test and prints standard output
Parameters
-----------
df_valid: pandas.Series
series containing only True or False values for each of the variables, which should be the index of the Series
testname: string
the name of the test that generated the True/False values
failverbose: bool
whether to return which variables caused a false result
passverbose: bool
whether to print a message for successful cases
Returns
--------
result: bool
True if the test passed
failed: list
list of failed variables if result==False. None otherwise.
"""
from os.path import basename
if False in df_valid.values:
failed = df_valid[ df_valid==False ].index
print(basename(filepath), ': !FAILED',testname,'test!\n')
if failverbose:
print('Failed variable(s):', ', '.join(failed),'\n')
print()
return False, failed
else:
if passverbose: print(basename(filepath),'passed',testname,'test')
return True, None
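# --- Illustrative sketch (not part of the original pymicra source) ---
# Shows how testValid() above could be called on a small boolean Series; the
# variable names and the file path are assumptions made for this example.
def _example_testValid():
    import pandas as pd
    df_valid = pd.Series({'u': True, 'v': False, 'theta_v': True})
    result, failed = testValid(df_valid, testname='spikes',
                               filepath='/tmp/20120101-0000.csv')
    return result, failed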
def applyResult(result, failed, df, control=None, testname=None, filename=None, failshow=False, index_n=None):
"""
Auxiliary function to be used with util.qcontrol
Parameters
-----------
result: bool
whether the test succeeded (True) or failed (False)
failed: list
list of failed variables. None object if the test was successful
control: dictionary
dictionary whose keys are the names of the tests and items are lists
testname: string
name of the test (has to match control dict)
filename: string
name or path or identifier of the file tested
failshow: bool
whether to show the failed variables or not
"""
import matplotlib.pyplot as plt
if result==False:
if failshow:
df[failed].plot()
plt.show()
if type(control)==dict:
control[testname].append(filename)
else:
control.loc[ index_n, testname ] = filename
return control
def first_last(fname):
"""
Returns first and last lines of a file
"""
with open(fname, 'rb') as fin:
first = fin.readline()
last = first  # fall back to the first line for single-line files
for line in fin:
last = line
return first, last
def _completeHM(string):
"""
Deprecated.
Completes %H%M strings for cases when 2hours 0 minutes appear
as 020. Should be dropped eventually because this is pretty much a hack that
corrects for file configuration
"""
if string.isdigit():
pass
else:
raise TypeError('String passed must contain only digits. Check the argument')
if len(string)==3:
string='0'+string
return string
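# --- Illustrative note (not part of the original source) ---
# Worked example of _completeHM(): _completeHM('020') returns '0020', i.e. a
# three-digit string is padded with a single leading zero to four digits.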
|
gpl-3.0
|
guacamoleo/clBLAS
|
src/scripts/perf/plotPerformance.py
|
16
|
12286
|
# ########################################################################
# Copyright 2013 Advanced Micro Devices, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ########################################################################
# to use this script, you will need to download and install the 32-BIT VERSION of:
# - Python 2.7 x86 (32-bit) - http://www.python.org/download/releases/2.7.1
#
# you will also need the 32-BIT VERSIONS of the following packages as not all the packages are available in 64bit at the time of this writing
# The ActiveState python distribution is recommended for windows
# (make sure to get the python 2.7-compatible packages):
# - NumPy 1.5.1 (32-bit, 64-bit unofficial, supports Python 2.4 - 2.7 and 3.1 - 3.2.) - http://sourceforge.net/projects/numpy/files/NumPy/
# - matplotlib 1.0.1 (32-bit & 64-bit, supports Python 2.4 - 2.7) - http://sourceforge.net/projects/matplotlib/files/matplotlib/
#
# For ActiveState Python, all that one should need to type is 'pypm install matplotlib'
import datetime
import sys
import argparse
import subprocess
import itertools
import os
import matplotlib
import pylab
from matplotlib.backends.backend_pdf import PdfPages
from blasPerformanceTesting import *
def plotGraph(dataForAllPlots, title, plottype, plotkwargs, xaxislabel, yaxislabel):
"""
display a pretty graph
"""
colors = ['k','y','m','c','r','b','g']
#plottype = 'plot'
for thisPlot in dataForAllPlots:
getattr(pylab, plottype)(thisPlot.xdata, thisPlot.ydata,
'{}.-'.format(colors.pop()),
label=thisPlot.label, **plotkwargs)
if len(dataForAllPlots) > 1:
pylab.legend(loc='best')
pylab.title(title)
pylab.xlabel(xaxislabel)
pylab.ylabel(yaxislabel)
pylab.grid(True)
if args.outputFilename == None:
# if no pdf output is requested, spit the graph to the screen . . .
pylab.show()
else:
pylab.savefig(args.outputFilename,dpi=(1024/8))
# . . . otherwise, gimme gimme pdf
#pdf = PdfPages(args.outputFilename)
#pdf.savefig()
#pdf.close()
######## plotFromDataFile() Function to plot from data file begins ########
def plotFromDataFile():
data = []
"""
read in table(s) from file(s)
"""
for thisFile in args.datafile:
if not os.path.isfile(thisFile):
print 'No file with the name \'{}\' exists. Please indicate another filename.'.format(thisFile)
quit()
results = open(thisFile, 'r')
results_contents = results.read()
results_contents = results_contents.rstrip().split('\n')
firstRow = results_contents.pop(0)
print firstRow
print blas_table_header()
print firstRow.rstrip()==blas_table_header()
if firstRow.rstrip() != blas_table_header():
print 'ERROR: input file \'{}\' does not match expected format.'.format(thisFile)
quit()
for row in results_contents:
row = row.split(',')
row = TableRow(BlasTestCombination(row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8],row[9],row[10],row[11],row[12],row[13],row[14], row[15], row[16], row[17][1:], row[17][0], row[18], row[19], row[20]), row[21])
data.append(BlasGraphPoint(row.parameters.sizem, row.parameters.sizen, row.parameters.sizek, row.parameters.lda, row.parameters.ldb, row.parameters.ldc, row.parameters.offa , row.parameters.offb , row.parameters.offc , row.parameters.device, row.parameters.order, row.parameters.transa, row.parameters.transb, row.parameters.precision + row.parameters.function, row.parameters.library, row.parameters.label, row.gflops))
"""
data sanity check
"""
# if multiple plotvalues have > 1 value among the data rows, the user must specify which to plot
multiplePlotValues = []
for option in plotvalues:
values = []
for point in data:
values.append(getattr(point, option))
multiplePlotValues.append(len(set(values)) > 1)
if multiplePlotValues.count(True) > 1 and args.plot == None:
print 'ERROR: more than one parameter of {} has multiple values. Please specify which parameter to plot with --plot'.format(plotvalues)
quit()
# if args.graphxaxis is not 'problemsize', the user should know that the results might be strange
#if args.graphxaxis != 'problemsize':
# xaxisvalueSet = []
# for option in xaxisvalues:
# if option != 'problemsize':
# values = []
# for point in data:
# values.append(getattr(point, option))
# xaxisvalueSet.append(len(set(values)) > 1)
# if xaxisvalueSet.count(True) > 1:
# print 'WARNING: more than one parameter of {} is varied. unexpected results may occur. please double check your graphs for accuracy.'.format(xaxisvalues)
# multiple rows should not have the same input values
#pointInputs = []
#for point in data:
# pointInputs.append(point.__str__().split(';')[0])
#if len(set(pointInputs)) != len(data):
# print 'ERROR: imported table has duplicate rows with identical input parameters'
# quit()
"""
figure out if we have multiple plots on this graph (and what they should be)
"""
if args.plot != None:
multiplePlots = args.plot
elif multiplePlotValues.count(True) == 1 and plotvalues[multiplePlotValues.index(True)] != 'sizek':
# we don't ever want to default to sizek, because it's probably going to vary for most plots
# we'll require the user to explicitly request multiple plots on sizek if necessary
multiplePlots = plotvalues[multiplePlotValues.index(True)]
else:
# default to device if none of the options to plot have multiple values
multiplePlots = 'device'
"""
assemble data for the graphs
"""
data.sort(key=lambda row: int(getattr(row, args.graphxaxis)))
# choose scale for x axis
if args.xaxisscale == None:
# user didn't specify. autodetect
if int(getattr(data[len(data)-1], args.graphxaxis)) > 10000: # very big numbers on x-axis
args.xaxisscale = 'log10'
elif int(getattr(data[len(data)-1], args.graphxaxis)) > 2000: # big numbers on x-axis
args.xaxisscale = 'log2'
else: # small numbers on x-axis
args.xaxisscale = 'linear'
if args.xaxisscale == 'linear':
plotkwargs = {}
plottype = 'plot'
elif args.xaxisscale == 'log2':
plottype = 'semilogx'
plotkwargs = {'basex':2}
elif args.xaxisscale == 'log10':
plottype = 'semilogx'
plotkwargs = {'basex':10}
else:
print 'ERROR: invalid value for x-axis scale'
quit()
plots = set(getattr(row, multiplePlots) for row in data)
class DataForOnePlot:
def __init__(self, inlabel, inxdata, inydata):
self.label = inlabel
self.xdata = inxdata
self.ydata = inydata
dataForAllPlots = []
for plot in plots:
dataForThisPlot = itertools.ifilter( lambda x: getattr(x, multiplePlots) == plot, data)
dataForThisPlot = list(itertools.islice(dataForThisPlot, None))
#if args.graphxaxis == 'problemsize':
# xdata = [int(row.x) * int(row.y) * int(row.z) * int(row.batchsize) for row in dataForThisPlot]
#else:
xdata = [getattr(row, args.graphxaxis) for row in dataForThisPlot]
ydata = [getattr(row, args.graphyaxis) for row in dataForThisPlot]
dataForAllPlots.append(DataForOnePlot(plot,xdata,ydata))
"""
assemble labels for the graph or use the user-specified ones
"""
if args.graphtitle:
# use the user selection
title = args.graphtitle
else:
# autogen a lovely title
title = 'Performance vs. ' + args.graphxaxis.capitalize()
if args.xaxislabel:
# use the user selection
xaxislabel = args.xaxislabel
else:
# autogen a lovely x-axis label
if args.graphxaxis == 'cachesize':
units = '(bytes)'
else:
units = '(datapoints)'
xaxislabel = args.graphxaxis + ' ' + units
if args.yaxislabel:
# use the user selection
yaxislabel = args.yaxislabel
else:
# autogen a lovely y-axis label
if args.graphyaxis == 'gflops':
units = 'GFLOPS'
yaxislabel = 'Performance (' + units + ')'
"""
display a pretty graph
"""
colors = ['k','y','m','c','r','b','g']
for thisPlot in dataForAllPlots:
getattr(pylab, plottype)(thisPlot.xdata, thisPlot.ydata, '{}.-'.format(colors.pop()), label=thisPlot.label, **plotkwargs)
if len(dataForAllPlots) > 1:
pylab.legend(loc='best')
pylab.title(title)
pylab.xlabel(xaxislabel)
pylab.ylabel(yaxislabel)
pylab.grid(True)
if args.outputFilename == None:
# if no pdf output is requested, spit the graph to the screen . . .
pylab.show()
else:
# . . . otherwise, gimme gimme pdf
#pdf = PdfPages(args.outputFilename)
#pdf.savefig()
#pdf.close()
pylab.savefig(args.outputFilename,dpi=(1024/8))
######### plotFromDataFile() Function to plot from data file ends #########
######## "main" program begins #####
"""
define and parse parameters
"""
xaxisvalues = ['sizem','sizen','sizek']
yaxisvalues = ['gflops']
plotvalues = ['lda','ldb','ldc','sizek','device','label','order','transa','transb','function','library']
parser = argparse.ArgumentParser(description='Plot performance of the clblas\
library. clblas.plotPerformance.py reads in data tables from clblas.\
measurePerformance.py and plots their values')
fileOrDb = parser.add_mutually_exclusive_group(required=True)
fileOrDb.add_argument('-d', '--datafile',
dest='datafile', action='append', default=None, required=False,
help='indicate a file to use as input. must be in the format output by\
clblas.measurePerformance.py. may be used multiple times to indicate\
multiple input files. e.g., -d cypressOutput.txt -d caymanOutput.txt')
parser.add_argument('-x', '--x_axis',
dest='graphxaxis', default=None, choices=xaxisvalues, required=True,
help='indicate which value will be represented on the x axis. problemsize\
is defined as x*y*z*batchsize')
parser.add_argument('-y', '--y_axis',
dest='graphyaxis', default='gflops', choices=yaxisvalues,
help='indicate which value will be represented on the y axis')
parser.add_argument('--plot',
dest='plot', default=None, choices=plotvalues,
help='indicate which of {} should be used to differentiate multiple plots.\
this will be chosen automatically if not specified'.format(plotvalues))
parser.add_argument('--title',
dest='graphtitle', default=None,
help='the desired title for the graph generated by this execution. if\
GRAPHTITLE contains any spaces, it must be entered in \"double quotes\".\
if this option is not specified, the title will be autogenerated')
parser.add_argument('--x_axis_label',
dest='xaxislabel', default=None,
help='the desired label for the graph\'s x-axis. if XAXISLABEL contains\
any spaces, it must be entered in \"double quotes\". if this option\
is not specified, the x-axis label will be autogenerated')
parser.add_argument('--x_axis_scale',
dest='xaxisscale', default=None, choices=['linear','log2','log10'],
help='the desired scale for the graph\'s x-axis. if nothing is specified,\
it will be selected automatically')
parser.add_argument('--y_axis_label',
dest='yaxislabel', default=None,
help='the desired label for the graph\'s y-axis. if YAXISLABEL contains any\
spaces, it must be entered in \"double quotes\". if this option is not\
specified, the y-axis label will be autogenerated')
parser.add_argument('--outputfile',
dest='outputFilename', default=None,
help='name of the file to output graphs. Supported formats: emf, eps, pdf, png, ps, raw, rgba, svg, svgz.')
args = parser.parse_args()
if args.datafile != None:
plotFromDataFile()
else:
print "At least specify whether you want to use text files or a database for plotting graphs. Use the -h or --help option for more details"
quit()
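# --- Illustrative invocation (not part of the original script) ---
# Based on the argparse definitions above; the data file name comes from the
# --datafile help text and may not exist on your system:
#   python plotPerformance.py -d cypressOutput.txt -x sizem -y gflops --outputfile perf.pdf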
|
apache-2.0
|
LevinJ/Supply-demand-forecasting
|
preprocess/splittrainvalidation.py
|
1
|
7482
|
from sklearn.cross_validation import KFold
import numpy as np
from datetime import datetime
from datetime import timedelta
from enum import Enum
class HoldoutSplitMethod(Enum):
KFOLD_BYDATE = 4
kFOLD_FORWARD_CHAINING = 5
IMITTATE_TEST2_MIN = 6
IMITTATE_TEST2_FULL = 7
IMITTATE_TEST2_PLUS1 = 8
IMITTATE_TEST2_PLUS2 = 9
IMITTATE_TEST2_PLUS3 = 10
IMITTATE_TEST2_PLUS4 = 11
IMITTATE_TEST2_PLUS6 = 12
class SplitTrainValidation(object):
"""This class implements different cross-validation strategies and is intended
to be called by the data-preparation code to return CV folds.
"""
def __init__(self):
return
def get_kfold_bydate(self, df, n_folds = 10):
df.sort_values(by = ['time_date','time_id','start_district_id'], axis = 0, inplace = True)
df.reset_index(drop=True, inplace = True)
kf = KFold(df.shape[0], n_folds= n_folds, shuffle=False)
for train_index, test_index in kf:
print("TRAIN:", train_index, "TEST:", test_index)
return kf
def get_kfold_forward_chaining(self, df):
res = []
df.sort_values(by = ['time_date','time_id','start_district_id'], axis = 0, inplace = True)
df.reset_index(drop=True, inplace = True)
fold_len = df.shape[0]/10
#fold 1-2, 3
item = np.arange(0,2*fold_len), np.arange(2*fold_len, 3*fold_len)
res.append(item)
#fold 1-3, 4
item = np.arange(0,3*fold_len), np.arange(3*fold_len, 4*fold_len)
res.append(item)
#fold 1-4, 5
item = np.arange(0,4*fold_len), np.arange(4*fold_len, 5*fold_len)
res.append(item)
#fold 1-5, 6
item = np.arange(0,5*fold_len), np.arange(5*fold_len, 6*fold_len)
res.append(item)
#fold 1-6, 7
item = np.arange(0,6*fold_len), np.arange(6*fold_len, 7*fold_len)
res.append(item)
#fold 1-7, 8
item = np.arange(0,7*fold_len), np.arange(7*fold_len, 8*fold_len)
res.append(item)
#fold 1-8, 9
item = np.arange(0,8*fold_len), np.arange(8*fold_len, 9*fold_len)
res.append(item)
#fold 1-9, 10
item = np.arange(0,9*fold_len), np.arange(9*fold_len, 10*fold_len)
res.append(item)
return res
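# --- Illustrative note (not part of the original source) ---
# Forward-chaining example: with df.shape[0] == 1000, fold_len == 100, so the
# folds built above are train rows [0, 200) vs. validation [200, 300),
# train [0, 300) vs. validation [300, 400), ..., train [0, 900) vs. validation [900, 1000).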
def __get_date(self, start_date, days_num, days_step=2):
startDate = datetime.strptime(start_date, '%Y-%m-%d')
res = []
for i in range(days_num):
deltatime = timedelta(days = days_step*i)
item = (startDate + deltatime).date()
res.append(str(item))
return res
def __get_slots(self, split_method):
slot_split_dict = {}
slot_split_dict[HoldoutSplitMethod.IMITTATE_TEST2_MIN] = self.__get_slots_min()
slot_split_dict[HoldoutSplitMethod.IMITTATE_TEST2_FULL] = self.__get_slots_full()
slot_split_dict[HoldoutSplitMethod.IMITTATE_TEST2_PLUS1] = self.__getplusslots(plus_num = 1)
slot_split_dict[HoldoutSplitMethod.IMITTATE_TEST2_PLUS2] = self.__getplusslots(plus_num = 2)
slot_split_dict[HoldoutSplitMethod.IMITTATE_TEST2_PLUS3] = self.__getplusslots(plus_num = 3)
slot_split_dict[HoldoutSplitMethod.IMITTATE_TEST2_PLUS4] = self.__getplusslots(plus_num = 4)
slot_split_dict[HoldoutSplitMethod.IMITTATE_TEST2_PLUS6] = self.__getplusslots(plus_num = 6)
return slot_split_dict[split_method]
def __get_slots_min(self):
res = [46,58,70,82,94,106,118,130,142]
return res
def __get_slots_full(self):
res = [i+1 for i in range(144)]
return res
def __get_date_slots(self, dates, slots):
return [d + '-' + str(s) for d in dates for s in slots]
def __getplusslots(self, plus_num = 2):
res = []
min_slots = self.__get_slots_min()
for item in min_slots:
for i in range(plus_num+1):
x_below = item - i
x_above = item + i
if x_below <= 144 and x_below >= 1:
res.append(x_below)
if x_above <= 144 and x_above >= 1:
res.append(x_above)
return np.sort(list(set(res)))
def __unit_test(self):
assert ['2016-01-13','2016-01-15','2016-01-17','2016-01-19','2016-01-21'] == self.__get_date('2016-01-13', 5)
assert ['2016-01-12','2016-01-14','2016-01-16','2016-01-18','2016-01-20'] == self.__get_date('2016-01-12', 5)
print self.__getplusslots(2)
print self.__getplusslots(4)
print self.__getplusslots(6)
# self.get_holdoutset(holdout_id = 1)
#
# assert ['2016-01-13-46','2016-01-13-58','2016-01-13-70','2016-01-13-82','2016-01-13-94','2016-01-13-106','2016-01-13-118','2016-01-13-130','2016-01-13-142'] == self.get_holdoutset(holdout_id = 101)
print "unit test passed"
return
def __get_df_indexes(self, df, dateslots):
return df[df['time_slotid'].isin(dateslots)].index
def get_imitate_testset2(self,df, split_method = HoldoutSplitMethod.IMITTATE_TEST2_MIN):
df.sort_values(by = ['time_date','time_id','start_district_id'], axis = 0, inplace = True)
df.reset_index(drop=True, inplace = True)
res = []
# training 1-15, validation 16-21
# item = self.__get_train_validation_indexes(df, '2016-01-01', 15, split_method), self.__get_train_validation_indexes(df, '2016-01-16', 6)
# res.append(item)
#
# training 1-16, validation 17-21
item = self.__get_train_validation_indexes(df, '2016-01-01', 16, split_method), self.__get_train_validation_indexes(df, '2016-01-17', 5)
res.append(item)
# training 1-17, validation 18-21
item = self.__get_train_validation_indexes(df, '2016-01-01', 17, split_method), self.__get_train_validation_indexes(df, '2016-01-18', 4)
res.append(item)
# training 1-18, validation 19-21
item = self.__get_train_validation_indexes(df, '2016-01-01', 18, split_method), self.__get_train_validation_indexes(df, '2016-01-19', 3)
res.append(item)
# training 1-19, validation 19-21
# item = self.__get_train_validation_indexes(df, '2016-01-01', 19, split_method), self.__get_train_validation_indexes(df, '2016-01-20', 2)
# res.append(item)
#
# # training 1-20, validation 21
# item = self.__get_train_validation_indexes(df, '2016-01-01', 20, split_method), self.__get_train_validation_indexes(df, '2016-01-21', 1)
# res.append(item)
return res
def __get_train_validation_indexes(self,df, start_date, days_num, split_method = HoldoutSplitMethod.IMITTATE_TEST2_MIN):
dates = self.__get_date(start_date, days_num, days_step=1)
slots = self.__get_slots(split_method)
dates_slots = self.__get_date_slots(dates, slots)
indexes = self.__get_df_indexes(df, dates_slots)
return indexes
def run(self, df):
self.__unit_test()
# self.get_kfold_bydate(df)
# self.get_kfold_forward_chaining(df)
return
if __name__ == "__main__":
obj= SplitTrainValidation()
from preparedata import PrepareData
from utility.datafilepath import g_singletonDataFilePath
pre = PrepareData()
pre.X_y_Df = pre.load_gapdf(g_singletonDataFilePath.getTrainDir())
pre.__engineer_feature(g_singletonDataFilePath.getTrainDir())
obj.run(pre.X_y_Df)
|
mit
|
brain-research/fisher-rao-regularization
|
plot_utils.py
|
1
|
3485
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for plotting results."""
import json
import os
import re
import matplotlib.pyplot as plt
import numpy as np
ALPHA1 = 0.4
ALPHA2 = 0.2
COLOR1 = "red"
COLOR2 = "blue"
COLOR3 = "green"
COLOR4 = "purple"
COLOR5 = "orange"
COLOR6 = "black"
def load_data_for_pattern(pattern, data_dir):
"""Loads data from all files in data_dir that match a regular expression."""
file_list = os.listdir(data_dir)
data = []
for filename in file_list:
if pattern.match(filename):
with open(data_dir + filename, "r") as input_file:
data.append(json.load(input_file))
return data
def compute_statistics(data, variable):
"""Computes mean and standard deviations for given variable in loaded data."""
data_mean = data[0][variable]
data_std = np.zeros(len(data_mean))
if len(data) > 1:
for i in range(1, len(data)):
data_mean = np.add(data_mean, data[i][variable])
data_mean = np.divide(data_mean, len(data))
for i in range(len(data)):
data_std = np.add(data_std,
np.square(np.subtract(data[i][variable], data_mean)))
data_std = np.sqrt(np.divide(data_std, len(data) - 1))
return (data_mean, data_std)
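# --- Illustrative note (not part of the original source) ---
# Worked example of compute_statistics(): with two runs whose "train_loss" at a
# given step is 1.0 and 3.0, the mean is 2.0 and the sample standard deviation
# is sqrt(((1-2)**2 + (3-2)**2) / (2-1)) = sqrt(2) ~= 1.41, matching the code above.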
def draw_plot(data_mean, data_std, label, title, color, factor):
"""Draws plot with two standard deviation error bars."""
num_iter = np.multiply(factor, range(0, len(data_mean)))
low_fill = np.subtract(data_mean, np.multiply(2., data_std))
high_fill = np.add(data_mean, np.multiply(2., data_std))
plt.plot(num_iter, data_mean, color=color, alpha=ALPHA1, label=label)
plt.fill_between(num_iter, low_fill, high_fill, color=color, lw=0,
alpha=ALPHA2)
plt.xlabel("# iter")
plt.ylabel(title)
plt.legend(loc="best")
def example():
"""Demonstrates how to plot results from two experiments to compare results.
"""
plt.figure()
label = ".*"
pattern = re.compile(label)
data = load_data_for_pattern(pattern, "/tmp/data/")
train_loss_mean, train_loss_std = compute_statistics(data, "train_loss")
train_acc_mean, train_acc_std = compute_statistics(data, "train_accuracy")
test_loss_mean, test_loss_std = compute_statistics(data, "test_loss")
test_acc_mean, test_acc_std = compute_statistics(data, "test_accuracy")
regularizer_mean, regularizer_std = compute_statistics(data,
"regularizer_loss")
plt.subplot(1, 3, 1)
draw_plot(train_loss_mean, train_loss_std, "train", "loss", COLOR1, 100)
draw_plot(test_loss_mean, test_loss_std, "test", "loss", COLOR2, 100)
plt.subplot(1, 3, 2)
draw_plot(train_acc_mean, train_acc_std, "train", "accuracy", COLOR1, 100)
draw_plot(test_acc_mean, test_acc_std, "test", "accuracy", COLOR2, 100)
plt.subplot(1, 3, 3)
draw_plot(regularizer_mean, regularizer_std, "regularizer", "loss", COLOR1, 1)
plt.show()
if __name__ == '__main__':
example()
|
apache-2.0
|
umuzungu/zipline
|
tests/pipeline/test_pipeline_algo.py
|
2
|
20033
|
"""
Tests for Algorithms using the Pipeline API.
"""
from os.path import (
dirname,
join,
realpath,
)
from nose_parameterized import parameterized
from numpy import (
array,
arange,
full_like,
float64,
nan,
uint32,
)
from numpy.testing import assert_almost_equal
import pandas as pd
from pandas import (
concat,
DataFrame,
date_range,
read_csv,
Series,
Timestamp,
)
from six import iteritems, itervalues
from zipline.algorithm import TradingAlgorithm
from zipline.api import (
attach_pipeline,
pipeline_output,
get_datetime,
)
from zipline.errors import (
AttachPipelineAfterInitialize,
PipelineOutputDuringInitialize,
NoSuchPipeline,
)
from zipline.lib.adjustment import MULTIPLY
from zipline.pipeline import Pipeline
from zipline.pipeline.factors import VWAP
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.loaders.frame import DataFrameLoader
from zipline.pipeline.loaders.equity_pricing_loader import (
USEquityPricingLoader,
)
from zipline.testing import (
str_to_seconds
)
from zipline.testing import (
create_empty_splits_mergers_frame,
FakeDataPortal,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithBcolzDailyBarReaderFromCSVs,
WithDataPortal,
ZiplineTestCase,
)
from zipline.utils.tradingcalendar import trading_day
TEST_RESOURCE_PATH = join(
dirname(dirname(realpath(__file__))), # zipline_repo/tests
'resources',
'pipeline_inputs',
)
def rolling_vwap(df, length):
"Simple rolling vwap implementation for testing"
closes = df['close'].values
volumes = df['volume'].values
product = closes * volumes
out = full_like(closes, nan)
for upper_bound in range(length, len(closes) + 1):
bounds = slice(upper_bound - length, upper_bound)
out[upper_bound - 1] = product[bounds].sum() / volumes[bounds].sum()
return Series(out, index=df.index)
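# Illustrative sketch of the semantics above (hypothetical numbers): with
# length=2, closes [10, 20] and volumes [1, 3], the second output value is
# (10 * 1 + 20 * 3) / (1 + 3) == 17.5; positions before a full window stay NaN.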
class ClosesOnly(WithDataPortal, ZiplineTestCase):
sids = 1, 2, 3
START_DATE = pd.Timestamp('2014-01-01', tz='utc')
END_DATE = pd.Timestamp('2014-02-01', tz='utc')
dates = date_range(START_DATE, END_DATE, freq=trading_day, tz='utc')
@classmethod
def make_equity_info(cls):
cls.equity_info = ret = DataFrame.from_records([
{
'sid': 1,
'symbol': 'A',
'start_date': cls.dates[10],
'end_date': cls.dates[13],
'exchange': 'TEST',
},
{
'sid': 2,
'symbol': 'B',
'start_date': cls.dates[11],
'end_date': cls.dates[14],
'exchange': 'TEST',
},
{
'sid': 3,
'symbol': 'C',
'start_date': cls.dates[12],
'end_date': cls.dates[15],
'exchange': 'TEST',
},
])
return ret
@classmethod
def make_daily_bar_data(cls):
cls.closes = DataFrame(
{sid: arange(1, len(cls.dates) + 1) * sid for sid in cls.sids},
index=cls.dates,
dtype=float,
)
for sid in cls.sids:
yield sid, DataFrame(
{
'open': cls.closes[sid].values,
'high': cls.closes[sid].values,
'low': cls.closes[sid].values,
'close': cls.closes[sid].values,
'volume': cls.closes[sid].values,
},
index=cls.dates,
)
@classmethod
def init_class_fixtures(cls):
super(ClosesOnly, cls).init_class_fixtures()
cls.first_asset_start = min(cls.equity_info.start_date)
cls.last_asset_end = max(cls.equity_info.end_date)
cls.assets = cls.asset_finder.retrieve_all(cls.sids)
# Add a split for 'A' on its second date.
cls.split_asset = cls.assets[0]
cls.split_date = cls.split_asset.start_date + trading_day
cls.split_ratio = 0.5
cls.adjustments = DataFrame.from_records([
{
'sid': cls.split_asset.sid,
'value': cls.split_ratio,
'kind': MULTIPLY,
'start_date': Timestamp('NaT'),
'end_date': cls.split_date,
'apply_date': cls.split_date,
}
])
def init_instance_fixtures(self):
super(ClosesOnly, self).init_instance_fixtures()
# View of the data on/after the split.
self.adj_closes = adj_closes = self.closes.copy()
adj_closes.ix[:self.split_date, self.split_asset] *= self.split_ratio
self.pipeline_loader = DataFrameLoader(
column=USEquityPricing.close,
baseline=self.closes,
adjustments=self.adjustments,
)
def expected_close(self, date, asset):
if date < self.split_date:
lookup = self.closes
else:
lookup = self.adj_closes
return lookup.loc[date, asset]
def exists(self, date, asset):
return asset.start_date <= date <= asset.end_date
def test_attach_pipeline_after_initialize(self):
"""
Assert that calling attach_pipeline after initialize raises correctly.
"""
def initialize(context):
pass
def late_attach(context, data):
attach_pipeline(Pipeline(), 'test')
raise AssertionError("Shouldn't make it past attach_pipeline!")
algo = TradingAlgorithm(
initialize=initialize,
handle_data=late_attach,
data_frequency='daily',
get_pipeline_loader=lambda column: self.pipeline_loader,
start=self.first_asset_start - trading_day,
end=self.last_asset_end + trading_day,
env=self.env,
)
with self.assertRaises(AttachPipelineAfterInitialize):
algo.run(self.data_portal)
def barf(context, data):
raise AssertionError("Shouldn't make it past before_trading_start")
algo = TradingAlgorithm(
initialize=initialize,
before_trading_start=late_attach,
handle_data=barf,
data_frequency='daily',
get_pipeline_loader=lambda column: self.pipeline_loader,
start=self.first_asset_start - trading_day,
end=self.last_asset_end + trading_day,
env=self.env,
)
with self.assertRaises(AttachPipelineAfterInitialize):
algo.run(self.data_portal)
def test_pipeline_output_after_initialize(self):
"""
Assert that calling pipeline_output after initialize raises correctly.
"""
def initialize(context):
attach_pipeline(Pipeline(), 'test')
pipeline_output('test')
raise AssertionError("Shouldn't make it past pipeline_output()")
def handle_data(context, data):
raise AssertionError("Shouldn't make it past initialize!")
def before_trading_start(context, data):
raise AssertionError("Shouldn't make it past initialize!")
algo = TradingAlgorithm(
initialize=initialize,
handle_data=handle_data,
before_trading_start=before_trading_start,
data_frequency='daily',
get_pipeline_loader=lambda column: self.pipeline_loader,
start=self.first_asset_start - trading_day,
end=self.last_asset_end + trading_day,
env=self.env,
)
with self.assertRaises(PipelineOutputDuringInitialize):
algo.run(self.data_portal)
def test_get_output_nonexistent_pipeline(self):
"""
        Assert that requesting output for a pipeline that was never attached raises correctly.
"""
def initialize(context):
attach_pipeline(Pipeline(), 'test')
def handle_data(context, data):
raise AssertionError("Shouldn't make it past before_trading_start")
def before_trading_start(context, data):
pipeline_output('not_test')
raise AssertionError("Shouldn't make it past pipeline_output!")
algo = TradingAlgorithm(
initialize=initialize,
handle_data=handle_data,
before_trading_start=before_trading_start,
data_frequency='daily',
get_pipeline_loader=lambda column: self.pipeline_loader,
start=self.first_asset_start - trading_day,
end=self.last_asset_end + trading_day,
env=self.env,
)
with self.assertRaises(NoSuchPipeline):
algo.run(self.data_portal)
@parameterized.expand([('default', None),
('day', 1),
('week', 5),
('year', 252),
('all_but_one_day', 'all_but_one_day')])
def test_assets_appear_on_correct_days(self, test_name, chunksize):
"""
Assert that assets appear at correct times during a backtest, with
correctly-adjusted close price values.
"""
if chunksize == 'all_but_one_day':
chunksize = (
self.dates.get_loc(self.last_asset_end) -
self.dates.get_loc(self.first_asset_start)
) - 1
def initialize(context):
p = attach_pipeline(Pipeline(), 'test', chunksize=chunksize)
p.add(USEquityPricing.close.latest, 'close')
def handle_data(context, data):
results = pipeline_output('test')
date = get_datetime().normalize()
for asset in self.assets:
# Assets should appear iff they exist today and yesterday.
exists_today = self.exists(date, asset)
existed_yesterday = self.exists(date - trading_day, asset)
if exists_today and existed_yesterday:
latest = results.loc[asset, 'close']
self.assertEqual(latest, self.expected_close(date, asset))
else:
self.assertNotIn(asset, results.index)
before_trading_start = handle_data
algo = TradingAlgorithm(
initialize=initialize,
handle_data=handle_data,
before_trading_start=before_trading_start,
data_frequency='daily',
get_pipeline_loader=lambda column: self.pipeline_loader,
start=self.first_asset_start,
end=self.last_asset_end,
env=self.env,
)
# Run for a week in the middle of our data.
algo.run(self.data_portal)
class MockDailyBarSpotReader(object):
"""
A BcolzDailyBarReader which returns a constant value for spot price.
"""
def spot_price(self, sid, day, column):
return 100.0
class PipelineAlgorithmTestCase(WithBcolzDailyBarReaderFromCSVs,
WithAdjustmentReader,
ZiplineTestCase):
AAPL = 1
MSFT = 2
BRK_A = 3
assets = ASSET_FINDER_EQUITY_SIDS = AAPL, MSFT, BRK_A
ASSET_FINDER_EQUITY_SYMBOLS = 'AAPL', 'MSFT', 'BRK_A'
START_DATE = Timestamp('2014')
END_DATE = Timestamp('2015')
BCOLZ_DAILY_BAR_USE_FULL_CALENDAR = True
@classmethod
def make_daily_bar_data(cls):
resources = {
cls.AAPL: join(TEST_RESOURCE_PATH, 'AAPL.csv'),
cls.MSFT: join(TEST_RESOURCE_PATH, 'MSFT.csv'),
cls.BRK_A: join(TEST_RESOURCE_PATH, 'BRK-A.csv'),
}
cls.raw_data = raw_data = {
asset: read_csv(path, parse_dates=['day']).set_index('day')
for asset, path in resources.items()
}
# Add 'price' column as an alias because all kinds of stuff in zipline
# depends on it being present. :/
for frame in raw_data.values():
frame['price'] = frame['close']
return resources
@classmethod
def make_splits_data(cls):
return DataFrame.from_records([
{
'effective_date': str_to_seconds('2014-06-09'),
'ratio': (1 / 7.0),
'sid': cls.AAPL,
}
])
@classmethod
def make_mergers_data(cls):
return create_empty_splits_mergers_frame()
@classmethod
def make_dividends_data(cls):
return pd.DataFrame(array([], dtype=[
('sid', uint32),
('amount', float64),
('record_date', 'datetime64[ns]'),
('ex_date', 'datetime64[ns]'),
('declared_date', 'datetime64[ns]'),
('pay_date', 'datetime64[ns]'),
]))
@classmethod
def init_class_fixtures(cls):
super(PipelineAlgorithmTestCase, cls).init_class_fixtures()
cls.pipeline_loader = USEquityPricingLoader(
cls.bcolz_daily_bar_reader,
cls.adjustment_reader,
)
cls.dates = cls.raw_data[cls.AAPL].index.tz_localize('UTC')
cls.AAPL_split_date = Timestamp("2014-06-09", tz='UTC')
def compute_expected_vwaps(self, window_lengths):
AAPL, MSFT, BRK_A = self.AAPL, self.MSFT, self.BRK_A
# Our view of the data before AAPL's split on June 9, 2014.
raw = {k: v.copy() for k, v in iteritems(self.raw_data)}
split_date = self.AAPL_split_date
split_loc = self.dates.get_loc(split_date)
split_ratio = 7.0
# Our view of the data after AAPL's split. All prices from before June
# 9 get divided by the split ratio, and volumes get multiplied by the
# split ratio.
adj = {k: v.copy() for k, v in iteritems(self.raw_data)}
for column in 'open', 'high', 'low', 'close':
adj[AAPL].ix[:split_loc, column] /= split_ratio
adj[AAPL].ix[:split_loc, 'volume'] *= split_ratio
# length -> asset -> expected vwap
vwaps = {length: {} for length in window_lengths}
for length in window_lengths:
for asset in AAPL, MSFT, BRK_A:
raw_vwap = rolling_vwap(raw[asset], length)
adj_vwap = rolling_vwap(adj[asset], length)
# Shift computed results one day forward so that they're
# labelled by the date on which they'll be seen in the
# algorithm. (We can't show the close price for day N until day
# N + 1.)
vwaps[length][asset] = concat(
[
raw_vwap[:split_loc - 1],
adj_vwap[split_loc - 1:]
]
).shift(1, trading_day)
# Make sure all the expected vwaps have the same dates.
vwap_dates = vwaps[1][self.AAPL].index
for dict_ in itervalues(vwaps):
# Each value is a dict mapping sid -> expected series.
for series in itervalues(dict_):
self.assertTrue((vwap_dates == series.index).all())
# Spot check expectations near the AAPL split.
# length 1 vwap for the morning before the split should be the close
# price of the previous day.
before_split = vwaps[1][AAPL].loc[split_date - trading_day]
assert_almost_equal(before_split, 647.3499, decimal=2)
assert_almost_equal(
before_split,
raw[AAPL].loc[split_date - (2 * trading_day), 'close'],
decimal=2,
)
# length 1 vwap for the morning of the split should be the close price
# of the previous day, **ADJUSTED FOR THE SPLIT**.
on_split = vwaps[1][AAPL].loc[split_date]
assert_almost_equal(on_split, 645.5700 / split_ratio, decimal=2)
assert_almost_equal(
on_split,
raw[AAPL].loc[split_date - trading_day, 'close'] / split_ratio,
decimal=2,
)
# length 1 vwap on the day after the split should be the as-traded
# close on the split day.
after_split = vwaps[1][AAPL].loc[split_date + trading_day]
assert_almost_equal(after_split, 93.69999, decimal=2)
assert_almost_equal(
after_split,
raw[AAPL].loc[split_date, 'close'],
decimal=2,
)
return vwaps
@parameterized.expand([
(True,),
(False,),
])
def test_handle_adjustment(self, set_screen):
AAPL, MSFT, BRK_A = assets = self.AAPL, self.MSFT, self.BRK_A
window_lengths = [1, 2, 5, 10]
vwaps = self.compute_expected_vwaps(window_lengths)
def vwap_key(length):
return "vwap_%d" % length
def initialize(context):
pipeline = Pipeline()
context.vwaps = []
for length in vwaps:
name = vwap_key(length)
factor = VWAP(window_length=length)
context.vwaps.append(factor)
pipeline.add(factor, name=name)
filter_ = (USEquityPricing.close.latest > 300)
pipeline.add(filter_, 'filter')
if set_screen:
pipeline.set_screen(filter_)
attach_pipeline(pipeline, 'test')
def handle_data(context, data):
today = get_datetime()
results = pipeline_output('test')
expect_over_300 = {
AAPL: today < self.AAPL_split_date,
MSFT: False,
BRK_A: True,
}
for asset in assets:
should_pass_filter = expect_over_300[asset]
if set_screen and not should_pass_filter:
self.assertNotIn(asset, results.index)
continue
asset_results = results.loc[asset]
self.assertEqual(asset_results['filter'], should_pass_filter)
for length in vwaps:
computed = results.loc[asset, vwap_key(length)]
expected = vwaps[length][asset].loc[today]
# Only having two places of precision here is a bit
# unfortunate.
assert_almost_equal(computed, expected, decimal=2)
# Do the same checks in before_trading_start
before_trading_start = handle_data
algo = TradingAlgorithm(
initialize=initialize,
handle_data=handle_data,
before_trading_start=before_trading_start,
data_frequency='daily',
get_pipeline_loader=lambda column: self.pipeline_loader,
start=self.dates[max(window_lengths)],
end=self.dates[-1],
env=self.env,
)
algo.run(
FakeDataPortal(),
# Yes, I really do want to use the start and end dates I passed to
# TradingAlgorithm.
overwrite_sim_params=False,
)
def test_empty_pipeline(self):
# For ensuring we call before_trading_start.
count = [0]
def initialize(context):
pipeline = attach_pipeline(Pipeline(), 'test')
vwap = VWAP(window_length=10)
pipeline.add(vwap, 'vwap')
# Nothing should have prices less than 0.
pipeline.set_screen(vwap < 0)
def handle_data(context, data):
pass
def before_trading_start(context, data):
context.results = pipeline_output('test')
self.assertTrue(context.results.empty)
count[0] += 1
algo = TradingAlgorithm(
initialize=initialize,
handle_data=handle_data,
before_trading_start=before_trading_start,
data_frequency='daily',
get_pipeline_loader=lambda column: self.pipeline_loader,
start=self.dates[0],
end=self.dates[-1],
env=self.env,
)
algo.run(
FakeDataPortal(),
overwrite_sim_params=False,
)
self.assertTrue(count[0] > 0)
|
apache-2.0
|
mhdella/scikit-learn
|
sklearn/decomposition/tests/test_fastica.py
|
272
|
7798
|
"""
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
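# Illustrative sketch (hypothetical input): center_and_norm modifies its
# argument in place, so for x = np.array([[1., 2., 3.]]) the array becomes
# approximately [[-1.22, 0., 1.22]] (zero mean, unit std along the last axis).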
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
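    # Note: a callable passed as `fun` is expected to return the elementwise
    # non-linearity g(x) together with the mean of its derivative g'(x) along
    # the last axis, which is what g_test provides for g(x) = x**3.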
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
|
bsd-3-clause
|
aflaxman/scikit-learn
|
sklearn/decomposition/tests/test_nmf.py
|
21
|
17922
|
import numpy as np
import scipy.sparse as sp
import numbers
from scipy import linalg
from sklearn.decomposition import NMF, non_negative_factorization
from sklearn.decomposition import nmf # For testing internals
from scipy.sparse import csc_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raise_message, assert_no_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.extmath import squared_norm
from sklearn.base import clone
from sklearn.exceptions import ConvergenceWarning
def test_initialize_nn_output():
# Test that initialization does not return negative values
rng = np.random.mtrand.RandomState(42)
data = np.abs(rng.randn(10, 10))
for init in ('random', 'nndsvd', 'nndsvda', 'nndsvdar'):
W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_parameter_checking():
A = np.ones((2, 2))
name = 'spam'
msg = "Invalid solver parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, NMF(solver=name).fit, A)
msg = "Invalid init parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, NMF(init=name).fit, A)
msg = "Invalid beta_loss parameter: got 'spam' instead of one"
assert_raise_message(ValueError, msg, NMF(solver='mu',
beta_loss=name).fit, A)
msg = "Invalid beta_loss parameter: solver 'cd' does not handle "
msg += "beta_loss = 1.0"
assert_raise_message(ValueError, msg, NMF(solver='cd',
beta_loss=1.0).fit, A)
msg = "Negative values in data passed to"
assert_raise_message(ValueError, msg, NMF().fit, -A)
assert_raise_message(ValueError, msg, nmf._initialize_nmf, -A,
2, 'nndsvd')
clf = NMF(2, tol=0.1).fit(A)
assert_raise_message(ValueError, msg, clf.transform, -A)
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10, init='nndsvd')
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'nndsvda' and 'nndsvdar' differ from basic
# 'nndsvd' only where the basic version has zeros.
rng = np.random.mtrand.RandomState(42)
data = np.abs(rng.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, init='nndsvd')
Wa, Ha = nmf._initialize_nmf(data, 10, init='nndsvda')
War, Har = nmf._initialize_nmf(data, 10, init='nndsvdar',
random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_almost_equal(evl[ref != 0], ref[ref != 0])
# ignore UserWarning raised when both solver='mu' and init='nndsvd'
@ignore_warnings(category=UserWarning)
def test_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for solver in ('cd', 'mu'):
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random'):
model = NMF(n_components=2, solver=solver, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_nmf_fit_close():
rng = np.random.mtrand.RandomState(42)
# Test that the fit is not too far away
for solver in ('cd', 'mu'):
pnmf = NMF(5, solver=solver, init='nndsvdar', random_state=0,
max_iter=600)
X = np.abs(rng.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.1)
def test_nmf_transform():
# Test that NMF.transform returns close values
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(6, 5))
for solver in ['cd', 'mu']:
m = NMF(solver=solver, n_components=3, init='random',
random_state=0, tol=1e-5)
ft = m.fit_transform(A)
t = m.transform(A)
assert_array_almost_equal(ft, t, decimal=2)
def test_nmf_transform_custom_init():
# Smoke test that checks if NMF.transform works with custom initialization
random_state = np.random.RandomState(0)
A = np.abs(random_state.randn(6, 5))
n_components = 4
avg = np.sqrt(A.mean() / n_components)
H_init = np.abs(avg * random_state.randn(n_components, 5))
W_init = np.abs(avg * random_state.randn(6, n_components))
m = NMF(solver='cd', n_components=n_components, init='custom',
random_state=0)
m.fit_transform(A, W=W_init, H=H_init)
m.transform(A)
def test_nmf_inverse_transform():
# Test that NMF.inverse_transform returns close values
random_state = np.random.RandomState(0)
A = np.abs(random_state.randn(6, 4))
for solver in ('cd', 'mu'):
m = NMF(solver=solver, n_components=4, init='random', random_state=0,
max_iter=1000)
ft = m.fit_transform(A)
A_new = m.inverse_transform(ft)
assert_array_almost_equal(A, A_new, decimal=2)
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(30, 10))
NMF(n_components=15, random_state=0, tol=1e-2).fit(A)
def test_nmf_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
A_sparse = csc_matrix(A)
for solver in ('cd', 'mu'):
est1 = NMF(solver=solver, n_components=5, init='random',
random_state=0, tol=1e-2)
est2 = clone(est1)
W1 = est1.fit_transform(A)
W2 = est2.fit_transform(A_sparse)
H1 = est1.components_
H2 = est2.components_
assert_array_almost_equal(W1, W2)
assert_array_almost_equal(H1, H2)
def test_nmf_sparse_transform():
# Test that transform works on sparse data. Issue #2124
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(3, 2))
A[1, 1] = 0
A = csc_matrix(A)
for solver in ('cd', 'mu'):
model = NMF(solver=solver, random_state=0, n_components=2,
max_iter=400)
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
assert_array_almost_equal(A_fit_tr, A_tr, decimal=1)
def test_non_negative_factorization_consistency():
# Test that the function is called in the same way, either directly
# or through the NMF class
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
for solver in ('cd', 'mu'):
W_nmf, H, _ = non_negative_factorization(
A, solver=solver, random_state=1, tol=1e-2)
W_nmf_2, _, _ = non_negative_factorization(
A, H=H, update_H=False, solver=solver, random_state=1, tol=1e-2)
model_class = NMF(solver=solver, random_state=1, tol=1e-2)
W_cls = model_class.fit_transform(A)
W_cls_2 = model_class.transform(A)
assert_array_almost_equal(W_nmf, W_cls, decimal=10)
assert_array_almost_equal(W_nmf_2, W_cls_2, decimal=10)
def test_non_negative_factorization_checking():
A = np.ones((2, 2))
    # Test parameter checking in the public function
nnmf = non_negative_factorization
assert_no_warnings(nnmf, A, A, A, np.int64(1))
msg = ("Number of components must be a positive integer; "
"got (n_components=1.5)")
assert_raise_message(ValueError, msg, nnmf, A, A, A, 1.5)
msg = ("Number of components must be a positive integer; "
"got (n_components='2')")
assert_raise_message(ValueError, msg, nnmf, A, A, A, '2')
msg = "Negative values in data passed to NMF (input H)"
assert_raise_message(ValueError, msg, nnmf, A, A, -A, 2, 'custom')
msg = "Negative values in data passed to NMF (input W)"
assert_raise_message(ValueError, msg, nnmf, A, -A, A, 2, 'custom')
msg = "Array passed to NMF (input H) is full of zeros"
assert_raise_message(ValueError, msg, nnmf, A, A, 0 * A, 2, 'custom')
msg = "Invalid regularization parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, nnmf, A, A, 0 * A, 2, 'custom', True,
'cd', 2., 1e-4, 200, 0., 0., 'spam')
def _beta_divergence_dense(X, W, H, beta):
"""Compute the beta-divergence of X and W.H for dense array only.
Used as a reference for testing nmf._beta_divergence.
"""
if isinstance(X, numbers.Number):
W = np.array([[W]])
H = np.array([[H]])
X = np.array([[X]])
WH = np.dot(W, H)
if beta == 2:
return squared_norm(X - WH) / 2
WH_Xnonzero = WH[X != 0]
X_nonzero = X[X != 0]
np.maximum(WH_Xnonzero, 1e-9, out=WH_Xnonzero)
if beta == 1:
res = np.sum(X_nonzero * np.log(X_nonzero / WH_Xnonzero))
res += WH.sum() - X.sum()
elif beta == 0:
div = X_nonzero / WH_Xnonzero
res = np.sum(div) - X.size - np.sum(np.log(div))
else:
res = (X_nonzero ** beta).sum()
res += (beta - 1) * (WH ** beta).sum()
res -= beta * (X_nonzero * (WH_Xnonzero ** (beta - 1))).sum()
res /= beta * (beta - 1)
return res
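# For reference: beta=2 corresponds to half the squared Frobenius norm, beta=1
# to the (generalized) Kullback-Leibler divergence and beta=0 to the
# Itakura-Saito divergence, which is what the three branches above implement.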
def test_beta_divergence():
# Compare _beta_divergence with the reference _beta_divergence_dense
n_samples = 20
n_features = 10
n_components = 5
beta_losses = [0., 0.5, 1., 1.5, 2.]
# initialization
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
X[X < 0] = 0.
X_csr = sp.csr_matrix(X)
W, H = nmf._initialize_nmf(X, n_components, init='random', random_state=42)
for beta in beta_losses:
ref = _beta_divergence_dense(X, W, H, beta)
loss = nmf._beta_divergence(X, W, H, beta)
loss_csr = nmf._beta_divergence(X_csr, W, H, beta)
assert_almost_equal(ref, loss, decimal=7)
assert_almost_equal(ref, loss_csr, decimal=7)
def test_special_sparse_dot():
# Test the function that computes np.dot(W, H), only where X is non zero.
n_samples = 10
n_features = 5
n_components = 3
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
X[X < 0] = 0.
X_csr = sp.csr_matrix(X)
W = np.abs(rng.randn(n_samples, n_components))
H = np.abs(rng.randn(n_components, n_features))
WH_safe = nmf._special_sparse_dot(W, H, X_csr)
WH = nmf._special_sparse_dot(W, H, X)
    # test that both results have the same values at the nonzero elements of X_csr
ii, jj = X_csr.nonzero()
WH_safe_data = np.asarray(WH_safe[ii, jj]).ravel()
assert_array_almost_equal(WH_safe_data, WH[ii, jj], decimal=10)
# test that WH_safe and X_csr have the same sparse structure
assert_array_equal(WH_safe.indices, X_csr.indices)
assert_array_equal(WH_safe.indptr, X_csr.indptr)
assert_array_equal(WH_safe.shape, X_csr.shape)
@ignore_warnings(category=ConvergenceWarning)
def test_nmf_multiplicative_update_sparse():
# Compare sparse and dense input in multiplicative update NMF
# Also test continuity of the results with respect to beta_loss parameter
n_samples = 20
n_features = 10
n_components = 5
alpha = 0.1
l1_ratio = 0.5
n_iter = 20
# initialization
rng = np.random.mtrand.RandomState(1337)
X = rng.randn(n_samples, n_features)
X = np.abs(X)
X_csr = sp.csr_matrix(X)
W0, H0 = nmf._initialize_nmf(X, n_components, init='random',
random_state=42)
for beta_loss in (-1.2, 0, 0.2, 1., 2., 2.5):
# Reference with dense array X
W, H = W0.copy(), H0.copy()
W1, H1, _ = non_negative_factorization(
X, W, H, n_components, init='custom', update_H=True,
solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,
l1_ratio=l1_ratio, regularization='both', random_state=42)
# Compare with sparse X
W, H = W0.copy(), H0.copy()
W2, H2, _ = non_negative_factorization(
X_csr, W, H, n_components, init='custom', update_H=True,
solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,
l1_ratio=l1_ratio, regularization='both', random_state=42)
assert_array_almost_equal(W1, W2, decimal=7)
assert_array_almost_equal(H1, H2, decimal=7)
# Compare with almost same beta_loss, since some values have a specific
# behavior, but the results should be continuous w.r.t beta_loss
beta_loss -= 1.e-5
W, H = W0.copy(), H0.copy()
W3, H3, _ = non_negative_factorization(
X_csr, W, H, n_components, init='custom', update_H=True,
solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,
l1_ratio=l1_ratio, regularization='both', random_state=42)
assert_array_almost_equal(W1, W3, decimal=4)
assert_array_almost_equal(H1, H3, decimal=4)
def test_nmf_negative_beta_loss():
# Test that an error is raised if beta_loss < 0 and X contains zeros.
    # Test that the output has no NaN values when the input contains zeros.
n_samples = 6
n_features = 5
n_components = 3
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
X[X < 0] = 0
X_csr = sp.csr_matrix(X)
def _assert_nmf_no_nan(X, beta_loss):
W, H, _ = non_negative_factorization(
X, n_components=n_components, solver='mu', beta_loss=beta_loss,
random_state=0, max_iter=1000)
assert_false(np.any(np.isnan(W)))
assert_false(np.any(np.isnan(H)))
msg = "When beta_loss <= 0 and X contains zeros, the solver may diverge."
for beta_loss in (-0.6, 0.):
assert_raise_message(ValueError, msg, _assert_nmf_no_nan, X, beta_loss)
_assert_nmf_no_nan(X + 1e-9, beta_loss)
for beta_loss in (0.2, 1., 1.2, 2., 2.5):
_assert_nmf_no_nan(X, beta_loss)
_assert_nmf_no_nan(X_csr, beta_loss)
def test_nmf_regularization():
# Test the effect of L1 and L2 regularizations
n_samples = 6
n_features = 5
n_components = 3
rng = np.random.mtrand.RandomState(42)
X = np.abs(rng.randn(n_samples, n_features))
# L1 regularization should increase the number of zeros
l1_ratio = 1.
for solver in ['cd', 'mu']:
regul = nmf.NMF(n_components=n_components, solver=solver,
alpha=0.5, l1_ratio=l1_ratio, random_state=42)
model = nmf.NMF(n_components=n_components, solver=solver,
alpha=0., l1_ratio=l1_ratio, random_state=42)
W_regul = regul.fit_transform(X)
W_model = model.fit_transform(X)
H_regul = regul.components_
H_model = model.components_
W_regul_n_zeros = W_regul[W_regul == 0].size
W_model_n_zeros = W_model[W_model == 0].size
H_regul_n_zeros = H_regul[H_regul == 0].size
H_model_n_zeros = H_model[H_model == 0].size
assert_greater(W_regul_n_zeros, W_model_n_zeros)
assert_greater(H_regul_n_zeros, H_model_n_zeros)
# L2 regularization should decrease the mean of the coefficients
l1_ratio = 0.
for solver in ['cd', 'mu']:
regul = nmf.NMF(n_components=n_components, solver=solver,
alpha=0.5, l1_ratio=l1_ratio, random_state=42)
model = nmf.NMF(n_components=n_components, solver=solver,
alpha=0., l1_ratio=l1_ratio, random_state=42)
W_regul = regul.fit_transform(X)
W_model = model.fit_transform(X)
H_regul = regul.components_
H_model = model.components_
assert_greater(W_model.mean(), W_regul.mean())
assert_greater(H_model.mean(), H_regul.mean())
@ignore_warnings(category=ConvergenceWarning)
def test_nmf_decreasing():
# test that the objective function is decreasing at each iteration
n_samples = 20
n_features = 15
n_components = 10
alpha = 0.1
l1_ratio = 0.5
tol = 0.
# initialization
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
np.abs(X, X)
W0, H0 = nmf._initialize_nmf(X, n_components, init='random',
random_state=42)
for beta_loss in (-1.2, 0, 0.2, 1., 2., 2.5):
for solver in ('cd', 'mu'):
if solver != 'mu' and beta_loss != 2:
# not implemented
continue
W, H = W0.copy(), H0.copy()
previous_loss = None
for _ in range(30):
# one more iteration starting from the previous results
W, H, _ = non_negative_factorization(
X, W, H, beta_loss=beta_loss, init='custom',
n_components=n_components, max_iter=1, alpha=alpha,
solver=solver, tol=tol, l1_ratio=l1_ratio, verbose=0,
regularization='both', random_state=0, update_H=True)
loss = nmf._beta_divergence(X, W, H, beta_loss)
if previous_loss is not None:
assert_greater(previous_loss, loss)
previous_loss = loss
|
bsd-3-clause
|
emon10005/scikit-image
|
doc/examples/plot_phase_unwrap.py
|
14
|
4080
|
"""
================
Phase Unwrapping
================
Some signals can only be observed modulo 2*pi, and this can also apply to
two- and three-dimensional images. In these cases phase unwrapping is
needed to recover the underlying, unwrapped signal. In this example we will
demonstrate an algorithm [1]_ implemented in ``skimage`` at work for such a
problem. One-, two- and three-dimensional images can all be unwrapped using
skimage. Here we will demonstrate phase unwrapping in the two-dimensional case.
"""
import numpy as np
from matplotlib import pyplot as plt
from skimage import data, img_as_float, color, exposure
from skimage.restoration import unwrap_phase
# Load an image as a floating-point grayscale
image = color.rgb2gray(img_as_float(data.chelsea()))
# Scale the image to [0, 4*pi]
image = exposure.rescale_intensity(image, out_range=(0, 4 * np.pi))
# Create a phase-wrapped image in the interval [-pi, pi)
image_wrapped = np.angle(np.exp(1j * image))
# Perform phase unwrapping
image_unwrapped = unwrap_phase(image_wrapped)
fig, ax = plt.subplots(2, 2)
ax1, ax2, ax3, ax4 = ax.ravel()
fig.colorbar(ax1.imshow(image, cmap='gray', vmin=0, vmax=4 * np.pi), ax=ax1)
ax1.set_title('Original')
fig.colorbar(ax2.imshow(image_wrapped, cmap='gray', vmin=-np.pi, vmax=np.pi), ax=ax2)
ax2.set_title('Wrapped phase')
fig.colorbar(ax3.imshow(image_unwrapped, cmap='gray'), ax=ax3)
ax3.set_title('After phase unwrapping')
fig.colorbar(ax4.imshow(image_unwrapped - image, cmap='gray'), ax=ax4)
ax4.set_title('Unwrapped minus original')
"""
.. image:: PLOT2RST.current_figure
The unwrapping procedure accepts masked arrays, and can also optionally
assume cyclic boundaries to connect edges of an image. In the example below,
we study a simple phase ramp which has been split in two by masking
a row of the image.
"""
# Create a simple ramp
image = np.ones((100, 100)) * np.linspace(0, 8 * np.pi, 100).reshape((-1, 1))
# Mask the image to split it in two horizontally
mask = np.zeros_like(image, dtype=np.bool)
mask[image.shape[0] // 2, :] = True
image_wrapped = np.ma.array(np.angle(np.exp(1j * image)), mask=mask)
# Unwrap image without wrap around
image_unwrapped_no_wrap_around = unwrap_phase(image_wrapped,
wrap_around=(False, False))
# Unwrap with wrap around enabled for the 0th dimension
image_unwrapped_wrap_around = unwrap_phase(image_wrapped,
wrap_around=(True, False))
fig, ax = plt.subplots(2, 2)
ax1, ax2, ax3, ax4 = ax.ravel()
fig.colorbar(ax1.imshow(np.ma.array(image, mask=mask), cmap='jet'), ax=ax1)
ax1.set_title('Original')
fig.colorbar(ax2.imshow(image_wrapped, cmap='jet', vmin=-np.pi, vmax=np.pi),
ax=ax2)
ax2.set_title('Wrapped phase')
fig.colorbar(ax3.imshow(image_unwrapped_no_wrap_around, cmap='jet'),
ax=ax3)
ax3.set_title('Unwrapped without wrap_around')
fig.colorbar(ax4.imshow(image_unwrapped_wrap_around, cmap='jet'), ax=ax4)
ax4.set_title('Unwrapped with wrap_around')
plt.show()
"""
.. image:: PLOT2RST.current_figure
In the figures above, the masked row can be seen as a white line across
the image. The difference between the two unwrapped images in the bottom row
is clear: Without wrap around (lower left), the regions above and below the
masked boundary do not interact at all, resulting in an offset between the
two regions of an arbitrary integer times two pi. We could just as well have
unwrapped the regions as two separate images. With wrap around enabled for the
vertical direction (lower right), the situation changes: Unwrapping paths are
now allowed to pass from the bottom to the top of the image and vice versa, in
effect providing a way to determine the offset between the two regions.
References
----------
.. [1] Miguel Arevallilo Herraez, David R. Burton, Michael J. Lalor,
and Munther A. Gdeisat, "Fast two-dimensional phase-unwrapping
algorithm based on sorting by reliability following a noncontinuous
path", Journal Applied Optics, Vol. 41, No. 35, pp. 7437, 2002
"""
|
bsd-3-clause
|
ibell/coolprop
|
dev/Tickets/77.py
|
5
|
2204
|
import matplotlib
matplotlib.use('Qt4Agg')
import numpy
import CoolProp
import CoolProp.CoolProp as CP
print "Testing the derivatives and store results for "+CoolProp.__gitrevision__
keys = ["dpdT","dpdrho","Z","dZ_dDelta","dZ_dTau","B","dBdT","C","dCdT","phir","dphir_dTau","d2phir_dTau2","dphir_dDelta",
"d2phir_dDelta2","d2phir_dDelta_dTau","d3phir_dDelta2_dTau","phi0","dphi0_dTau","d2phi0_dTau2","dphi0_dDelta","d2phi0_dDelta2",
"IsothermalCompressibility"]
multiply = ["VB","dBdT","VC","dCdT","dphir_dDelta","dpdT"]
keys = multiply[:]
fluid = "n-Pentane"
T = numpy.array([ 30,100,150,210])+273.15
D = numpy.array([710, 20, 20,210])
for key in keys:
liquid = CP.DerivTerms(key,T[0],D[0],fluid)
twophase = CP.DerivTerms(key,T[1],D[1],fluid)
gaseous = CP.DerivTerms(key,T[2],D[2],fluid)
supercrit = CP.DerivTerms(key,T[3],D[3],fluid)
print '{:<25}: {:>10.5f}; {:>10.5f}; {:>10.5f}; {:>10.5f}'.format(key,liquid,twophase,gaseous,supercrit)
for key in keys:
liquid = CP.DerivTermsU(key,T[0],D[0],fluid,'SI')
twophase = CP.DerivTermsU(key,T[1],D[1],fluid,'SI')
gaseous = CP.DerivTermsU(key,T[2],D[2],fluid,'SI')
supercrit = CP.DerivTermsU(key,T[3],D[3],fluid,'SI')
print '{:<25}: {:>10.5f}; {:>10.5f}; {:>10.5f}; {:>10.5f}'.format(key,liquid,twophase,gaseous,supercrit)
print
print "Testing Props: "
for key in keys:
liquid = CP.Props(key,"T",float(T[0]),"D",float(D[0]),fluid)
twophase = CP.Props(key,"T",float(T[1]),"D",float(D[1]),fluid)
gaseous = CP.Props(key,"T",float(T[2]),"D",float(D[2]),fluid)
supercrit = CP.Props(key,"T",float(T[3]),"D",float(D[3]),fluid)
print '{:<25}: {:>10.5f}; {:>10.5f}; {:>10.5f}; {:>10.5f}'.format(key,liquid,twophase,gaseous,supercrit)
for key in keys:
liquid = CP.PropsU(key,"T",float(T[0]),"D",float(D[0]),fluid,'SI')
twophase = CP.PropsU(key,"T",float(T[1]),"D",float(D[1]),fluid,'SI')
gaseous = CP.PropsU(key,"T",float(T[2]),"D",float(D[2]),fluid,'SI')
supercrit = CP.PropsU(key,"T",float(T[3]),"D",float(D[3]),fluid,'SI')
print '{:<25}: {:>10.5f}; {:>10.5f}; {:>10.5f}; {:>10.5f}'.format(key,liquid,twophase,gaseous,supercrit)
|
mit
|
aitoralmeida/dl_activity_recognition
|
lstm/data-framer.py
|
1
|
15250
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 11 14:38:48 2017
@author: gazkune
"""
from collections import Counter
import sys
from copy import deepcopy
import pandas as pd
from keras.utils import np_utils
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from gensim.models import Word2Vec
from sklearn.model_selection import StratifiedShuffleSplit
import numpy as np
# Directory of datasets
DIR = '../sensor2vec/casas_aruba_dataset/'
# Choose the specific dataset
#DATASET_CSV = DIR + 'aruba_complete_numeric.csv'
DATASET_CSV = DIR + 'aruba_no_t.csv'
# ACTION_VECTORS = DIR + 'action2vec/actions_vectors.json'
# Word2Vec model
#WORD2VEC_MODEL = DIR + 'action2vec/continuous_complete_numeric_200_10.model' # d=200, win=10
WORD2VEC_MODEL = DIR + 'action2vec/continuous_no_t_50_10.model' # d=50, win=10
# Number of dimensions of an action vector
#ACTION_MAX_LENGTH = 200 # Make coherent with selected WORD2VEC_MODEL
ACTION_MAX_LENGTH = 50 # Make coherent with selected WORD2VEC_MODEL
OUTPUT_ROOT_NAME = 'formatted_data/aruba_continuous_no_t_50_10' # make coherent with WORD2VEC_MODEL
# Time interval for action segmentation
DELTA = 60
# To create training, validation and test set, we can load previously generated X and y files
READ_PREVIOUS_XY = True
PREVIOUS_X = OUTPUT_ROOT_NAME + '_' + str(DELTA) + '_x.npy'
PREVIOUS_Y = OUTPUT_ROOT_NAME + '_' + str(DELTA) + '_y.npy'
"""
Function which implements the data framing to use an embedding layer
Input:
df -> Pandas DataFrame with timestamp, action and activity
activity_to_int -> dict with the mappings between activities and integer indices
delta -> integer to control the segmentation of actions for sequence generation
Output:
X -> array with action index sequences
y -> array with activity labels as integers
    tokenizer -> instance of Tokenizer class used for action/index conversion
"""
def prepare_embeddings(df, activity_to_int, delta = 0):
# Numpy array with all the actions of the dataset
actions = df['action'].values
print "prepare_embeddings: actions length:", len(actions)
# Use tokenizer to generate indices for every action
# Very important to put lower=False, since the Word2Vec model
# has the action names with some capital letters
# Very important to remove '.' and '_' from filters, since they are used
# in action names (T003_21.5)
tokenizer = Tokenizer(lower=False, filters='!"#$%&()*+,-/:;<=>?@[\\]^`{|}~\t\n')
tokenizer.fit_on_texts(actions)
action_index = tokenizer.word_index
print "prepare_embeddings: action_index:"
print action_index.keys()
# Build new list with action indices
trans_actions = np.zeros(len(actions))
for i in xrange(len(actions)):
#print "prepare_embeddings: action:", actions[i]
trans_actions[i] = action_index[actions[i]]
#print trans_actions
X = []
y = []
# Depending on delta, we generate sequences in different ways
if delta == 0:
# Each sequence is composed by the actions of that
# activity instance
current_activity = ""
actionsdf = []
aux_actions = []
i = 0
ACTIVITY_MAX_LENGTH = 0
for index in df.index:
if current_activity == "":
current_activity = df.loc[index, 'activity']
if current_activity != df.loc[index, 'activity']:
y.append(activity_to_int[current_activity])
X.append(actionsdf)
#print current_activity, aux_actions
current_activity = df.loc[index, 'activity']
# reset auxiliary variables
actionsdf = []
aux_actions = []
#print 'Current action: ', action
actionsdf.append(np.array(trans_actions[i]))
aux_actions.append(trans_actions[i])
i = i + 1
# Append the last activity
y.append(activity_to_int[current_activity])
X.append(actionsdf)
if len(actionsdf) > ACTIVITY_MAX_LENGTH:
ACTIVITY_MAX_LENGTH = len(actionsdf)
else:
# TODO: use delta value as the time slice for action segmentation
# as Kasteren et al.
print 'prepare_embeddings: delta value =', delta
current_index = df.index[0]
last_index = df.index[len(df) - 1]
i = 0
DYNAMIC_MAX_LENGTH = 0
while current_index < last_index:
current_time = df.loc[current_index, 'timestamp']
#print 'prepare_embeddings: inside while', i
#print 'prepare_embeddings: current time', current_time
i = i + 1
"""
if i % 10 == 0:
print '.',
"""
actionsdf = []
#auxdf = df.iloc[np.logical_and(df.index >= current_index, df.index < current_index + pd.DateOffset(seconds=delta))]
auxdf = df.loc[np.logical_and(df.timestamp >= current_time, df.timestamp < current_time + pd.DateOffset(seconds=delta))]
#print 'auxdf'
#print auxdf
#first = df.index.get_loc(auxdf.index[0])
first = auxdf.index[0]
#last = df.index.get_loc(auxdf.index[len(auxdf)-1])
last = auxdf.index[len(auxdf)-1]
#print 'First:', first, 'Last:', last
#actionsdf.append(np.array(trans_actions[first:last]))
if first == last:
actionsdf.append(np.array(trans_actions[first]))
else:
for j in xrange(first, last+1):
actionsdf.append(np.array(trans_actions[j]))
if len(actionsdf) > DYNAMIC_MAX_LENGTH:
print " "
DYNAMIC_MAX_LENGTH = len(actionsdf)
print "MAX LENGTH =", DYNAMIC_MAX_LENGTH
print 'First:', auxdf.loc[first, 'timestamp'], 'Last:', auxdf.loc[last, 'timestamp']
print 'first index:', first, 'last index:', last
print 'Length:', len(auxdf)
#print auxdf
#print actionsdf
X.append(actionsdf)
# Find the dominant activity in the time slice of auxdf
activity = auxdf['activity'].value_counts().idxmax()
y.append(activity_to_int[activity])
# Update current_index
#pos = df.index.get_loc(auxdf.index[len(auxdf)-1])
#current_index = df.index[pos+1]
if last < last_index:
current_index = last + 1
else:
current_index = last_index
# Pad sequences
max_sequence_length = 0
if delta != 0:
X = pad_sequences(X, maxlen=DYNAMIC_MAX_LENGTH, dtype='float32')
max_sequence_length = DYNAMIC_MAX_LENGTH
else:
X = pad_sequences(X, maxlen=ACTIVITY_MAX_LENGTH, dtype='float32')
max_sequence_length = ACTIVITY_MAX_LENGTH
return X, y, tokenizer, max_sequence_length
# Function to create the embedding matrix, which will be used to initialize
# the embedding layer of the network
def create_embedding_matrix(tokenizer):
model = Word2Vec.load(WORD2VEC_MODEL)
action_index = tokenizer.word_index
embedding_matrix = np.zeros((len(action_index) + 1, ACTION_MAX_LENGTH))
unknown_words = {}
for action, i in action_index.items():
try:
embedding_vector = model[action]
embedding_matrix[i] = embedding_vector
except Exception as e:
#print type(e) exceptions.KeyError
if action in unknown_words:
unknown_words[action] += 1
else:
unknown_words[action] = 1
print "Number of unknown tokens: " + str(len(unknown_words))
print unknown_words
return embedding_matrix
def createStoreNaiveDatasets(X, y):
print "Naive strategy"
total_examples = len(X)
train_per = 0.6
val_per = 0.2
# test_per = 0.2 # Not needed
train_limit = int(train_per * total_examples)
val_limit = train_limit + int(val_per * total_examples)
X_train = X[0:train_limit]
X_val = X[train_limit:val_limit]
X_test = X[val_limit:]
y_train = y[0:train_limit]
y_val = y[train_limit:val_limit]
y_test = y[val_limit:]
print ' Total examples:', total_examples
print ' Train examples:', len(X_train), len(y_train)
print ' Validation examples:', len(X_val), len(y_val)
print ' Test examples:', len(X_test), len(y_test)
sys.stdout.flush()
X_train = np.array(X_train)
y_train = np.array(y_train)
print ' Activity distribution for training:'
y_train_code = np.array([np.argmax(y_train[x]) for x in xrange(len(y_train))])
print Counter(y_train_code)
X_val = np.array(X_val)
y_val = np.array(y_val)
print ' Activity distribution for validation:'
y_val_code = np.array([np.argmax(y_val[x]) for x in xrange(len(y_val))])
print Counter(y_val_code)
X_test = np.array(X_test)
y_test = np.array(y_test)
print ' Activity distribution for testing:'
y_test_code = np.array([np.argmax(y_test[x]) for x in xrange(len(y_test))])
print Counter(y_test_code)
# Save training, validation and test sets using numpy serialization
np.save(OUTPUT_ROOT_NAME + '_' + str(DELTA) + '_x_train.npy', X_train)
np.save(OUTPUT_ROOT_NAME + '_' + str(DELTA) + '_x_val.npy', X_val)
np.save(OUTPUT_ROOT_NAME + '_' + str(DELTA) + '_x_test.npy', X_test)
np.save(OUTPUT_ROOT_NAME + '_' + str(DELTA) + '_y_train.npy', y_train)
np.save(OUTPUT_ROOT_NAME + '_' + str(DELTA) + '_y_val.npy', y_val)
np.save(OUTPUT_ROOT_NAME + '_' + str(DELTA) + '_y_test.npy', y_test)
print " Formatted data saved"
def createStoreStratifiedDatasets(X, y):
print "Stratified strategy"
# Create the StratifiedShuffleSplit object
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=0)
# Generate indices for training and testing set
# As sss.split() is a generator, we must use next()
train_index, test_index = sss.split(X, y).next()
# Generate X_train, y_train, X_test and y_test
X_train = X[train_index]
y_train = y[train_index]
X_test = X[test_index]
y_test = y[test_index]
# Now, generate the validation sets from the training set
# For validation set we keep using 20% of the training data
train_index, val_index = sss.split(X_train, y_train).next()
X_val = X_train[val_index]
y_val = y_train[val_index]
X_train = X_train[train_index]
y_train = y_train[train_index]
# Print activity distributions to make sure everything is allright
print ' X_train shape:', X_train.shape
print ' y_train shape:', y_train.shape
y_train_code = np.array([np.argmax(y_train[x]) for x in xrange(len(y_train))])
print ' Activity distribution for training:'
print Counter(y_train_code)
print ' X_val shape:', X_val.shape
print ' y_val shape:', y_val.shape
y_val_code = np.array([np.argmax(y_val[x]) for x in xrange(len(y_val))])
print ' Activity distribution for training:'
print Counter(y_val_code)
print ' X_test shape:', X_test.shape
print ' y_test shape:', y_test.shape
y_test_code = np.array([np.argmax(y_test[x]) for x in xrange(len(y_test))])
print ' Activity distribution for training:'
print Counter(y_test_code)
# Save the generated datasets in the corresponding files
np.save(OUTPUT_ROOT_NAME + '_stratified_' + str(DELTA) + '_x_train.npy', X_train)
np.save(OUTPUT_ROOT_NAME + '_stratified_' + str(DELTA) + '_x_val.npy', X_val)
np.save(OUTPUT_ROOT_NAME + '_stratified_' + str(DELTA) + '_x_test.npy', X_test)
np.save(OUTPUT_ROOT_NAME + '_stratified_' + str(DELTA) + '_y_train.npy', y_train)
np.save(OUTPUT_ROOT_NAME + '_stratified_' + str(DELTA) + '_y_val.npy', y_val)
np.save(OUTPUT_ROOT_NAME + '_stratified_' + str(DELTA) + '_y_test.npy', y_test)
# Main function
def main(argv):
if READ_PREVIOUS_XY == False:
# Load dataset from csv file
df = pd.read_csv(DATASET_CSV, parse_dates=[0], header=None)
df.columns = ["timestamp", 'action', 'activity']
#df = df[0:1000] # reduce dataset for tests
unique_activities = df['activity'].unique()
print "Unique activities:"
print unique_activities
total_activities = len(unique_activities)
#action_vectors = json.load(open(ACTION_VECTORS, 'r'))
# Generate the dict to transform activities to integer numbers
activity_to_int = dict((c, i) for i, c in enumerate(unique_activities))
# Generate the dict to transform integer numbers to activities
int_to_activity = dict((i, c) for i, c in enumerate(unique_activities))
# TODO: save those two dicts in a file
# Prepare sequences using action indices
# Each action will be an index which will point to an action vector
# in the weights matrix of the Embedding layer of the network input
# Use 'delta' to establish slicing time; if 0, slicing done on activity type basis
X, y, tokenizer, max_sequence_length = prepare_embeddings(df, activity_to_int, delta=DELTA)
# Create the embedding matrix for the embedding layer initialization
embedding_matrix = create_embedding_matrix(tokenizer)
print 'max sequence length:', max_sequence_length
print 'X shape:', X.shape
print 'embedding matrix shape:', embedding_matrix.shape
# Keep original y (with activity indices) before transforming it to categorical
y_orig = deepcopy(y)
# Tranform class labels to one-hot encoding
y = np_utils.to_categorical(y)
print 'y shape:', y.shape
# Save X, y and embedding_matrix using numpy serialization
np.save(OUTPUT_ROOT_NAME + '_' + str(DELTA) + '_x.npy', X)
np.save(OUTPUT_ROOT_NAME + '_' + str(DELTA) + '_y.npy', y)
np.save(OUTPUT_ROOT_NAME + '_' + str(DELTA) + '_embedding_weights.npy', embedding_matrix)
else:
X = np.load(PREVIOUS_X)
        y = np.load(PREVIOUS_Y)
# Prepare training, validation and testing datasets
# We implement two strategies for this:
# 1: Naive datasets, using only the percentages
# This strategy preserves the original sequences and time dependencies
# It can be useful for stateful LSTMs
#createStoreNaiveDatasets(X, y)
# 2: Stratified datasets, making sure all three sets have the same percentage of classes
# This strategy may break the time dependencies amongst sequences
createStoreStratifiedDatasets(X, y)
if __name__ == "__main__":
main(sys.argv)
|
gpl-3.0
|
henriquemiranda/yambopy
|
yambopy/dbs/electronsdb.py
|
3
|
9258
|
# Copyright (c) 2016, Henrique Miranda
# All rights reserved.
#
# This file is part of the yambopy project
#
from netCDF4 import Dataset
import numpy as np
from itertools import product
import collections
ha2ev = 27.211396132
max_exp = 50
min_exp =-100.
def abs2(x):
return x.real**2 + x.imag**2
def fermi(e):
""" fermi dirac function
"""
if e > max_exp:
return 0
elif e < -max_exp:
return 1
return 1/(np.exp(e)+1)
def fermi_array(e_array,ef,invsmear):
"""
Fermi dirac function for an array
"""
e_array = (e_array-ef)/invsmear
return [ fermi(e) for e in e_array]
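# Illustrative usage sketch (hypothetical numbers): for eigenvalues in eV,
#   occ = fermi_array(np.array([-0.2, 0.0, 0.3]), ef=0.0, invsmear=0.1)
# gives occupations of roughly [0.88, 0.5, 0.05], i.e. states below the Fermi
# level are (mostly) filled and states above it are (mostly) empty.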
def histogram_eiv(eiv,weights,emin=-5.0,emax=5.0,step=0.01,sigma=0.05,ctype='lorentzian'):
"""
Histogram of eigenvalues
"""
eiv = np.array(eiv)
#sigma = 0.005
x = np.arange(emin,emax,step,dtype=np.float32)
y = np.zeros([len(x)],dtype=np.float32)
if ctype == 'gaussian':
        c = 1.0/(sigma*np.sqrt(2))
a = -1.0/(2*sigma)
else:
#lorentzian stuff
s2 = (.5*sigma)**2
c = (.5*sigma)
eiv = eiv.flatten()
weights = weights.flatten()
weights = weights[emin < eiv]
eiv = eiv[emin < eiv]
weights = weights[eiv < emax]
eiv = eiv[eiv < emax]
if ctype == 'gaussian':
for e,w in zip(eiv,weights):
x1 = (x-e)**2
#add gaussian
y += w*c*np.exp(a*x1) # apply the weight, as in the lorentzian branch
else:
#lorentzian stuff
for e,w in zip(eiv,weights):
x1 = (x-e)**2
#add lorentzian
y += w*c/(x1+s2)
return x, y
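# Minimal usage sketch for histogram_eiv (illustrative values, not from a yambo
# database): broaden two eigenvalues with unit weights on a Lorentzian grid.
#   x, y = histogram_eiv([0.0, 1.0], [1.0, 1.0], emin=-2.0, emax=2.0,
#                        step=0.01, sigma=0.05, ctype='lorentzian')
#   # x is the energy grid, y the broadened spectrum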
class YamboElectronsDB():
"""
Class to read information about the electrons from the ``ns.db1`` produced by yambo
Arguments:
``lattice``: instance of YamboLatticeDB or YamboSaveDB
``filename``: netcdf database to read from (default:ns.db1)
"""
def __init__(self,lattice,save='SAVE',filename='ns.db1'):
self.lattice = lattice
self.filename = '%s/%s'%(save,filename)
self.efermi = None
self.readDB()
if self.nkpoints != self.lattice.nkpoints: #sanity check
raise ValueError("The number of k-points in the lattice database and electrons database is different.")
self.expandEigenvalues()
def readDB(self):
try:
database = Dataset(self.filename)
except:
raise IOError("Error opening file %s in YamboElectronsDB"%self.filename)
self.eigenvalues_ibz = database.variables['EIGENVALUES'][0,:]*ha2ev
self.iku_kpoints = database.variables['K-POINTS'][:].T
dimensions = database.variables['DIMENSIONS'][:]
self.nbands = dimensions[5]
self.temperature = dimensions[13]
self.nelectrons = int(dimensions[14])
self.nkpoints = int(dimensions[6])
self.nbands = int(dimensions[5])
self.spin = int(dimensions[11])
self.time_rev = dimensions[9]
database.close()
#spin degeneracy: 1 if the database has 2 spin components, 2 otherwise
self.spin_degen = [0,2,1][int(self.spin)]
#number of occupied bands
self.nbandsv = self.nelectrons / self.spin_degen
self.nbandsc = self.nbands-self.nbandsv
def expandEigenvalues(self):
"""
Expand eigenvalues to the full brillouin zone
"""
self.eigenvalues = self.eigenvalues_ibz[self.lattice.kpoints_indexes]
self.nkpoints_ibz = len(self.eigenvalues_ibz)
self.weights_ibz = np.zeros([self.nkpoints_ibz],dtype=np.float32)
self.nkpoints = len(self.eigenvalues)
#counter counts the number of occurences of element in a list
for nk_ibz,inv_weight in collections.Counter(self.lattice.kpoints_indexes).items():
self.weights_ibz[nk_ibz] = float(inv_weight)/self.nkpoints
#kpoints weights
self.weights = np.full((self.nkpoints), 1.0/self.nkpoints,dtype=np.float32)
def getDOS(self,broad=0.1,emin=-10,emax=10,step=0.01):
"""
Calculate the density of states.
Should work for metals as well but untested for that case
"""
eigenvalues = self.eigenvalues_ibz
weights = self.weights_ibz
nkpoints = self.nkpoints_ibz
na = np.newaxis
weights_bands = np.ones(eigenvalues.shape,dtype=np.float32)*weights[:,na]
energies, self.dos = histogram_eiv(eigenvalues,weights_bands,emin=emin,emax=emax,step=step,sigma=broad)
return energies, self.dos
def setLifetimes(self,broad=0.1):
"""
Set a constant electronic lifetime (broadening) for all states
"""
self.lifetimes_ibz = np.ones(self.eigenvalues_ibz.shape,dtype=np.float32)*broad
self.lifetimes = np.ones(self.eigenvalues.shape,dtype=np.float32)*broad
def setLifetimesDOS(self,broad=0.1,debug=False):
"""
Approximate the electronic lifetimes using the DOS
"""
eigenvalues = self.eigenvalues_ibz
weights = self.weights_ibz
nkpoints = self.nkpoints_ibz
#get dos
emin = np.min(eigenvalues)-broad
emax = np.max(eigenvalues)+broad
energies, dos = self.getDOS(emin=emin, emax=emax, step=0.1, broad=broad)
#normalize dos to broad
dos = dos/np.max(dos)*broad
#create a interpolation function to get the lifetimes for all the values
from scipy.interpolate import interp1d
f = interp1d(energies, dos, kind='cubic')
if debug:
"""
plot the calculated values for the DOS and the interpolated values
"""
import matplotlib.pyplot as plt
x = np.arange(emin+broad,emax-broad,0.001) # 'd' was undefined; use the broadening as margin
plt.plot(energies,dos,'o')
plt.plot(x,f(x))
plt.show()
exit()
#add imaginary part to the energies proportional to the DOS
self.lifetimes_ibz = np.array([ [f(eig) for eig in eigk] for eigk in self.eigenvalues_ibz],dtype=np.float32)
self.lifetimes = np.array([ [f(eig) for eig in eigk] for eigk in self.eigenvalues],dtype=np.float32)
def setFermi(self,fermi,invsmear):
"""
Set the fermi energy of the system
"""
self.invsmear = invsmear
self.efermi = fermi
#full brillouin zone
self.eigenvalues -= self.efermi
self.occupations = np.zeros([self.nkpoints,self.nbands],dtype=np.float32)
for nk in xrange(self.nkpoints):
self.occupations[nk] = fermi_array(self.eigenvalues[nk,:],0,self.invsmear)
#for the ibz
self.eigenvalues_ibz -= self.efermi
self.occupations_ibz = np.zeros([self.nkpoints_ibz,self.nbands],dtype=np.float32)
for nk in xrange(self.nkpoints_ibz):
self.occupations_ibz[nk] = fermi_array(self.eigenvalues_ibz[nk,:],0,self.invsmear)
return self.efermi
def setFermiFixed(self,broad=1e-5):
"""
Get fermi level using fixed occupations method
Useful for semi-conductors
"""
eigenvalues = self.eigenvalues_ibz
weights = self.weights_ibz
nkpoints = self.nkpoints_ibz
nbands = self.nelectrons/self.spin_degen
#top of the valence band (highest occupied band, index nbands-1)
top = np.max(eigenvalues[:,nbands-1])
#bottom of the conduction band (lowest unoccupied band, index nbands)
bot = np.min(eigenvalues[:,nbands])
self.efermi = (top+bot)/2
self.setFermi(self.efermi,broad)
def energy_gaps(self,GWshift=0.):
"""
Calculate the energy of the gap (by Fulvio Paleari)
"""
eiv = self.eigenvalues_ibz
nv = self.nbandsv
nc = self.nbandsc
homo = np.max(eiv[:,nv-1])
lumo = np.min(eiv[:,nv])
Egap = lumo-homo
for k in eiv:
if k[nv-1]==homo:
lumo_dir=k[nv]
Edir = lumo_dir-homo
eiv[:,nv:]+=GWshift
print('DFT Energy gap: %s eV'%Egap)
print('DFT Direct gap: %s eV'%Edir)
print('GW shift: %s eV'%GWshift)
return np.copy(eiv)
def getFermi(self,invsmear,setfermi=True):
"""
Determine the fermi energy
"""
if self.efermi: return self.efermi
from scipy.optimize import bisect
eigenvalues = self.eigenvalues_ibz
weights = self.weights_ibz
nkpoints = self.nkpoints_ibz
min_eival, max_eival = np.min(eigenvalues), np.max(eigenvalues)
self.invsmear = invsmear
def occupation_minus_ne(ef):
"""
The total occupation minus the total number of electrons
"""
return sum([sum(self.spin_degen*fermi_array(eigenvalues[nk],ef,self.invsmear))*weights[nk] for nk in xrange(nkpoints)])-self.nelectrons
fermi = bisect(occupation_minus_ne,min_eival,max_eival)
if setfermi: self.setFermi(fermi,invsmear)
return self.efermi
def __str__(self):
s = ""
s += "spin_degen: %d\n"%self.spin_degen
s += "nelectrons: %d\n"%self.nelectrons
s += "nbands: %d\n"%self.nbands
s += "nkpoints: %d"%self.nkpoints
return s
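# Usage sketch (hypothetical variable names; assumes `lat` is a YamboLatticeDB
# built from the same SAVE folder):
#   el = YamboElectronsDB(lat, save='SAVE')
#   ef = el.getFermi(invsmear=0.01)         # bisection on the total occupation
#   energies, dos = el.getDOS(broad=0.05)   # broadened density of states
#   el.setLifetimesDOS(broad=0.1)           # lifetimes proportional to the DOS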
|
bsd-3-clause
|
google/trimmed_match
|
trimmed_match/design/tests/matched_pairs_rmse_test.py
|
1
|
12029
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Lint: python3
"""Tests for ads.amt.geox.trimmed_match.design."""
from absl.testing import parameterized
import pandas as pd
from trimmed_match.design import common_classes
from trimmed_match.design import matched_pairs_rmse
import unittest
GeoLevelData = common_classes.GeoLevelData
GeoXType = common_classes.GeoXType
GeoLevelPotentialOutcomes = common_classes.GeoLevelPotentialOutcomes
MatchedPairsRMSE = matched_pairs_rmse.MatchedPairsRMSE
class ConstructPotentialOutcomesTest(unittest.TestCase):
def setUp(self):
super().setUp()
self._geox_eval_data = pd.DataFrame({
"geo": [1, 2],
"response": [10, 20],
"spend": [10, 20]
})
self._budget = 30
self._hypothesized_iroas = 1
def testGoDark(self):
potential_outcomes = matched_pairs_rmse._construct_potential_outcomes(
GeoXType.GO_DARK, self._geox_eval_data,
(self._budget * 2.0 / self._geox_eval_data.spend.sum()),
self._hypothesized_iroas)
expected = {
1:
GeoLevelPotentialOutcomes(
controlled=GeoLevelData(geo=1, response=10, spend=20),
treated=GeoLevelData(geo=1, response=0, spend=0)),
2:
GeoLevelPotentialOutcomes(
controlled=GeoLevelData(geo=2, response=20, spend=40),
treated=GeoLevelData(geo=2, response=0, spend=0))
}
self.assertDictEqual(expected, potential_outcomes)
def testHeavyUp(self):
potential_outcomes = matched_pairs_rmse._construct_potential_outcomes(
GeoXType.HEAVY_UP, self._geox_eval_data,
(self._budget * 2.0 / self._geox_eval_data.spend.sum()),
self._hypothesized_iroas)
expected = {
1:
GeoLevelPotentialOutcomes(
controlled=GeoLevelData(geo=1, response=10, spend=10),
treated=GeoLevelData(geo=1, response=30.0, spend=30.0)),
2:
GeoLevelPotentialOutcomes(
controlled=GeoLevelData(geo=2, response=20, spend=20),
treated=GeoLevelData(geo=2, response=60.0, spend=60.0))
}
self.assertDictEqual(expected, potential_outcomes)
def testHeavyDown(self):
potential_outcomes = matched_pairs_rmse._construct_potential_outcomes(
GeoXType.HEAVY_DOWN, self._geox_eval_data, self._budget,
self._hypothesized_iroas)
expected = {
1:
GeoLevelPotentialOutcomes(
controlled=GeoLevelData(geo=1, response=10, spend=10),
treated=GeoLevelData(geo=1, response=0.0, spend=0.0)),
2:
GeoLevelPotentialOutcomes(
controlled=GeoLevelData(geo=2, response=20, spend=20),
treated=GeoLevelData(geo=2, response=0.0, spend=0.0))
}
self.assertDictEqual(expected, potential_outcomes)
def testHoldBack(self):
potential_outcomes = matched_pairs_rmse._construct_potential_outcomes(
GeoXType.HOLD_BACK, self._geox_eval_data,
(self._budget * 2.0 / self._geox_eval_data.spend.sum()),
self._hypothesized_iroas)
expected = {
1:
GeoLevelPotentialOutcomes(
controlled=GeoLevelData(geo=1, response=10, spend=0.0),
treated=GeoLevelData(geo=1, response=30.0, spend=20.0)),
2:
GeoLevelPotentialOutcomes(
controlled=GeoLevelData(geo=2, response=20, spend=0.0),
treated=GeoLevelData(geo=2, response=60.0, spend=40.0))
}
self.assertDictEqual(expected, potential_outcomes)
def testUnknownGeoXType(self):
"""Checks an error is raised if the GeoX type is unknown."""
with self.assertRaisesRegex(ValueError, "Unknown geox_type: \'UNKNOWN\'"):
matched_pairs_rmse._construct_potential_outcomes(
"UNKNOWN", self._geox_eval_data,
(self._budget * 2.0 / self._geox_eval_data.spend.sum()),
self._hypothesized_iroas)
class IsPairedTest(unittest.TestCase):
def setUp(self):
super().setUp()
self._ordered_list_one = [1, 1, 2, 2, 3, 3]
self._ordered_list_two = [1, 2, 2, 2, 3, 3]
def testIsPaired(self):
self.assertTrue(matched_pairs_rmse._is_paired(self._ordered_list_one))
def testIsNotPaired(self):
self.assertFalse(matched_pairs_rmse._is_paired(self._ordered_list_two))
class MatchedPairsRMSETest(unittest.TestCase):
def setUp(self):
super().setUp()
self._geo_pairs_eval_data = pd.DataFrame({
"geo": [1, 2, 3, 4],
"pair": [1, 1, 2, 2],
"response": [10, 20, 30, 40],
"spend": [10, 20, 30, 40]
})
self._perfect_geo_pairs_eval_data = pd.DataFrame({
"geo": [1, 2, 3, 4],
"pair": [1, 1, 2, 2],
"response": [10, 10, 30, 30],
"spend": [10, 10, 30, 30]
})
self._budget = 10
self._hypothesized_iroas = 1
# TODO(b/147698415): add a more complex test example here
def AssertEqualGeoLevelData(self, outcome1: GeoLevelData,
outcome2: GeoLevelData):
"""Checks whether two GeoLevelDatas are equal."""
self.assertEqual(outcome1.geo, outcome2.geo)
self.assertEqual(outcome1.response, outcome2.response)
self.assertEqual(outcome1.spend, outcome2.spend)
def testHypothesizedIroasNegative(self):
"""Checks an error is raised if the hypothesized iROAS is negative."""
with self.assertRaisesRegex(ValueError, "iROAS must be positive, got -1.0"):
MatchedPairsRMSE(GeoXType.GO_DARK, self._geo_pairs_eval_data,
self._budget, -1.0)
def testGeosNotUnique(self):
"""Checks an error is raised if geos are duplicated."""
geo_pairs_eval_data = self._geo_pairs_eval_data.copy()
geo_pairs_eval_data.loc[geo_pairs_eval_data["geo"] == 3, "geo"] = 1
with self.assertRaisesRegex(ValueError,
"Geos are not unique in geo_pairs_eval_data"):
MatchedPairsRMSE(GeoXType.GO_DARK, geo_pairs_eval_data, self._budget,
self._hypothesized_iroas)
def testGeosNotPairedProperly(self):
"""Checks an error is raised if geos are not paired properly."""
geo_pairs_eval_data = self._geo_pairs_eval_data.copy()
geo_pairs_eval_data.loc[geo_pairs_eval_data["geo"] == 3, "pair"] = 1
with self.assertRaisesRegex(
KeyError, "Geos in geo_pairs_eval_data are not paired properly"):
MatchedPairsRMSE(GeoXType.GO_DARK, geo_pairs_eval_data, self._budget,
self._hypothesized_iroas)
def testSimulateGeoXDataRandomization(self):
"""Checks randomization within the pair."""
mpr = MatchedPairsRMSE(GeoXType.GO_DARK, self._geo_pairs_eval_data,
self._budget, self._hypothesized_iroas)
geox_data = mpr._simulate_geox_data(0)
for pair, value in geox_data.items():
expected = mpr._paired_geos[pair].values()
self.assertSetEqual(
set(expected), set([value.controlled.geo, value.treated.geo]))
def testSimulatedGeoXDataValue(self):
"""Checks the data accuracy."""
for geox_type in GeoXType:
if geox_type == GeoXType.CONTROL:
continue
mpr = MatchedPairsRMSE(geox_type, self._geo_pairs_eval_data, self._budget,
self._hypothesized_iroas)
geox_data = mpr._simulate_geox_data(0)
for _, value in geox_data.items():
treatment_geo = value.treated.geo
control_geo = value.controlled.geo
treatment_geo_outcome = mpr._potential_outcomes[treatment_geo].treated
control_geo_outcome = mpr._potential_outcomes[control_geo].controlled
self.AssertEqualGeoLevelData(treatment_geo_outcome, value.treated)
self.AssertEqualGeoLevelData(control_geo_outcome, value.controlled)
def testReportValueError(self):
mpr = MatchedPairsRMSE(
GeoXType.HOLD_BACK,
self._geo_pairs_eval_data,
self._budget,
self._hypothesized_iroas,
base_seed=1000)
@parameterized.parameters((-0.1, -0.2), (0.5, 0.1), (0.25, 0.3))
def _(self, max_trim_rate, trim_rate):
with self.assertRaises(ValueError):
mpr.report(1, max_trim_rate, trim_rate)
def testReportPerfectiROAS(self):
"""Checks the calculation with zero RMSE."""
for geox_type in GeoXType:
if geox_type in [GeoXType.HOLD_BACK, GeoXType.CONTROL, GeoXType.GO_DARK]:
continue
mpr = MatchedPairsRMSE(
geox_type,
self._geo_pairs_eval_data,
self._budget,
self._hypothesized_iroas,
base_seed=1000)
(report, _) = mpr.report(num_simulations=100, trim_rate=0.0)
self.assertEqual(0.0, report)
def testReportPerfectPairs(self):
"""Checks the calculation with perfect pairs."""
for geox_type in GeoXType:
if geox_type == GeoXType.CONTROL:
continue
mpr = MatchedPairsRMSE(
geox_type,
self._perfect_geo_pairs_eval_data,
self._budget,
0.0,
base_seed=1000)
report, _ = mpr.report(num_simulations=100, trim_rate=0.0)
self.assertEqual(0.0, report)
def testReportNoisy(self):
"""Checks the calculation with nonzero RMSE."""
mpr = MatchedPairsRMSE(
GeoXType.HOLD_BACK,
self._geo_pairs_eval_data,
self._budget,
self._hypothesized_iroas,
base_seed=100000)
(report, _) = mpr.report(num_simulations=100, trim_rate=0.0)
self.assertAlmostEqual(1.5, report, delta=0.1)
def testReportNoisyDifferentGeoOrder(self):
"""Checks the calculation with nonzero RMSE when geo_pairs_eval_data order is changed."""
mpr = MatchedPairsRMSE(
GeoXType.HOLD_BACK,
self._geo_pairs_eval_data,
self._budget,
self._hypothesized_iroas,
base_seed=100000)
(report, _) = mpr.report(num_simulations=100)
mpr_sorted = MatchedPairsRMSE(
GeoXType.HOLD_BACK,
self._geo_pairs_eval_data.sort_values(
by=["pair", "geo"], ascending=[True, False]),
self._budget,
self._hypothesized_iroas,
base_seed=100000)
(report_sorted, _) = mpr_sorted.report(num_simulations=100)
self.assertAlmostEqual(
abs(report - report_sorted) / report_sorted, 0, delta=0.00001)
def testReportTrimmedPairs(self):
"""Checks the reported trimmed pairs in a simulation."""
dataframe = pd.DataFrame({
"geo": [1, 2, 3, 4, 5, 6, 7, 8],
"response": [10, 11, 20, 30, 30, 33, 40, 48],
"spend": [1.0, 2.0, 3.0, 7.0, 3.0, 5.0, 4.0, 9.0],
"pair": [1, 1, 2, 2, 3, 3, 4, 4],
})
base_seed = 1000
trimmed_pairs = {
GeoXType.GO_DARK: [2, 3],
GeoXType.HOLD_BACK: [2, 3],
GeoXType.HEAVY_UP: [3, 4],
GeoXType.HEAVY_DOWN: [3, 4]
}
for geox_type in GeoXType:
if geox_type == GeoXType.CONTROL:
continue
mpr = MatchedPairsRMSE(
geox_type=geox_type,
geo_pairs_eval_data=dataframe,
budget=1.0,
hypothesized_iroas=0.0,
base_seed=base_seed)
_, report = mpr.report(num_simulations=1, trim_rate=0.0)
self.assertFalse(report.trimmed_pairs.values[0])
_, report = mpr.report(
num_simulations=1, trim_rate=0.25, max_trim_rate=0.25)
self.assertCountEqual(report.trimmed_pairs.values[0],
trimmed_pairs[geox_type])
if __name__ == "__main__":
unittest.main()
|
apache-2.0
|
GiggleLiu/tba
|
hgen/tests/test_kron.py
|
1
|
1935
|
from numpy import *
from numpy.testing import dec,assert_,assert_raises,assert_almost_equal,assert_allclose
from matplotlib.pyplot import *
from numpy.linalg import eigh,eigvalsh
from scipy import sparse as sps
from scipy.sparse.linalg import eigsh
import time,pdb,sys
sys.path.insert(0,'../')
from tba.hgen import SpaceConfig,SuperSpaceConfig,SpinSpaceConfig,RHGenerator,op_simple_hopping,op_U,op_simple_onsite
from tba.lattice import Structure
from blockmatrix.blocklib import eigbsh,eigbh,get_blockmarker,tobdmatrix,SimpleBMG
import kron
def test_kron():
for shpA,shpB in [((0,0),(0,0)),((3,3),(6,3)),\
((2,0),(0,2)),((100,200),(100,200))]:
print shpA,'x',shpB
A=random.random(shpA)
B=random.random(shpB)
A[A<0.8]=0
B[B<0.8]=0
A=sps.csr_matrix(A)
B=sps.csr_matrix(B)
A_coo,B_coo=A.tocoo(),B.tocoo()
t0=time.time()
res1=sps.kron(A_coo,B_coo)
t1=time.time()
res2=kron.kron_csr(A,B)
t2=time.time()
res3=kron.kron_coo(A_coo,B_coo)
t3=time.time()
assert_allclose((res1-res2).data,0)
print 'Test kron time used %s(csr),%s(coo) compared to scipy %s'%(t2-t1,t3-t2,t1-t0)
pdb.set_trace()
def test_kron_takerow():
for shpA,shpB in [((2,0),(2,0)),((100,200),(100,200))]:
print shpA,'x',shpB
A=random.random(shpA)
B=random.random(shpB)
rows=random.randint(0,A.shape[0]*B.shape[0],A.shape[0]*B.shape[0]/2)
A[A<0.8]=0
B[B<0.8]=0
A=sps.csr_matrix(A)
B=sps.csr_matrix(B)
t0=time.time()
res1=sps.kron(A,B).asformat('csr')[rows]
t1=time.time()
res2=kron.kron_csr(A,B,takerows=rows)
t2=time.time()
assert_allclose((res1-res2).data,0)
print 'Test kron(take-row) time used %s(csr) compared to scipy %s'%(t2-t1,t1-t0)
pdb.set_trace()
test_kron_takerow()
test_kron()
|
gpl-2.0
|
mantidproject/mantid
|
scripts/SANS/sans/algorithm_detail/beamcentrefinder_plotting.py
|
3
|
1330
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import sys
IN_WORKBENCH = False
if "workbench.app.mainwindow" in sys.modules:
try:
from mantidqt.plotting.functions import plot
IN_WORKBENCH = True
except ImportError:
pass
def can_plot_beamcentrefinder():
return IN_WORKBENCH
def _plot_quartiles_matplotlib(output_workspaces, sample_scatter):
title = '{}_beam_centre_finder'.format(sample_scatter)
ax_properties = {'xscale': 'log',
'yscale': 'log'}
plot_kwargs = {"scalex": True,
"scaley": True}
if not isinstance(output_workspaces, list):
output_workspaces = [output_workspaces]
assert output_workspaces, "No workspaces were passed into plotting"
plot(output_workspaces, wksp_indices=[0], ax_properties=ax_properties, overplot=True,
plot_kwargs=plot_kwargs, window_title=title)
def plot_workspace_quartiles(output_workspaces, sample_scatter):
if IN_WORKBENCH:
_plot_quartiles_matplotlib(output_workspaces, sample_scatter)
|
gpl-3.0
|
xyguo/scikit-learn
|
sklearn/ensemble/weight_boosting.py
|
28
|
40740
|
"""Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseWeightBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <[email protected]>
# Gilles Louppe <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# Arnaud Joly <[email protected]>
#
# Licence: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.core.umath_tests import inner1d
from .base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin, is_regressor
from ..externals import six
from ..externals.six.moves import zip
from ..externals.six.moves import xrange as range
from .forest import BaseForest
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..tree.tree import BaseDecisionTree
from ..tree._tree import DTYPE
from ..utils import check_array, check_X_y, check_random_state
from ..metrics import accuracy_score, r2_score
from sklearn.utils.validation import has_fit_parameter, check_is_fitted
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for AdaBoost estimators.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator=None,
n_estimators=50,
estimator_params=tuple(),
learning_rate=1.,
random_state=None):
super(BaseWeightBoosting, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.learning_rate = learning_rate
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is
forced to DTYPE from tree._tree if the base classifier of this
ensemble weighted boosting classifier is a tree or forest.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check parameters
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
if (self.base_estimator is None or
isinstance(self.base_estimator, (BaseDecisionTree,
BaseForest))):
dtype = DTYPE
accept_sparse = 'csc'
else:
dtype = None
accept_sparse = ['csr', 'csc']
X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype,
y_numeric=is_regressor(self))
if sample_weight is None:
# Initialize weights to 1 / n_samples
sample_weight = np.empty(X.shape[0], dtype=np.float64)
sample_weight[:] = 1. / X.shape[0]
else:
# Normalize existing weights
sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)
# Check that the sample weights sum is positive
if sample_weight.sum() <= 0:
raise ValueError(
"Attempting to fit with a non-positive "
"weighted number of samples.")
# Check parameters
self._validate_estimator()
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float64)
self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float64)
for iboost in range(self.n_estimators):
# Boosting step
sample_weight, estimator_weight, estimator_error = self._boost(
iboost,
X, y,
sample_weight)
# Early termination
if sample_weight is None:
break
self.estimator_weights_[iboost] = estimator_weight
self.estimator_errors_[iboost] = estimator_error
# Stop if error is zero
if estimator_error == 0:
break
sample_weight_sum = np.sum(sample_weight)
# Stop if the sum of sample weights has become non-positive
if sample_weight_sum <= 0:
break
if iboost < self.n_estimators - 1:
# Normalize
sample_weight /= sample_weight_sum
return self
@abstractmethod
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Warning: This method needs to be overridden by subclasses.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
pass
def staged_score(self, X, y, sample_weight=None):
"""Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like, shape = [n_samples]
Labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
z : float
"""
for y_pred in self.staged_predict(X):
if isinstance(self, ClassifierMixin):
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
else:
yield r2_score(y, y_pred, sample_weight=sample_weight)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
try:
norm = self.estimator_weights_.sum()
return (sum(weight * clf.feature_importances_ for weight, clf
in zip(self.estimator_weights_, self.estimators_))
/ norm)
except AttributeError:
raise AttributeError(
"Unable to compute feature importances "
"since base_estimator does not have a "
"feature_importances_ attribute")
def _validate_X_predict(self, X):
"""Ensure that X is in the proper format"""
if (self.base_estimator is None or
isinstance(self.base_estimator,
(BaseDecisionTree, BaseForest))):
X = check_array(X, accept_sparse='csr', dtype=DTYPE)
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return X
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
log_proba = np.log(proba)
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
"""An AdaBoost classifier.
An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm known as AdaBoost-SAMME [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeClassifier)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper `classes_`
and `n_classes_` attributes.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes]
The classes labels.
n_classes_ : int
The number of classes.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
algorithm='SAMME.R',
random_state=None):
super(AdaBoostClassifier, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.algorithm = algorithm
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
Returns self.
"""
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported" % self.algorithm)
# Fit
return super(AdaBoostClassifier, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostClassifier, self)._validate_estimator(
default=DecisionTreeClassifier(max_depth=1))
# SAMME-R requires predict_proba-enabled base estimators
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator_, 'predict_proba'):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead.")
if not has_fit_parameter(self.base_estimator_, "sample_weight"):
raise ValueError("%s doesn't support sample_weight."
% self.base_estimator_.__class__.__name__)
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == 'SAMME.R':
return self._boost_real(iboost, X, y, sample_weight)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight)
def _boost_real(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME.R real algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict_proba = estimator.predict_proba(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
axis=0)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
# Construct y coding as described in Zhu et al [2]:
#
# y_k = 1 if c == k else -1 / (K - 1)
#
# where K == n_classes_ and c, k in [0, K) are indices along the second
# axis of the y coding with c being the index corresponding to the true
# class label.
n_classes = self.n_classes_
classes = self.classes_
y_codes = np.array([-1. / (n_classes - 1), 1.])
y_coding = y_codes.take(classes == y[:, np.newaxis])
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba = y_predict_proba # alias for readability
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
# Boost weight using multi-class AdaBoost SAMME.R alg
estimator_weight = (-1. * self.learning_rate
* (((n_classes - 1.) / n_classes) *
inner1d(y_coding, np.log(y_predict_proba))))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, 1., estimator_error
def _boost_discrete(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict = estimator.predict(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
n_classes = self.n_classes_
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1. - (1. / n_classes):
self.estimators_.pop(-1)
if len(self.estimators_) == 0:
raise ValueError('BaseClassifier in AdaBoostClassifier '
'ensemble is worse than random, ensemble '
'can not be fit.')
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
estimator_weight = self.learning_rate * (
np.log((1. - estimator_error) / estimator_error) +
np.log(n_classes - 1.))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight * incorrect *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, estimator_weight, estimator_error
def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted classes.
"""
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(
np.argmax(pred, axis=1), axis=0))
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : array, shape = [n_samples, k]
The decision function of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
pred = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
pred = sum((estimator.predict(X) == classes).T * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
pred /= self.estimator_weights_.sum()
if n_classes == 2:
pred[:, 0] *= -1
return pred.sum(axis=1)
return pred
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each boosting iteration.
This method allows monitoring (i.e. determine error on testing set)
after each boosting iteration.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_pred = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_pred = estimator.predict(X)
current_pred = (current_pred == classes).T * weight
if pred is None:
pred = current_pred
else:
pred += current_pred
if n_classes == 2:
tmp_pred = np.copy(pred)
tmp_pred[:, 0] *= -1
yield (tmp_pred / norm).sum(axis=1)
else:
yield pred / norm
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
"""
check_is_fitted(self, "n_classes_")
n_classes = self.n_classes_
X = self._validate_X_predict(X)
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
proba = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
proba = sum(estimator.predict_proba(X) * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
proba /= self.estimator_weights_.sum()
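# Invert the additive logistic (symmetric log-ratio) transform used by
# SAMME.R: p_k is proportional to exp(f_k / (K - 1)); renormalized just below.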
proba = np.exp((1. / (n_classes - 1)) * proba)
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
p : generator of array, shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
"""
X = self._validate_X_predict(X)
n_classes = self.n_classes_
proba = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_proba = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_proba = estimator.predict_proba(X) * weight
if proba is None:
proba = current_proba
else:
proba += current_proba
real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
normalizer = real_proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
real_proba /= normalizer
yield real_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample are computed as
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
"""
return np.log(self.predict_proba(X))
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeRegressor)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each regressor by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
loss : {'linear', 'square', 'exponential'}, optional (default='linear')
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
loss='linear',
random_state=None):
super(AdaBoostRegressor, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.loss = loss
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (real numbers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check loss
if self.loss not in ('linear', 'square', 'exponential'):
raise ValueError(
"loss must be 'linear', 'square', or 'exponential'")
# Fit
return super(AdaBoostRegressor, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostRegressor, self)._validate_estimator(
default=DecisionTreeRegressor(max_depth=3))
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
generator = check_random_state(self.random_state)
# Weighted sampling of the training set with replacement
# For NumPy >= 1.7.0 use np.random.choice
cdf = sample_weight.cumsum()
cdf /= cdf[-1]
uniform_samples = generator.random_sample(X.shape[0])
bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
bootstrap_idx = np.array(bootstrap_idx, copy=False)
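# Tiny worked example of the inverse-CDF sampling above (illustrative numbers):
# sample_weight = [0.5, 0.25, 0.25] gives cdf = [0.5, 0.75, 1.0]; a uniform
# draw of 0.6 falls in (0.5, 0.75], so searchsorted returns index 1 and the
# second training sample is drawn.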
# Fit on the bootstrapped sample and obtain a prediction
# for all samples in the training set
estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
y_predict = estimator.predict(X)
error_vect = np.abs(y_predict - y)
error_max = error_vect.max()
if error_max != 0.:
error_vect /= error_max
if self.loss == 'square':
error_vect **= 2
elif self.loss == 'exponential':
error_vect = 1. - np.exp(- error_vect)
# Calculate the average loss
estimator_error = (sample_weight * error_vect).sum()
if estimator_error <= 0:
# Stop if fit is perfect
return sample_weight, 1., 0.
elif estimator_error >= 0.5:
# Discard current estimator only if it isn't the only one
if len(self.estimators_) > 1:
self.estimators_.pop(-1)
return None, None, None
beta = estimator_error / (1. - estimator_error)
# Boost weight using AdaBoost.R2 alg
estimator_weight = self.learning_rate * np.log(1. / beta)
if not iboost == self.n_estimators - 1:
sample_weight *= np.power(
beta,
(1. - error_vect) * self.learning_rate)
return sample_weight, estimator_weight, estimator_error
def _get_median_predict(self, X, limit):
# Evaluate predictions of all estimators
predictions = np.array([
est.predict(X) for est in self.estimators_[:limit]]).T
# Sort the predictions
sorted_idx = np.argsort(predictions, axis=1)
# Find index of median prediction for each sample
weight_cdf = self.estimator_weights_[sorted_idx].cumsum(axis=1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
median_idx = median_or_above.argmax(axis=1)
median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx]
# Return median predictions
return predictions[np.arange(X.shape[0]), median_estimators]
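# Tiny worked example of the weighted median above (illustrative numbers): for
# one sample with sorted predictions [1.0, 2.0, 5.0] and estimator weights
# [0.2, 0.5, 0.3], the cumulative weights are [0.2, 0.7, 1.0]; the first entry
# >= 0.5 * 1.0 is at index 1, so the weighted median prediction is 2.0.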
def predict(self, X):
"""Predict regression value for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
return self._get_median_predict(X, len(self.estimators_))
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
for i, _ in enumerate(self.estimators_, 1):
yield self._get_median_predict(X, limit=i)
|
bsd-3-clause
|
michigraber/scikit-learn
|
sklearn/linear_model/tests/test_coordinate_descent.py
|
44
|
22866
|
# Authors: Olivier Grisel <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
# Check that the lasso can handle zero data without crashing
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
# Test Lasso on a toy example for various values of alpha.
# When validating this against glmnet notice that glmnet divides it
# against nobs.
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
# Test ElasticNet for various parameters of alpha and l1_ratio.
# Actually, the parameters alpha = 0 should not be allowed. However,
# we test it as a border case.
# ElasticNet is tested with and without precomputed Gram matrix
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that the two alphas do not fall more than one
# grid position apart in clf.alphas_
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
# to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=100, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 50, 3), clf.mse_path_.shape)
assert_equal((2, 50), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((50, 3), clf.mse_path_.shape)
assert_equal(50, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
# Fitting with high regularization is easier, so it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the original, less regularized version of the problem again, this
# time warm-started from the solution of the highly regularized variant
# as a better starting point. This should also converge faster than the
# original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_deprection_precompute_enet():
# Test that setting precompute="auto" gives a Deprecation Warning.
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
clf = ElasticNet(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
clf = Lasso(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
def test_enet_path_positive():
# Test that the coefs returned by positive=True in enet_path are positive
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
# Test that dense and sparse input give the same output for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
|
bsd-3-clause
|
hrjn/scikit-learn
|
examples/ensemble/plot_voting_probas.py
|
316
|
2824
|
"""
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `RandomForestClassifier` are weighted 5
times as much as those of the other classifiers when the averaged probability
is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 5])
# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]
# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]
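# Hedged sanity check (not part of the original example): with soft voting,
# the VotingClassifier probabilities are the weighted average of the
# individual predict_proba outputs, taken in estimator order ('lr', 'rf',
# 'gnb') with the weights defined above. Assuming the standalone fits above
# reproduce the clones fitted inside the VotingClassifier, the two results
# should agree up to numerical tolerance.
manual_average = np.average(probas[:3], axis=0, weights=[1, 1, 5])
np.testing.assert_allclose(manual_average, probas[3])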
# plotting
N = 4 # number of groups
ind = np.arange(N) # group positions
width = 0.35 # bar width
fig, ax = plt.subplots()
# bars for classifier 1-3
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')
# bars for VotingClassifier
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')
# plot annotations
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
'GaussianNB\nweight 1',
'RandomForestClassifier\nweight 5',
'VotingClassifier\n(average probabilities)'],
rotation=40,
ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
|
bsd-3-clause
|
pranavtbhat/EE219
|
project1/task2a.py
|
1
|
3417
|
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_predict, cross_val_score
from sklearn.ensemble import RandomForestRegressor
from sklearn import linear_model
from sklearn.neural_network import MLPRegressor
import statsmodels.formula.api as sm
import Functions
from OneHotEncode import one_hot_dataframe
import Plots
import utils
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('ggplot')
### Column names in the data file were changed so they can be used in the ols formula
# Load the dataset
# Columns:
# 0 -> WeekNo
# 1 -> DayofWeek
# 2 -> BackupStartTime
# 3 -> WorkFlowID
# 4 -> FileName
# 5 -> SizeofBackupGB
# 6 -> BackupTimeHour
###
data = pd.read_csv("datasets/network_backup_dataset.csv")
data = data.replace({'DayofWeek': {'Monday' : 0, 'Tuesday' : 1, 'Wednesday' : 2 , 'Thursday' : 3, 'Friday' : 4,
'Saturday' : 5, 'Sunday' : 6 }})
X = data.ix[:, [0, 1, 2, 3,4, 6]].values
X[:, 3] = utils.encode_work_flows(X[:, 3])
X[:, 4] = utils.encode_files(X[:, 4])
y = data.ix[:, 5].values
uniqueWorkFlow = sorted(pd.unique(data['WorkFlowID'])) # get unique workFlow values
uniqueFiles = ['File_{0}'.format(s) for s in xrange(len((pd.unique(data['FileName']))))] # get unique fileName values
networkDataset=data
for i,j in zip(uniqueWorkFlow,range(len(uniqueWorkFlow))):
networkDataset = networkDataset.replace({'WorkFlowID': {i : j}})
for i,j in zip(uniqueFiles,range(len(uniqueFiles))):
networkDataset = networkDataset.replace({'FileName': {i : j}})
## Uncomment to use data without columns FileName and WeekNumber
# dataNew=data
# dataNew['WorkFlowID']=networkDataset['WorkFlowID']
# dataNew['FileName']=networkDataset['FileName']
# dataNew['SizeofBackupGB']=y
# del dataNew['FileName']
# del dataNew['WeekNo']
###
# Part a: Linear Regression Model
# Check the graphs
###
reg = linear_model.LinearRegression()
y_predicted = cross_val_predict(reg, X, y, cv=10)
print utils.rmse(y,y_predicted)
# Almost the same as the RMSE above
cv_scores = cross_val_score(reg, X, y, cv=10, scoring='neg_mean_squared_error')
print (sum(cv_scores)/-10.0)**0.5
fig, ax = plt.subplots()
ax.scatter(x=y, y=y_predicted)
ax.plot([y.min(), y.max()], [y.min(), y.max()], 'k--', lw=4)
ax.set_xlabel('Actual')
ax.set_ylabel('Fitted')
plt.savefig('plots/lrActualvsFitted-4features.png', format='png')
plt.clf()
#Residual
y_residual = y - y_predicted
fig, ax = plt.subplots()
ax.scatter(y_predicted, y_residual)
ax.set_xlabel('Fitted')
ax.set_ylabel('Residual')
plt.savefig('plots/lrFittedvsResidual.png', format='png')
plt.clf()
model = sm.ols('SizeofBackupGB ~ WeekNo+ DayofWeek + BackupStartTime + WorkFlowID+FileName + BackupTimeHour ',networkDataset).fit()
print model.summary()
## Uncomment to check the effect with one hot encoding
# network_data = pd.read_csv('datasets/network_backup_dataset.csv')
# #One Hot Encoding
# one_hot_data, _, _ = one_hot_dataframe(network_data, ['DayofWeek', 'WorkFlowID','FileName'], replace=True)
#
# feature_cols = [col for col in one_hot_data.columns if col not in ['SizeofBackupGB']]
# X = one_hot_data[feature_cols]
# y = one_hot_data['SizeofBackupGB']
#
# all_columns = " + ".join(one_hot_data.columns - ["SizeofBackupGB"])
#
# my_formula = "'SizeofBackupGB ~ " + all_columns+"'"
# print my_formula
#
# model = sm.ols(formula=my_formula,data=one_hot_data).fit()
# print model.summary()
|
unlicense
|
studywolf/blog
|
SymPy/test_timing.py
|
1
|
3065
|
'''
Copyright (C) 2016 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import matplotlib.pyplot as plt
import numpy as np
import seaborn
import timeit
# Test 1 ----------------------------------------------------------------------
print('\nTest function 1: ')
time_sympy1 = timeit.timeit(
stmt='f(np.random.random(), np.random.random())',
setup='import numpy as np;\
import sympy as sp;\
q0 = sp.Symbol("q0");\
l0 = sp.Symbol("l0");\
a = sp.cos(q0) * l0;\
f = sp.lambdify((q0, l0), a, "numpy")')
print('Sympy lambdify function 1 time: ', time_sympy1)
time_hardcoded1 = timeit.timeit(
stmt='np.cos(np.random.random())*np.random.random()',
setup='import numpy as np')
print('Hard coded function 1 time: ', time_hardcoded1)
# Test 2 ----------------------------------------------------------------------
print('\nTest function 2: ')
time_sympy2 = timeit.timeit(
stmt='f(np.random.random(), np.random.random(), np.random.random(),\
np.random.random(), np.random.random(), np.random.random())',
setup='import numpy as np;\
import sympy as sp;\
q0 = sp.Symbol("q0");\
q1 = sp.Symbol("q1");\
q2 = sp.Symbol("q2");\
l0 = sp.Symbol("l0");\
l1 = sp.Symbol("l1");\
l2 = sp.Symbol("l2");\
a = l1*sp.sin(q0 - l0*sp.sin(q1)*sp.cos(q2) - l2*sp.sin(q2) -\
l0*sp.sin(q1) + q0*l0)*sp.cos(q0) + l2*sp.sin(q0);\
f = sp.lambdify((q0,q1,q2,l0,l1,l2), a, "numpy")')
print('Sympy lambdify function 2 time: ', time_sympy2)
time_hardcoded2 = timeit.timeit(
stmt='l1*np.sin(q0 - l0*np.sin(q1)*np.cos(q2) - l2*np.sin(q2) -\
l0*np.sin(q1) + q0*l0)*np.cos(q0) + l2*np.sin(q0)',
setup='import numpy as np;\
q0 = np.random.random();\
q1 = np.random.random();\
q2 = np.random.random();\
l0 = np.random.random();\
l1 = np.random.random();\
l2 = np.random.random()')
print('Hard coded function 2 time: ', time_hardcoded2)
ind = np.arange(2)
width = 0.35
fig, ax = plt.subplots(figsize=(5, 3))
plt.bar(ind, [time_sympy1, time_sympy2], width, color='b')
plt.bar(ind + width, [time_hardcoded1, time_hardcoded2], width, color='r')
plt.xlim([0, ind[-1]+width*2])
plt.ylabel('Simulation time (s)')
ax.set_xticks(ind + width)
ax.set_xticklabels(['Function 1', 'Function 2'])
ax.legend(['Sympy', 'Hard-coded'], loc='best')
plt.show()
|
gpl-3.0
|
kastnerkyle/kaggle-decmeg2014
|
shift_to_energy_peak.py
|
1
|
4209
|
import numpy as np
from scipy.ndimage import gaussian_filter
from scipy.io import loadmat
from path import path
from sklearn.decomposition import PCA
import scipy.signal as sig
def notch(Wn, bandwidth):
"""
Notch filter to kill line-noise.
"""
f = Wn / 2.0
R = 1.0 - 3.0 * (bandwidth / 2.0)
num = 1.0 - 2.0 * R * np.cos(2 * np.pi * f) + R ** 2.
denom = 2.0 - 2.0 * np.cos(2 * np.pi * f)
K = num / denom
b = np.zeros(3)
a = np.zeros(3)
a[0] = 1.0
a[1] = -2.0 * R * np.cos(2 * np.pi * f)
a[2] = R ** 2.
b[0] = K
b[1] = -2.0 * K * np.cos(2 * np.pi * f)
b[2] = K
return b, a
def apply_notch(X):
bw = .04
freq = .4
b, a = notch(freq, bw)
return sig.lfilter(b, a, X)
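# Hedged sketch: a small hypothetical helper (not in the original file) to
# inspect the notch filter designed above; its magnitude response should
# show a narrow dip at the notch frequency and stay close to 1 elsewhere.
def notch_frequency_response(Wn=0.4, bandwidth=0.04, worN=512):
    b, a = notch(Wn, bandwidth)
    w, h = sig.freqz(b, a, worN=worN)
    return w, np.abs(h)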
def normalize(X):
norms = np.sqrt((X ** 2).sum(-1))
X /= norms[:, :, np.newaxis]
def energy_peak(X, sigma=10):
energy = (X ** 2).mean(0).mean(0)
smoothed = gaussian_filter(energy, sigma=sigma)
return smoothed.argmax()
def shift_to_energy_peak(X, before=20, after=40, sigma=10):
peak = energy_peak(X, sigma=sigma)
start = peak - before
end = peak + after
return X[:, start:end]
def dim_reduce_sensors(X, n_components=30):
XX = X.transpose(1, 0, 2).reshape(X.shape[1], -1).T
pca = PCA(n_components=n_components)
return pca.inverse_transform(pca.fit_transform(XX
)).reshape(len(X), X.shape[-1], X.shape[1]).transpose(0, 2, 1)
def dim_reduce_sensors_svd(X, n_components=10):
XX = X.transpose(1, 0, 2).reshape(X.shape[1], -1)
U, S, VT = np.linalg.svd(XX, full_matrices=False)
S[n_components:] = 0.
XX = U.dot(S[:, np.newaxis] * VT)
return XX.reshape(X.shape[1], X.shape[0], X.shape[2]).transpose(1, 0, 2)
def project_to_nice_timecourses(X):
timecourses = np.load("timecourses.npz")["A"]
timecourses /= np.sqrt((timecourses ** 2).sum(0))
return X.dot(timecourses)
def remove_worst_trials(X, y, n_remove=10):
keep_indices = sorted((X ** 2).sum(1).sum(1).argsort()[::-1][n_remove:])
return X[keep_indices], y[keep_indices]
data_dir = path('data')
train_subject_ids = range(1, 17)
test_subject_ids = range(17, 24)
train_subject_names = ["train_subject%02d.mat" % sid
for sid in train_subject_ids]
test_subject_names = ["test_subject%02d.mat" % sid
for sid in test_subject_ids]
all_train_data = []
all_train_targets = []
labels = []
for i, subject in enumerate(train_subject_names):
f = loadmat(data_dir / subject)
X = f['X'][:, 160:]
y = f['y'].ravel() * 2 - 1
X, y = remove_worst_trials(X, y)
X = apply_notch(X)
normalize(X)
# X = dim_reduce_sensors(X, n_components=2)[:, :, 125:250]
X = X[:, :, 125:250]
X_cropped = X[:, :, 20:80] # shift_to_energy_peak(X, before=20, after=40)
# X_cropped = project_to_nice_timecourses(X)
all_train_data.append(X_cropped)
all_train_targets.append(y)
labels.append([i] * len(X_cropped))
all_train_data = np.concatenate(all_train_data)
all_train_targets = np.concatenate(all_train_targets)
labels = np.concatenate(labels)
from sklearn.cross_validation import cross_val_score, LeaveOneLabelOut
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import ExtraTreesClassifier
scaler = StandardScaler()
clf = LogisticRegression(C=1e-1, penalty="l2")
# clf = ExtraTreesClassifier(n_estimators=100)
pipeline = Pipeline([('scaler', scaler), ('estimator', clf)])
cv = LeaveOneLabelOut(labels)
# all_train_data = dim_reduce_sensors_svd(all_train_data, n_components=10)
all_scores = []
for i in xrange(all_train_data.shape[-1]):
scores = cross_val_score(pipeline,
all_train_data[:, :, i],
all_train_targets,
cv=cv,
verbose=100)
all_scores.append(scores)
# scores = cross_val_score(pipeline,
# all_train_data.reshape(len(all_train_data), -1),
# all_train_targets,
# cv=cv,
# verbose=100)
|
bsd-3-clause
|
leofdecarvalho/MachineLearning
|
9. Artificial_Neural_Networks/ann.py
|
5
|
2306
|
# Artificial Neural Network
# Installing Theano
# pip install --upgrade --no-deps git+git://github.com/Theano/Theano.git
# Installing Tensorflow
# pip install tensorflow
# Installing Keras
# pip install --upgrade keras
# Part 1 - Data Preprocessing
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Churn_Modelling.csv')
X = dataset.iloc[:, 3:13].values
y = dataset.iloc[:, 13].values
# Encoding categorical data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X_1 = LabelEncoder()
X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])
labelencoder_X_2 = LabelEncoder()
X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])
onehotencoder = OneHotEncoder(categorical_features = [1])
X = onehotencoder.fit_transform(X).toarray()
X = X[:, 1:]
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Part 2 - Now let's make the ANN!
# Importing the Keras libraries and packages
import keras
from keras.models import Sequential
from keras.layers import Dense
# Initialising the ANN
classifier = Sequential()
# Adding the input layer and the first hidden layer
classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu', input_dim = 11))
# Adding the second hidden layer
classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu'))
# Adding the output layer
classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))
# Compiling the ANN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Fitting the ANN to the Training set
classifier.fit(X_train, y_train, batch_size = 10, epochs = 100)
# Part 3 - Making predictions and evaluating the model
# Predicting the Test set results
y_pred = classifier.predict(X_test)
y_pred = (y_pred > 0.5)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
|
mit
|
nelson-liu/scikit-learn
|
sklearn/linear_model/coordinate_descent.py
|
4
|
81531
|
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Gael Varoquaux <[email protected]>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import _preprocess_data
from ..utils import check_array, check_X_y
from ..utils.validation import check_random_state
from ..model_selection import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from ..exceptions import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 < l1_ratio <= 1``.
For ``l1_ratio = 0`` the penalty is an L2 penalty (currently not
supported). For ``l1_ratio = 1`` it is an L1 penalty. For
``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
if l1_ratio == 0:
raise ValueError("Automatic alpha grid generation is not supported for"
" l1_ratio=0. Please supply a grid by providing "
"your estimator with the appropriate `alphas=` "
"argument.")
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = _preprocess_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices,
# since we should not destroy the sparsity of such matrices.
_, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept,
normalize,
return_mean=True)
mean_dot = X_offset * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_scale[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
positive : bool, default False
If set to True, forces coefficients to be positive.
return_n_iter : bool
whether to return the number of iterations or not.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
Note that in certain cases, the Lars solver may be significantly
faster at computing this path. In particular, linear
interpolation can be used to retrieve model coefficients between the
values output by lars_path
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, return_n_iter=return_n_iter, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False,
check_input=True, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||_Fro^2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
check_input : bool, default True
Skip input validation checks, including the Gram matrix when provided,
assuming they are handled by the caller when check_input=False.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py for an example.
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
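Examples
--------
A minimal usage sketch (illustrative only, reusing the toy data from the
``lasso_path`` example; outputs omitted):
>>> import numpy as np
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> alphas, coefs, dual_gaps = enet_path(X, y, l1_ratio=0.5,
...                                      alphas=[5., 1., .5])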
"""
# We expect X and y to be already Fortran ordered when bypassing
# checks
if check_input:
X = check_array(X, 'csc', dtype=[np.float64, np.float32],
order='F', copy=copy_X)
y = check_array(y, 'csc', dtype=X.dtype.type, order='F', copy=False,
ensure_2d=False)
if Xy is not None:
# Xy should be a 1d contiguous array or a 2D C ordered array
Xy = check_array(Xy, dtype=X.dtype.type, order='C', copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_offset' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_offset'] / params['X_scale']
X_sparse_scaling = np.asarray(X_sparse_scaling, dtype=X.dtype)
else:
X_sparse_scaling = np.zeros(n_features, dtype=X.dtype)
# X should be normalized and fit already if function is called
# from ElasticNet.fit
if check_input:
X, y, X_offset, y_offset, X_scale, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False,
fit_intercept=False, copy=False)
if alphas is None:
# No need to normalize or fit an intercept: it has been done
# above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=X.dtype)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=X.dtype)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1], dtype=X.dtype))
else:
coef_ = np.asfortranarray(coef_init, dtype=X.dtype)
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
# We expect precompute to be already C ordered (as checked below) when
# bypassing checks
if check_input:
precompute = check_array(precompute, dtype=X.dtype.type,
order='C')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like. Got %r" % precompute)
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations.' +
' Fitting data with very small alpha' +
' may cause precision problems.',
ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
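For instance (illustrative values only), an L1 weight of ``a = 0.1`` and an
L2 weight of ``b = 0.4`` correspond to ``alpha = 0.5`` and
``l1_ratio = 0.2``.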
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the penalty terms. Defaults to 1.0.
See the notes for the exact mathematical meaning of this
parameter. ``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised.
Given this, you should use the :class:`LinearRegression` object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
precompute : True | False | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. The Gram matrix can also be passed as argument.
For sparse input this option is always ``True`` to preserve sparsity.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y, check_input=True):
"""Fit model with coordinate descent.
Parameters
-----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target
check_input : boolean, (default=True)
Allow bypassing several input checks.
Don't use this parameter unless you know what you are doing.
Notes
-----
Coordinate descent is an algorithm that considers each column of
the data at a time, hence it will automatically convert the X input
to a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if isinstance(self.precompute, six.string_types):
raise ValueError('precompute should be one of True, False or'
' array-like. Got %r' % self.precompute)
# We expect X and y to be float64 or float32 Fortran ordered arrays
# when bypassing checks
if check_input:
X, y = check_X_y(X, y, accept_sparse='csc',
order='F', dtype=[np.float64, np.float32],
copy=self.copy_X and self.fit_intercept,
multi_output=True, y_numeric=True)
y = check_array(y, order='F', copy=False, dtype=X.dtype.type,
ensure_2d=False)
X, y, X_offset, y_offset, X_scale, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=False)
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or not hasattr(self, "coef_"):
coef_ = np.zeros((n_targets, n_features), dtype=X.dtype,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=X.dtype)
self.n_iter_ = []
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_offset=X_offset, X_scale=X_scale, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection,
check_input=False)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_offset, y_offset, X_scale)
# workaround since _set_intercept will cast self.coef_ into X.dtype
self.coef_ = np.asarray(self.coef_, dtype=X.dtype)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted ``coef_`` """
return sparse.csr_matrix(self.coef_)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
else:
return super(ElasticNet, self)._decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised.
Given this, you should use the :class:`LinearRegression` object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | array-like, default=False
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
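    A minimal sketch of that recommendation (illustrative only; any alpha
    would do):
    >>> import numpy as np
    >>> from sklearn.linear_model import Lasso
    >>> X = np.asfortranarray([[0., 0.], [1., 1.], [2., 2.]])
    >>> y = [0., 1., 2.]
    >>> _ = Lasso(alpha=0.1).fit(X, y)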
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_offset, y_offset, X_scale, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_offset'] = X_offset
path_params['X_scale'] = X_scale
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_offset = np.atleast_1d(y_offset)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_scale)
coefs[:, nonzeros] /= X_scale[nonzeros][:, np.newaxis]
intercepts = y_offset[:, np.newaxis] - np.dot(X_offset, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
# Work around for sparse matrices since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = check_array(y, copy=False, dtype=[np.float64, np.float32],
ensure_2d=False)
if y.shape[0] == 0:
raise ValueError("y has 0 samples: %r" % y)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1 and y.shape[1] > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
y = column_or_1d(y, warn=True)
else:
if sparse.isspmatrix(X):
                raise TypeError("X should be dense but a sparse matrix was "
                                "passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
# Dealing right with copy_X is important in the following:
# Multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if (hasattr(reference_to_old_X, "data") and
not np.may_share_memory(reference_to_old_X.data, X.data)):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=[np.float64, np.float32],
order='F', copy=copy_X)
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv.split(X))
best_mse = np.inf
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=X.dtype.type)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
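    Examples
    --------
    A minimal usage sketch (toy data for illustration only; outputs omitted):
    >>> from sklearn.linear_model import LassoCV
    >>> reg = LassoCV(cv=3)
    >>> reg = reg.fit([[0., 0.], [1., 1.], [2., 2.], [3., 3.]],
    ...               [0., 1., 2., 3.])
    >>> reg.alpha_  # doctest: +SKIP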
Notes
-----
See examples/linear_model/plot_lasso_model_selection.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
l1_ratio : float or array of floats, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula).
    intercept_ : float | array, shape (n_targets,)
        Independent term in the decision function.
mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/plot_lasso_model_selection.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
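    For example (an illustrative sketch only; the plain ``ElasticNet``
    estimator is used here because its ``alpha`` is fixed rather than
    cross-validated), penalties of ``a = 0.1`` on L1 and ``b = 0.05`` on L2
    correspond to:
    >>> from sklearn.linear_model import ElasticNet
    >>> a, b = 0.1, 0.05
    >>> enet = ElasticNet(alpha=a + b, l1_ratio=a / (a + b))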
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_elastic_net>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
-----------
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
X = check_array(X, dtype=[np.float64, np.float32], order='F',
copy=self.copy_X and self.fit_intercept)
y = check_array(y, dtype=X.dtype.type, ensure_2d=False)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=X.dtype.type,
order='F')
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_offset, y_offset, X_scale)
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations',
ConvergenceWarning)
# return self for chaining fit and predict calls
return self
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_tasks, n_features)
parameter vector (W in the cost function formula)
intercept_ : array, shape (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
l1_ratio : float or array of floats
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or \
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
[[ 0.52875032 0.46958558]
[ 0.52875032 0.46958558]]
>>> print(clf.intercept_)
[ 0.00166409 0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
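    Examples
    --------
    A minimal usage sketch (toy data for illustration only; outputs omitted):
    >>> from sklearn.linear_model import MultiTaskLassoCV
    >>> reg = MultiTaskLassoCV(cv=3)
    >>> reg = reg.fit([[0., 0.], [1., 1.], [2., 2.], [3., 3.]],
    ...               [[0., 0.], [1., 1.], [2., 2.], [3., 3.]])
    >>> reg.alpha_  # doctest: +SKIP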
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
|
bsd-3-clause
|
timothydmorton/bokeh
|
examples/compat/mpl/lc_offsets.py
|
34
|
1096
|
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
import numpy as np
from bokeh import mpl
from bokeh.plotting import output_file, show
# Simulate a series of ocean current profiles, successively
# offset by 0.1 m/s so that they form what is sometimes called
# a "waterfall" plot or a "stagger" plot.
nverts = 60
ncurves = 20
offs = (0.1, 0.0)
rs = np.random.RandomState([12345678])
yy = np.linspace(0, 2 * np.pi, nverts)
ym = np.amax(yy)
xx = (0.2 + (ym - yy) / ym) ** 2 * np.cos(yy - 0.4) * 0.5
segs = []
for i in range(ncurves):
xxx = xx + 0.02 * rs.randn(nverts)
curve = list(zip(xxx, yy * 100))
segs.append(curve)
colors = [(1.0, 0.0, 0.0, 1.0), (0.0, 0.5, 0.0, 1.0), (0.0, 0.0, 1.0, 1.0),
(0.0, 0.75, 0.75, 1.0), (0.75, 0.75, 0, 1.0), (0.75, 0, 0.75, 1.0),
(0.0, 0.0, 0.0, 1.0)]
col = LineCollection(segs, linewidth=5, offsets=offs)
ax = plt.axes()
ax.add_collection(col, autolim=True)
col.set_color(colors)
ax.set_title('Successive data offsets')
fig = plt.gcf()
output_file("lc_offsets.html")
show(mpl.to_bokeh())
|
bsd-3-clause
|
lazywei/scikit-learn
|
examples/linear_model/plot_logistic_l1_l2_sparsity.py
|
384
|
2601
|
"""
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
|
bsd-3-clause
|
terhorst/psmcpp
|
smcpp/plotting.py
|
2
|
5252
|
from __future__ import absolute_import, division, print_function
import json
import matplotlib, matplotlib.style, matplotlib.cm
matplotlib.use("Agg")
matplotlib.style.use("seaborn-ticks")
import numpy as np
from numpy import array
from collections import defaultdict
import smcpp.defaults
from . import model
def pretty_plot():
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
fig = Figure()
FigureCanvas(fig)
ax = fig.add_subplot(111)
return fig, ax
def plot_psfs(psfs, xlim, ylim, xlabel, knots=False, logy=False, stats={}):
fig, ax = pretty_plot()
xmax = ymax = 0.
xmin = ymin = np.inf
labels = []
series = []
data = [["label", "x", "y", "plot_type", "plot_num"]]
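    # `saver` wraps a matplotlib plotting callable: every curve drawn through it
    # is also appended to `data` for later export, and the label is passed to the
    # underlying call only the first time it is seen, so the legend stays unique.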
def saver(f, ty):
def g(x, y, label, data=data, **kwargs):
data += [(label, xx, yy, ty, saver.plot_num) for xx, yy in zip(x, y)]
saver.plot_num += 1
if label not in g.seen:
g.seen.append(label)
kwargs["label"] = label
return f(x, y, **kwargs)
g.i = 0
g.seen = []
return g
saver.plot_num = 0
my_axplot = saver(ax.plot, "path")
my_axstep = saver(ax.step, "step")
vlines = []
models = []
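    # Each entry of `psfs` is a (dict, x-offset) pair. The dict is either a
    # piecewise-exponential size history (keys "a", "b", "s", "N0"), a
    # serialized smcpp model (key "model"), or a plain stepwise history
    # (keys "a", "s").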
for i, (d, off) in enumerate(psfs):
g = d.get("g") or 1
if "b" in d:
N0 = d["N0"]
a = d["a"]
s = d["s"]
b = d["b"]
slope = np.log(a / b) / s
cum = 0.
x = []
y = []
for aa, bb, ss in zip(b[:-1], slope[:-1], s[:-1]):
tt = np.linspace(cum, cum + ss, 200)
yy = aa * np.exp(bb * (cum + ss - tt))
x = np.concatenate([x, tt])
y = np.concatenate([y, yy])
cum += ss
x = np.concatenate([x, [cum, 2 * cum]])
y = np.concatenate([y, [a[-1], a[-1]]])
# if not logy:
# y *= 1e-3
series.append((None, x, y, my_axplot, off, N0, g))
elif "model" in d:
cls = getattr(model, d["model"]["class"])
mb = cls.from_dict(d["model"])
models.append(mb)
split = False
if isinstance(mb, model.SMCTwoPopulationModel):
split = True
ms = [mb.for_pop(pid) for pid in mb.pids]
labels = mb.pids
else:
ms = [mb]
labels = [mb.pid]
for m, l in zip(ms, labels):
ak = len(smcpp.defaults.additional_knots)
x = np.cumsum(m.s)
y = m.stepwise_values().astype("float")
x = np.insert(x, 0, 0)
y = np.insert(y, 0, y[0])
if split and l == mb.pids[-1]:
vlines.append(mb.split * 2 * m.N0 * g)
xf = x < mb.split
x = x[xf]
x = np.r_[x, mb.split]
y = y[xf]
y = np.r_[y, y[-1]]
split = False
series.append([l, x, y, my_axplot, off, m.N0, g])
if knots and hasattr(m, "_knots"):
knots = m._knots[:-ak]
x2, y2 = (knots, np.exp(m[:-ak].astype("float")))
# if not logy:
# y *= 1e-3
series.append([None, x2, y2, ax.scatter, off, m.N0, g])
else:
x = np.cumsum(d["s"])
x = np.insert(x, 0, 0)[:-1]
y = d["a"]
series.append((None, x, y, my_axstep, off, N0, g))
for statname in stats:
magg = model.aggregate(*models, stat=stats[statname])
series.append(
[
statname,
np.cumsum(magg.s),
magg.stepwise_values().astype("float"),
my_axplot,
0.,
magg.N0,
g,
]
)
labels = []
NUM_COLORS = len({label for label, *_ in series})
cm = matplotlib.cm.get_cmap("gist_rainbow")
COLORS = [cm(1. * i / NUM_COLORS) for i in range(NUM_COLORS)]
label_colors = defaultdict(lambda: COLORS[len(label_colors)])
for label, x, y, plotfun, off, N0, g in series:
xp = 2 * N0 * g * x + off
yp = N0 * y
if label is None:
plotfun(xp, yp, linewidth=2, label=label, color="black")
else:
labels += plotfun(
xp, yp, label=label, linewidth=2, color=label_colors[label]
)
if len(xp) > 2:
xmin = min(xmin, xp[1] * 0.9)
ymin = min(ymin, np.min(yp))
ymax = max(ymax, np.max(yp))
xmax = max(xmax, np.max(xp))
if labels:
first_legend = ax.legend(handles=labels, loc=9, ncol=4, prop={"size": 8})
for x in vlines:
ax.axvline(x)
ax.set_xscale("log")
ax.set_ylabel(r"$N_e$")
if logy:
ax.set_yscale("log")
ax.set_xlabel(xlabel)
if not xlim:
xlim = (xmin, xmax)
if not ylim:
ylim = (.9 * ymin, 1.1 * ymax)
ax.set_xlim(*xlim)
ax.set_ylim(*ylim)
fig.tight_layout()
return fig, data
|
gpl-3.0
|
massmutual/scikit-learn
|
sklearn/ensemble/tests/test_bagging.py
|
13
|
25689
|
"""
Testing for the bagging ensemble module (sklearn.ensemble.bagging).
"""
# Author: Gilles Louppe
# License: BSD 3 clause
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.grid_search import GridSearchCV, ParameterGrid
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.svm import SVC, SVR
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.cross_validation import train_test_split
from sklearn.datasets import load_boston, load_iris, make_hastie_10_2
from sklearn.utils import check_random_state
from scipy.sparse import csc_matrix, csr_matrix
rng = check_random_state(0)
# also load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification():
# Check classification for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [1, 2, 4],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyClassifier(),
Perceptron(),
DecisionTreeClassifier(),
KNeighborsClassifier(),
SVC()]:
for params in grid:
BaggingClassifier(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_classification():
# Check classification for various parameter settings on sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVC, self).fit(X, y)
self.data_type_ = type(X)
return self
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
for f in ['predict', 'predict_proba', 'predict_log_proba', 'decision_function']:
# Trained on sparse format
sparse_classifier = BaggingClassifier(
base_estimator=CustomSVC(decision_function_shape='ovr'),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = getattr(sparse_classifier, f)(X_test_sparse)
# Trained on dense format
dense_classifier = BaggingClassifier(
base_estimator=CustomSVC(decision_function_shape='ovr'),
random_state=1,
**params
).fit(X_train, y_train)
dense_results = getattr(dense_classifier, f)(X_test)
assert_array_equal(sparse_results, dense_results)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([t == sparse_type for t in types])
def test_regression():
# Check regression for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [0.5, 1.0],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyRegressor(),
DecisionTreeRegressor(),
KNeighborsRegressor(),
SVR()]:
for params in grid:
BaggingRegressor(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_regression():
# Check regression for various parameter settings on sparse input.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
class CustomSVR(SVR):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVR, self).fit(X, y)
self.data_type_ = type(X)
return self
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train, y_train).predict(X_test)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert_array_equal(sparse_results, dense_results)
assert all([t == sparse_type for t in types])
assert_array_equal(sparse_results, dense_results)
def test_bootstrap_samples():
    # Test that bootstrapping samples generates non-perfect base estimators.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
base_estimator = DecisionTreeRegressor().fit(X_train, y_train)
# without bootstrap, all trees are perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=False,
random_state=rng).fit(X_train, y_train)
assert_equal(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
# with bootstrap, trees are no longer perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=True,
random_state=rng).fit(X_train, y_train)
assert_greater(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
def test_bootstrap_features():
    # Test that bootstrapping features may generate duplicate features.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_equal(boston.data.shape[1], np.unique(features).shape[0])
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=True,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_greater(boston.data.shape[1], np.unique(features).shape[0])
def test_probability():
# Predict probabilities.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
with np.errstate(divide="ignore", invalid="ignore"):
# Normal case
ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
random_state=rng).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
# Degenerate case, where some classes are missing
ensemble = BaggingClassifier(base_estimator=LogisticRegression(),
random_state=rng,
max_samples=5).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
def test_oob_score_classification():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
for base_estimator in [DecisionTreeClassifier(), SVC()]:
clf = BaggingClassifier(base_estimator=base_estimator,
n_estimators=100,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingClassifier(base_estimator=base_estimator,
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_oob_score_regression():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=50,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_single_estimator():
# Check singleton ensembles.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf1 = BaggingRegressor(base_estimator=KNeighborsRegressor(),
n_estimators=1,
bootstrap=False,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
clf2 = KNeighborsRegressor().fit(X_train, y_train)
assert_array_equal(clf1.predict(X_test), clf2.predict(X_test))
def test_error():
# Test that it gives proper exception on deficient input.
X, y = iris.data, iris.target
base = DecisionTreeClassifier()
# Test max_samples
assert_raises(ValueError,
BaggingClassifier(base, max_samples=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=1000).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples="foobar").fit, X, y)
# Test max_features
assert_raises(ValueError,
BaggingClassifier(base, max_features=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=5).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features="foobar").fit, X, y)
# Test support of decision_function
assert_false(hasattr(BaggingClassifier(base).fit(X, y), 'decision_function'))
def test_parallel_classification():
# Check parallel classification.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
# predict_proba
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict_proba(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y3)
# decision_function
ensemble = BaggingClassifier(SVC(decision_function_shape='ovr'),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
decisions1 = ensemble.decision_function(X_test)
ensemble.set_params(n_jobs=2)
decisions2 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions2)
ensemble = BaggingClassifier(SVC(decision_function_shape='ovr'),
n_jobs=1,
random_state=0).fit(X_train, y_train)
decisions3 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions3)
def test_parallel_regression():
# Check parallel regression.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y3)
def test_gridsearch():
# Check that bagging ensembles can be grid-searched.
# Transform iris into a binary classification task
X, y = iris.data, iris.target
y[y == 2] = 1
# Grid search with scoring based on decision_function
parameters = {'n_estimators': (1, 2),
'base_estimator__C': (1, 2)}
GridSearchCV(BaggingClassifier(SVC()),
parameters,
scoring="roc_auc").fit(X, y)
def test_base_estimator():
# Check base_estimator and its default values.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(Perceptron(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, Perceptron))
# Regression
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(SVR(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, SVR))
def test_bagging_with_pipeline():
estimator = BaggingClassifier(make_pipeline(SelectKBest(k=1),
DecisionTreeClassifier()),
max_features=2)
estimator.fit(iris.data, iris.target)
class DummyZeroEstimator(BaseEstimator):
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
def predict(self, X):
return self.classes_[np.zeros(X.shape[0], dtype=int)]
def test_bagging_sample_weight_unsupported_but_passed():
estimator = BaggingClassifier(DummyZeroEstimator())
rng = check_random_state(0)
estimator.fit(iris.data, iris.target).predict(iris.data)
assert_raises(ValueError, estimator.fit, iris.data, iris.target,
sample_weight=rng.randint(10, size=(iris.data.shape[0])))
def test_warm_start(random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = BaggingClassifier(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = BaggingClassifier(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
def test_warm_start_smaller_n_estimators():
# Test if warm start'ed second fit with smaller n_estimators raises error.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test that nothing happens when fitting without increasing n_estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# modify X to nonsense values, this should not change anything
X_train += 1.
assert_warns_message(UserWarning,
"Warm-start fitting without increasing n_estimators does not",
clf.fit, X_train, y_train)
assert_array_equal(y_pred, clf.predict(X_test))
def test_warm_start_equivalence():
# warm started classifier with 5+5 estimators should be equivalent to
# one classifier with 10 estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf_ws = BaggingClassifier(n_estimators=5, warm_start=True,
random_state=3141)
clf_ws.fit(X_train, y_train)
clf_ws.set_params(n_estimators=10)
clf_ws.fit(X_train, y_train)
y1 = clf_ws.predict(X_test)
clf = BaggingClassifier(n_estimators=10, warm_start=False,
random_state=3141)
clf.fit(X_train, y_train)
y2 = clf.predict(X_test)
assert_array_almost_equal(y1, y2)
def test_warm_start_with_oob_score_fails():
# Check using oob_score and warm_start simultaneously fails
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True)
assert_raises(ValueError, clf.fit, X, y)
def test_oob_score_removed_on_warm_start():
X, y = make_hastie_10_2(n_samples=2000, random_state=1)
clf = BaggingClassifier(n_estimators=50, oob_score=True)
clf.fit(X, y)
clf.set_params(warm_start=True, oob_score=False, n_estimators=100)
clf.fit(X, y)
assert_raises(AttributeError, getattr, clf, "oob_score_")
|
bsd-3-clause
|
fx2003/tensorflow-study
|
TensorFlow实战/models/cognitive_mapping_and_planning/tfcode/nav_utils.py
|
14
|
18657
|
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various losses for training navigation agents.
Defines various loss functions for navigation agents, such as
compute_losses_multi_or.
"""
import os, numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.contrib import slim
from tensorflow.contrib.slim import arg_scope
from tensorflow.contrib.slim.nets import resnet_v2
from tensorflow.python.training import moving_averages
import logging
from src import utils
import src.file_utils as fu
from tfcode import tf_utils
def compute_losses_multi_or(logits, actions_one_hot, weights=None,
num_actions=-1, data_loss_wt=1., reg_loss_wt=1.,
ewma_decay=0.99, reg_loss_op=None):
assert(num_actions > 0), 'num_actions must be specified and must be > 0.'
with tf.name_scope('loss'):
if weights is None:
      weights = tf.ones_like(actions_one_hot, dtype=tf.float32, name='weight')  # default to uniform weights
actions_one_hot = tf.cast(tf.reshape(actions_one_hot, [-1, num_actions],
're_actions_one_hot'), tf.float32)
weights = tf.reduce_sum(tf.reshape(weights, [-1, num_actions], 're_weight'),
reduction_indices=1)
total = tf.reduce_sum(weights)
action_prob = tf.nn.softmax(logits)
action_prob = tf.reduce_sum(tf.multiply(action_prob, actions_one_hot),
reduction_indices=1)
example_loss = -tf.log(tf.maximum(tf.constant(1e-4), action_prob))
data_loss_op = tf.reduce_sum(example_loss * weights) / total
if reg_loss_op is None:
if reg_loss_wt > 0:
reg_loss_op = tf.add_n(tf.losses.get_regularization_losses())
else:
reg_loss_op = tf.constant(0.)
if reg_loss_wt > 0:
total_loss_op = data_loss_wt*data_loss_op + reg_loss_wt*reg_loss_op
else:
total_loss_op = data_loss_wt*data_loss_op
is_correct = tf.cast(tf.greater(action_prob, 0.5, name='pred_class'), tf.float32)
acc_op = tf.reduce_sum(is_correct*weights) / total
ewma_acc_op = moving_averages.weighted_moving_average(
acc_op, ewma_decay, weight=total, name='ewma_acc')
acc_ops = [ewma_acc_op]
return reg_loss_op, data_loss_op, total_loss_op, acc_ops
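# Note added for clarity (not part of the original file): compute_losses_multi_or
# implements a weighted cross-entropy over action predictions. With softmax
# probabilities p and (possibly multi-hot) targets a, the per-example loss is
# -log(max(1e-4, sum_k p_k * a_k)); the data loss is the weighted mean of these
# terms, and the reported accuracy is the weighted fraction of examples whose
# ground-truth probability mass exceeds 0.5, tracked with an exponential moving
# average.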
def get_repr_from_image(images_reshaped, modalities, data_augment, encoder,
freeze_conv, wt_decay, is_training):
# Pass image through lots of convolutional layers, to obtain pool5
if modalities == ['rgb']:
with tf.name_scope('pre_rgb'):
x = (images_reshaped + 128.) / 255. # Convert to brightness between 0 and 1.
if data_augment.relight and is_training:
x = tf_utils.distort_image(x, fast_mode=data_augment.relight_fast)
x = (x-0.5)*2.0
scope_name = encoder
elif modalities == ['depth']:
with tf.name_scope('pre_d'):
d_image = images_reshaped
x = 2*(d_image[...,0] - 80.0)/100.0
y = d_image[...,1]
d_image = tf.concat([tf.expand_dims(x, -1), tf.expand_dims(y, -1)], 3)
x = d_image
scope_name = 'd_'+encoder
resnet_is_training = is_training and (not freeze_conv)
with slim.arg_scope(resnet_v2.resnet_utils.resnet_arg_scope(resnet_is_training)):
fn = getattr(tf_utils, encoder)
x, end_points = fn(x, num_classes=None, global_pool=False,
output_stride=None, reuse=None,
scope=scope_name)
vars_ = slim.get_variables_to_restore()
conv_feat = x
return conv_feat, vars_
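# Note added for clarity (not part of the original file): get_repr_from_image
# rescales RGB inputs to roughly [-1, 1] (and the first depth channel with a
# fixed offset and scale), then forwards the result through the convolutional
# encoder whose builder is looked up by name via getattr(tf_utils, encoder);
# the resnet arg scope runs in inference mode whenever freeze_conv is set or
# the model is not training.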
def default_train_step_kwargs(m, obj, logdir, rng_seed, is_chief, num_steps,
iters, train_display_interval,
dagger_sample_bn_false):
train_step_kwargs = {}
train_step_kwargs['obj'] = obj
train_step_kwargs['m'] = m
# rng_data has 2 independent rngs, one for sampling episodes and one for
  # sampling perturbs (so that we can make results reproducible).
train_step_kwargs['rng_data'] = [np.random.RandomState(rng_seed),
np.random.RandomState(rng_seed)]
train_step_kwargs['rng_action'] = np.random.RandomState(rng_seed)
if is_chief:
train_step_kwargs['writer'] = tf.summary.FileWriter(logdir) #, m.tf_graph)
else:
train_step_kwargs['writer'] = None
train_step_kwargs['iters'] = iters
train_step_kwargs['train_display_interval'] = train_display_interval
train_step_kwargs['num_steps'] = num_steps
train_step_kwargs['logdir'] = logdir
train_step_kwargs['dagger_sample_bn_false'] = dagger_sample_bn_false
return train_step_kwargs
# Utilities for visualizing and analysing validation output.
def save_d_at_t(outputs, global_step, output_dir, metric_summary, N):
"""Save distance to goal at all time steps.
Args:
outputs : [gt_dist_to_goal].
global_step : number of iterations.
output_dir : output directory.
metric_summary : to append scalars to summary.
N : number of outputs to process.
"""
d_at_t = np.concatenate(map(lambda x: x[0][:,:,0]*1, outputs), axis=0)
fig, axes = utils.subplot(plt, (1,1), (5,5))
axes.plot(np.arange(d_at_t.shape[1]), np.mean(d_at_t, axis=0), 'r.')
axes.set_xlabel('time step')
axes.set_ylabel('dist to next goal')
axes.grid('on')
file_name = os.path.join(output_dir, 'dist_at_t_{:d}.png'.format(global_step))
with fu.fopen(file_name, 'w') as f:
fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
file_name = os.path.join(output_dir, 'dist_at_t_{:d}.pkl'.format(global_step))
utils.save_variables(file_name, [d_at_t], ['d_at_t'], overwrite=True)
plt.close(fig)
return None
def save_all(outputs, global_step, output_dir, metric_summary, N):
"""Save numerous statistics.
Args:
outputs : [locs, goal_loc, gt_dist_to_goal, node_ids, perturbs]
global_step : number of iterations.
output_dir : output directory.
metric_summary : to append scalars to summary.
N : number of outputs to process.
"""
all_locs = np.concatenate(map(lambda x: x[0], outputs), axis=0)
all_goal_locs = np.concatenate(map(lambda x: x[1], outputs), axis=0)
all_d_at_t = np.concatenate(map(lambda x: x[2][:,:,0]*1, outputs), axis=0)
all_node_ids = np.concatenate(map(lambda x: x[3], outputs), axis=0)
all_perturbs = np.concatenate(map(lambda x: x[4], outputs), axis=0)
file_name = os.path.join(output_dir, 'all_locs_at_t_{:d}.pkl'.format(global_step))
vars = [all_locs, all_goal_locs, all_d_at_t, all_node_ids, all_perturbs]
var_names = ['all_locs', 'all_goal_locs', 'all_d_at_t', 'all_node_ids', 'all_perturbs']
utils.save_variables(file_name, vars, var_names, overwrite=True)
return None
def eval_ap(outputs, global_step, output_dir, metric_summary, N, num_classes=4):
"""Processes the collected outputs to compute AP for action prediction.
Args:
outputs : [logits, labels]
global_step : global_step.
output_dir : where to store results.
metric_summary : summary object to add summaries to.
N : number of outputs to process.
num_classes : number of classes to compute AP over, and to reshape tensors.
"""
if N >= 0:
outputs = outputs[:N]
logits = np.concatenate(map(lambda x: x[0], outputs), axis=0).reshape((-1, num_classes))
labels = np.concatenate(map(lambda x: x[1], outputs), axis=0).reshape((-1, num_classes))
aps = []
for i in range(logits.shape[1]):
ap, rec, prec = utils.calc_pr(labels[:,i], logits[:,i])
ap = ap[0]
tf_utils.add_value_to_summary(metric_summary, 'aps/ap_{:d}: '.format(i), ap)
aps.append(ap)
return aps
def eval_dist(outputs, global_step, output_dir, metric_summary, N):
"""Processes the collected outputs during validation to
1. Plot the distance over time curve.
2. Compute mean and median distances.
  3. Plot a histogram of end distances.
Args:
outputs : [locs, goal_loc, gt_dist_to_goal].
global_step : global_step.
output_dir : where to store results.
metric_summary : summary object to add summaries to.
N : number of outputs to process.
"""
SUCCESS_THRESH = 3
if N >= 0:
outputs = outputs[:N]
# Plot distance at time t.
d_at_t = []
for i in range(len(outputs)):
locs, goal_loc, gt_dist_to_goal = outputs[i]
d_at_t.append(gt_dist_to_goal[:,:,0]*1)
# Plot the distance.
fig, axes = utils.subplot(plt, (1,1), (5,5))
d_at_t = np.concatenate(d_at_t, axis=0)
axes.plot(np.arange(d_at_t.shape[1]), np.mean(d_at_t, axis=0), 'r.')
axes.set_xlabel('time step')
axes.set_ylabel('dist to next goal')
axes.grid('on')
file_name = os.path.join(output_dir, 'dist_at_t_{:d}.png'.format(global_step))
with fu.fopen(file_name, 'w') as f:
fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
file_name = os.path.join(output_dir, 'dist_at_t_{:d}.pkl'.format(global_step))
utils.save_variables(file_name, [d_at_t], ['d_at_t'], overwrite=True)
plt.close(fig)
# Plot the trajectories and the init_distance and final distance.
d_inits = []
d_ends = []
for i in range(len(outputs)):
locs, goal_loc, gt_dist_to_goal = outputs[i]
d_inits.append(gt_dist_to_goal[:,0,0]*1)
d_ends.append(gt_dist_to_goal[:,-1,0]*1)
# Plot the distance.
fig, axes = utils.subplot(plt, (1,1), (5,5))
d_inits = np.concatenate(d_inits, axis=0)
d_ends = np.concatenate(d_ends, axis=0)
axes.plot(d_inits+np.random.rand(*(d_inits.shape))-0.5,
d_ends+np.random.rand(*(d_ends.shape))-0.5, '.', mec='red', mew=1.0)
axes.set_xlabel('init dist'); axes.set_ylabel('final dist');
axes.grid('on'); axes.axis('equal');
title_str = 'mean: {:0.1f}, 50: {:0.1f}, 75: {:0.2f}, s: {:0.1f}'
title_str = title_str.format(
np.mean(d_ends), np.median(d_ends), np.percentile(d_ends, q=75),
100*(np.mean(d_ends <= SUCCESS_THRESH)))
axes.set_title(title_str)
file_name = os.path.join(output_dir, 'dist_{:d}.png'.format(global_step))
with fu.fopen(file_name, 'w') as f:
fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
file_name = os.path.join(output_dir, 'dist_{:d}.pkl'.format(global_step))
utils.save_variables(file_name, [d_inits, d_ends], ['d_inits', 'd_ends'],
overwrite=True)
plt.close(fig)
# Plot the histogram of the end_distance.
with plt.style.context('seaborn-white'):
d_ends_ = np.sort(d_ends)
d_inits_ = np.sort(d_inits)
leg = [];
fig, ax = utils.subplot(plt, (1,1), (5,5))
ax.grid('on')
ax.set_xlabel('Distance from goal'); ax.xaxis.label.set_fontsize(16);
ax.set_ylabel('Fraction of data'); ax.yaxis.label.set_fontsize(16);
ax.plot(d_ends_, np.arange(d_ends_.size)*1./d_ends_.size, 'r')
ax.plot(d_inits_, np.arange(d_inits_.size)*1./d_inits_.size, 'k')
leg.append('Final'); leg.append('Init');
ax.legend(leg, fontsize='x-large');
ax.set_axis_on()
title_str = 'mean: {:0.1f}, 50: {:0.1f}, 75: {:0.2f}, s: {:0.1f}'
title_str = title_str.format(
np.mean(d_ends), np.median(d_ends), np.percentile(d_ends, q=75),
100*(np.mean(d_ends <= SUCCESS_THRESH)))
ax.set_title(title_str)
file_name = os.path.join(output_dir, 'dist_hist_{:d}.png'.format(global_step))
with fu.fopen(file_name, 'w') as f:
fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
# Log distance metrics.
tf_utils.add_value_to_summary(metric_summary, 'dists/success_init: ',
100*(np.mean(d_inits <= SUCCESS_THRESH)))
tf_utils.add_value_to_summary(metric_summary, 'dists/success_end: ',
100*(np.mean(d_ends <= SUCCESS_THRESH)))
tf_utils.add_value_to_summary(metric_summary, 'dists/dist_init (75): ',
np.percentile(d_inits, q=75))
tf_utils.add_value_to_summary(metric_summary, 'dists/dist_end (75): ',
np.percentile(d_ends, q=75))
tf_utils.add_value_to_summary(metric_summary, 'dists/dist_init (median): ',
np.median(d_inits))
tf_utils.add_value_to_summary(metric_summary, 'dists/dist_end (median): ',
np.median(d_ends))
tf_utils.add_value_to_summary(metric_summary, 'dists/dist_init (mean): ',
np.mean(d_inits))
tf_utils.add_value_to_summary(metric_summary, 'dists/dist_end (mean): ',
np.mean(d_ends))
return np.median(d_inits), np.median(d_ends), np.mean(d_inits), np.mean(d_ends), \
np.percentile(d_inits, q=75), np.percentile(d_ends, q=75), \
100*(np.mean(d_inits) <= SUCCESS_THRESH), 100*(np.mean(d_ends) <= SUCCESS_THRESH)
def plot_trajectories(outputs, global_step, output_dir, metric_summary, N):
"""Processes the collected outputs during validation to plot the trajectories
in the top view.
Args:
outputs : [locs, orig_maps, goal_loc].
global_step : global_step.
output_dir : where to store results.
metric_summary : summary object to add summaries to.
N : number of outputs to process.
"""
if N >= 0:
outputs = outputs[:N]
N = len(outputs)
plt.set_cmap('gray')
fig, axes = utils.subplot(plt, (N, outputs[0][1].shape[0]), (5,5))
axes = axes.ravel()[::-1].tolist()
for i in range(N):
locs, orig_maps, goal_loc = outputs[i]
is_semantic = np.isnan(goal_loc[0,0,1])
for j in range(orig_maps.shape[0]):
ax = axes.pop();
ax.plot(locs[j,0,0], locs[j,0,1], 'ys')
# Plot one by one, so that they come in different colors.
for k in range(goal_loc.shape[1]):
if not is_semantic:
ax.plot(goal_loc[j,k,0], goal_loc[j,k,1], 's')
if False:
ax.plot(locs[j,:,0], locs[j,:,1], 'r.', ms=3)
ax.imshow(orig_maps[j,0,:,:,0], origin='lower')
ax.set_axis_off();
else:
ax.scatter(locs[j,:,0], locs[j,:,1], c=np.arange(locs.shape[1]),
cmap='jet', s=10, lw=0)
ax.imshow(orig_maps[j,0,:,:,0], origin='lower', vmin=-1.0, vmax=2.0)
if not is_semantic:
xymin = np.minimum(np.min(goal_loc[j,:,:], axis=0), np.min(locs[j,:,:], axis=0))
xymax = np.maximum(np.max(goal_loc[j,:,:], axis=0), np.max(locs[j,:,:], axis=0))
else:
xymin = np.min(locs[j,:,:], axis=0)
xymax = np.max(locs[j,:,:], axis=0)
xy1 = (xymax+xymin)/2. - np.maximum(np.max(xymax-xymin), 12)
xy2 = (xymax+xymin)/2. + np.maximum(np.max(xymax-xymin), 12)
ax.set_xlim([xy1[0], xy2[0]])
ax.set_ylim([xy1[1], xy2[1]])
ax.set_axis_off()
file_name = os.path.join(output_dir, 'trajectory_{:d}.png'.format(global_step))
with fu.fopen(file_name, 'w') as f:
fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
plt.close(fig)
return None
def add_default_summaries(mode, arop_full_summary_iters, summarize_ops,
summarize_names, to_aggregate, action_prob_op,
input_tensors, scope_name):
assert(mode == 'train' or mode == 'val' or mode == 'test'), \
    'add_default_summaries mode must be one of train, val or test.'
s_ops = tf_utils.get_default_summary_ops()
if mode == 'train':
s_ops.summary_ops, s_ops.print_summary_ops, additional_return_ops, \
arop_summary_iters, arop_eval_fns = tf_utils.simple_summaries(
summarize_ops, summarize_names, mode, to_aggregate=False,
scope_name=scope_name)
s_ops.additional_return_ops += additional_return_ops
s_ops.arop_summary_iters += arop_summary_iters
s_ops.arop_eval_fns += arop_eval_fns
elif mode == 'val':
s_ops.summary_ops, s_ops.print_summary_ops, additional_return_ops, \
arop_summary_iters, arop_eval_fns = tf_utils.simple_summaries(
summarize_ops, summarize_names, mode, to_aggregate=to_aggregate,
scope_name=scope_name)
s_ops.additional_return_ops += additional_return_ops
s_ops.arop_summary_iters += arop_summary_iters
s_ops.arop_eval_fns += arop_eval_fns
elif mode == 'test':
s_ops.summary_ops, s_ops.print_summary_ops, additional_return_ops, \
arop_summary_iters, arop_eval_fns = tf_utils.simple_summaries(
[], [], mode, to_aggregate=[], scope_name=scope_name)
s_ops.additional_return_ops += additional_return_ops
s_ops.arop_summary_iters += arop_summary_iters
s_ops.arop_eval_fns += arop_eval_fns
if mode == 'val':
arop = s_ops.additional_return_ops
arop += [[action_prob_op, input_tensors['train']['action']]]
arop += [[input_tensors['step']['loc_on_map'],
input_tensors['common']['goal_loc'],
input_tensors['step']['gt_dist_to_goal']]]
arop += [[input_tensors['step']['loc_on_map'],
input_tensors['common']['orig_maps'],
input_tensors['common']['goal_loc']]]
s_ops.arop_summary_iters += [-1, arop_full_summary_iters,
arop_full_summary_iters]
s_ops.arop_eval_fns += [eval_ap, eval_dist, plot_trajectories]
elif mode == 'test':
arop = s_ops.additional_return_ops
arop += [[input_tensors['step']['loc_on_map'],
input_tensors['common']['goal_loc'],
input_tensors['step']['gt_dist_to_goal']]]
arop += [[input_tensors['step']['gt_dist_to_goal']]]
arop += [[input_tensors['step']['loc_on_map'],
input_tensors['common']['goal_loc'],
input_tensors['step']['gt_dist_to_goal'],
input_tensors['step']['node_ids'],
input_tensors['step']['perturbs']]]
arop += [[input_tensors['step']['loc_on_map'],
input_tensors['common']['orig_maps'],
input_tensors['common']['goal_loc']]]
s_ops.arop_summary_iters += [-1, -1, -1, arop_full_summary_iters]
s_ops.arop_eval_fns += [eval_dist, save_d_at_t, save_all,
plot_trajectories]
return s_ops
|
mit
|
huguesv/PTVS
|
Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/tqdm/__init__.py
|
8
|
1268
|
from ._tqdm import tqdm
from ._tqdm import trange
from ._tqdm_gui import tqdm_gui
from ._tqdm_gui import tgrange
from ._tqdm_pandas import tqdm_pandas
from ._main import main
from ._monitor import TMonitor, TqdmSynchronisationWarning
from ._version import __version__ # NOQA
from ._tqdm import TqdmTypeError, TqdmKeyError, TqdmWarning, \
TqdmDeprecationWarning, TqdmExperimentalWarning, \
TqdmMonitorWarning
__all__ = ['tqdm', 'tqdm_gui', 'trange', 'tgrange', 'tqdm_pandas',
'tqdm_notebook', 'tnrange', 'main', 'TMonitor',
'TqdmTypeError', 'TqdmKeyError',
'TqdmWarning', 'TqdmDeprecationWarning',
'TqdmExperimentalWarning',
'TqdmMonitorWarning', 'TqdmSynchronisationWarning',
'__version__']
def tqdm_notebook(*args, **kwargs): # pragma: no cover
"""See tqdm._tqdm_notebook.tqdm_notebook for full documentation"""
from ._tqdm_notebook import tqdm_notebook as _tqdm_notebook
return _tqdm_notebook(*args, **kwargs)
def tnrange(*args, **kwargs): # pragma: no cover
"""
A shortcut for tqdm_notebook(xrange(*args), **kwargs).
On Python3+ range is used instead of xrange.
"""
from ._tqdm_notebook import tnrange as _tnrange
return _tnrange(*args, **kwargs)
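# Minimal usage sketch (added for illustration, not part of the original
# module); it assumes a Jupyter notebook environment in which the
# ipywidgets-based progress bars can render:
#
#     from tqdm import tnrange, tqdm_notebook
#     for i in tnrange(3, desc='outer'):
#         for _ in tqdm_notebook(range(100), desc='inner'):
#             pass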
|
apache-2.0
|
dougalsutherland/skl-groups
|
skl_groups/tests/test_features.py
|
1
|
7051
|
from __future__ import division
from copy import copy, deepcopy
from functools import partial
import os
import sys
import warnings
import numpy as np
from sklearn.externals.six.moves import xrange, cPickle as pickle
from nose.tools import assert_raises
if __name__ == '__main__':
# make this copy of skl_groups importable
_this_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.dirname(os.path.dirname(_this_dir)))
from skl_groups.features import Features
wrong_type = partial(assert_raises, (TypeError, IndexError))
################################################################################
def test_features_basic():
bags = [np.random.normal(size=(np.random.randint(10, 100), 10))
for _ in xrange(20)]
assert repr(Features([[[8, 9], [12, 12]]]))
feats = Features(bags, copy=False, stack=False, label=np.arange(20))
assert len(feats) == 20
assert feats.total_points == sum(bag.shape[0] for bag in bags)
assert np.all(feats[3] == bags[3])
assert np.all(feats.label == np.arange(20))
assert repr(feats)
assert feats.dtype == np.float64
assert feats != bags
assert feats.bare() == bags
sub = feats[[5, 2]]
assert np.all(sub.label == [5, 2])
assert np.all(sub[0] == feats[5])
assert np.all(sub[1] == feats[2])
assert repr(sub)
feats[4][0, 0] = 1000
assert bags[4][0, 0] == 1000
feats.make_stacked()
assert feats != bags
assert feats.bare() == bags
assert len(feats) == 20
assert feats.total_points == sum(bag.shape[0] for bag in bags)
assert np.all(feats[3] == bags[3])
assert np.all(feats.label == np.arange(20))
feats[0][0, 0] = -800
assert feats.features[0][0, 0] == -800
assert feats.stacked_features[0, 0] == -800
assert repr(feats)
wrong_type(lambda: feats['label'])
wrong_type(lambda: feats[['label']])
wrong_type(lambda: feats[[3, 'label']])
cop = feats.copy()
assert cop == feats
assert cop.stacked
cop.make_stacked()
assert cop == feats
cop[0][0, 0] = 12
assert cop != feats
assert repr(cop)
fs = lambda *a, **kw: partial(Features, *a, **kw)
bags = np.random.normal(size=(10, 5))
wrong_type(fs(bags))
wrong_type(fs(bags, [[4], [12]]))
wrong_type(fs(bags, []))
wrong_type(fs(bags, [-3, 13]))
wrong_type(fs(bags, [7.5, 2.5]))
wrong_type(fs(bags, [7, 2]))
wrong_type(fs(np.zeros((10, 0)), [7, 3]))
bags = [np.random.normal(size=(5, 8)), np.random.normal(size=(6, 8))]
wrong_type(fs(bags, [5, 6]))
assert np.all(
Features([[5, 6], [[7, 9], [0, 0]]])[0] == np.reshape([5, 6], (1, 2)))
wrong_type(fs([ [[[5]]] ]))
wrong_type(fs([["hello", "there"]]))
wrong_type(fs([[np.arange(10, dtype=int)], [np.arange(10, dtype=float)]]))
wrong_type(fs([np.random.randn(8, 7), np.random.randn(0, 7)]))
assert np.all(
Features([[[1, 2]], [[3, 4]]], stack=True).stacked_features
== [[1, 2], [3, 4]])
assert_raises(ValueError, fs(bags, labels=np.arange(3)))
with warnings.catch_warnings(record=True) as w:
Features(bags, total_points=[1, 2])
assert len(w) == 1
def test_copy_constructor():
bags = [np.random.normal(size=(np.random.randint(10, 100), 10))
for _ in xrange(20)]
unstacked = Features(bags, label=np.arange(20))
oth_unstacked = Features(unstacked)
assert oth_unstacked.label is unstacked.label
assert oth_unstacked.features[0] is unstacked.features[0]
assert oth_unstacked == unstacked
oth_unstacked_bare = Features(unstacked, bare=True)
assert oth_unstacked_bare == bags
assert oth_unstacked_bare.bare() == oth_unstacked_bare
oth_unstacked = Features(unstacked, label=np.ones(20))
assert np.all(oth_unstacked.label == 1)
oth_unstacked2 = Features(unstacked, bare=True, label=np.arange(20))
assert oth_unstacked2 == unstacked
oth_unstacked_copy = Features(unstacked, copy=True)
assert oth_unstacked_copy == unstacked
assert not np.may_share_memory(oth_unstacked_copy.features[0],
unstacked.features[0])
stacked = unstacked.copy()
stacked.make_stacked()
oth_stacked = Features(stacked)
assert oth_stacked == stacked
def test_copying():
bags = [np.random.normal(size=(np.random.randint(10, 100), 10))
for _ in xrange(20)]
unstacked = Features(bags, copy=False, stack=False, label=np.arange(20))
stacked = Features(bags, stack=True, label=np.arange(20))
assert unstacked == stacked
unstacked_copy = copy(unstacked)
assert not unstacked_copy.stacked
assert stacked == unstacked_copy == unstacked
assert unstacked_copy.label is unstacked.label
assert not np.may_share_memory(unstacked[0], unstacked_copy[0])
unstacked_deep = deepcopy(unstacked)
assert not unstacked_deep.stacked
assert stacked == unstacked_deep == unstacked
assert unstacked_deep.label is not unstacked.label
stacked_copy = copy(stacked)
assert stacked_copy.stacked
assert stacked == stacked_copy == unstacked
assert stacked_copy.label is stacked.label
stacked_deep = deepcopy(stacked)
assert stacked_deep.stacked
assert stacked == stacked_deep == unstacked
assert stacked_deep.label is not stacked.label
unstacked_stacked = unstacked.copy(stack=True)
assert unstacked_stacked.stacked
assert stacked == unstacked_stacked == stacked
assert unstacked_stacked.label is unstacked.label
unstacked_pickled = pickle.loads(pickle.dumps(unstacked))
assert unstacked == unstacked_pickled
assert not unstacked_pickled.stacked
assert unstacked_pickled.label is not unstacked.label
stacked_pickled = pickle.loads(pickle.dumps(stacked))
assert stacked == stacked_pickled
assert stacked_pickled.stacked
assert stacked_pickled.label is not stacked.label
def test_feats_add():
bags = [np.random.normal(size=(np.random.randint(10, 100), 10))
for _ in xrange(20)]
labels = np.arange(20)
first_15 = Features(bags[:15], labels=labels[:15])
last_5 = Features(bags[15:], labels=labels[15:])
plus = first_15 + last_5
assert len(plus) == 20
assert plus[:15] == first_15
plus_list = first_15 + bags[15:]
assert len(plus_list) == 20
assert not plus_list.meta
assert np.all(plus_list[16] == bags[16])
plus_singlelist = first_15 + [bags[18]]
assert np.all(plus_singlelist[15] == bags[18])
rplus_list = bags[15:] + first_15
assert np.all(rplus_list[0] == bags[15])
rplus_singlelist = [bags[15]] + first_15
assert np.all(rplus_singlelist[0] == bags[15])
assert rplus_singlelist[1:] == first_15.bare()
wrong_type(lambda: first_15 + 12)
wrong_type(lambda: 12 + first_15)
assert_raises(ValueError, lambda: first_15 + np.asarray(bags))
################################################################################
if __name__ == '__main__':
import nose
nose.main()
|
bsd-3-clause
|
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated
|
python-packages/pyRiemann-0.2.2/build/lib.linux-x86_64-2.7/pyriemann/clustering.py
|
2
|
5060
|
import numpy
from sklearn.base import BaseEstimator, ClassifierMixin, TransformerMixin, ClusterMixin
from sklearn.cluster.k_means_ import _init_centroids
from sklearn.externals.joblib import Parallel
from sklearn.externals.joblib import delayed
from .utils.mean import mean_covariance
from .utils.distance import distance
from .classification import MDM
#######################################################################
def _fit_single(
X,
y=None,
n_clusters=2,
init='random',
random_state=None,
metric='riemann',
max_iter=100,
tol=1e-4):
# init random state if provided
mdm = MDM(metric=metric)
mdm.covmeans = _init_centroids(
X, n_clusters, init, random_state=random_state)
if y is not None:
mdm.classes = numpy.unique(y)
else:
mdm.classes = numpy.arange(n_clusters)
labels = mdm.predict(X)
k = 0
while True:
old_labels = labels.copy()
mdm.fit(X, old_labels)
dist = mdm._predict_distances(X)
labels = mdm.classes[dist.argmin(axis=1)]
k += 1
if (k > max_iter) | (numpy.mean(labels == old_labels) > (1 - tol)):
break
inertia = sum([sum(dist[labels == mdm.classes[i], i])
for i in range(len(mdm.classes))])
return labels, inertia, mdm
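# Note added for clarity (not part of the original source): _fit_single runs a
# Lloyd-style k-means pass on SPD matrices. Centroids are the per-cluster means
# computed by MDM under the chosen Riemannian metric, assignment uses the
# corresponding Riemannian distance, and iteration stops once more than
# (1 - tol) of the labels are unchanged or max_iter is exceeded; inertia is the
# summed distance of each sample to its assigned centroid.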
#######################################################################
class Kmeans(BaseEstimator, ClassifierMixin, ClusterMixin, TransformerMixin):
def __init__(
self,
n_clusters=2,
max_iter=100,
metric='riemann',
random_state=None,
init='random',
n_init=10,
n_jobs=1,
tol=1e-4):
self.metric = metric
self.n_clusters = n_clusters
self.max_iter = max_iter
self.mdm = None
self.seed = random_state
self.init = init
self.n_init = n_init
self.tol = tol
self.n_jobs = n_jobs
def fit(self, X, y=None):
        if (self.init != 'random') or (self.n_init == 1):
# no need to iterate if init is not random
labels, inertia, mdm = _fit_single(X, y,
n_clusters=self.n_clusters,
init=self.init,
random_state=self.seed,
metric=self.metric,
max_iter=self.max_iter,
tol=self.tol)
else:
numpy.random.seed(self.seed)
seeds = numpy.random.randint(
numpy.iinfo(numpy.int32).max, size=self.n_init)
            if self.n_jobs == 1:
                # accumulate the result of every random initialisation
                res = []
                for i in range(self.n_init):
                    res.append(_fit_single(X, y,
                                           n_clusters=self.n_clusters,
                                           init=self.init,
                                           random_state=seeds[i],
                                           metric=self.metric,
                                           max_iter=self.max_iter,
                                           tol=self.tol))
                labels, inertia, mdm = zip(*res)
else:
res = Parallel(n_jobs=self.n_jobs, verbose=0)(
delayed(_fit_single)(X, y,
n_clusters=self.n_clusters,
init=self.init,
random_state=seed,
metric=self.metric,
max_iter=self.max_iter,
tol=self.tol)
for seed in seeds)
labels, inertia, mdm = zip(*res)
best = numpy.argmin(inertia)
mdm = mdm[best]
labels = labels[best]
            inertia = inertia[best]
self.mdm = mdm
self.inertia = inertia
self.labels_ = labels
return self
def predict(self, X):
return self.mdm.predict(X)
def transform(self, X):
return self.mdm.transform(X)
def covmeans(self):
return self.mdm.covmeans
#######################################################################
class KmeansPerClassTransform(BaseEstimator, TransformerMixin):
def __init__(self, n_clusters=2, **params):
params['n_clusters'] = n_clusters
self.km = Kmeans(**params)
self.metric = self.km.metric
self.covmeans = []
def fit(self, X, y):
self.classes = numpy.unique(y)
nclasses = len(self.classes)
for c in self.classes:
self.km.fit(X[y == c])
self.covmeans.extend(self.km.covmeans())
return self
def transform(self, X):
mdm = MDM(metric=self.metric)
mdm.covmeans = self.covmeans
return mdm._predict_distances(X)
|
bsd-3-clause
|
janelia-idf/elf
|
tests/filter.py
|
4
|
1137
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
def update_line(num, data, line):
line.set_data(data[...,:num])
return line,
fig1 = plt.figure()
data = np.random.rand(2, 25)
l, = plt.plot([], [], 'r-')
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.xlabel('x')
plt.title('test')
line_ani = animation.FuncAnimation(fig1, update_line, 25, fargs=(data, l),
interval=50, blit=True)
#line_ani.save('lines.mp4')
fig2 = plt.figure()
x = np.arange(-9, 10)
y = np.arange(-9, 10).reshape(-1, 1)
base = np.hypot(x, y)
ims = []
for add in np.arange(15):
ims.append((plt.pcolor(x, y, base + add, norm=plt.Normalize(0, 30)),))
im_ani = animation.ArtistAnimation(fig2, ims, interval=50, repeat_delay=3000,
blit=True)
#im_ani.save('im.mp4', metadata={'artist':'Guido'})
plt.show()
# import matplotlib.pyplot as plot
# import numpy
# from numpy.polynomial.polynomial import polyfit,polyadd,Polynomial
# -----------------------------------------------------------------------------------------
# if __name__ == '__main__':
|
bsd-3-clause
|
mommermi/photometrypipeline
|
pp_photometry.py
|
2
|
20265
|
#!/usr/bin/env python3
""" PP_PHOTOMETRY - run curve-of-growth analysis on image files,
identify optimum aperture radius, and redo photometry
v1.0: 2015-12-30, [email protected]
"""
# Photometry Pipeline
# Copyright (C) 2016-2018 Michael Mommert, [email protected]
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import numpy
import os
import sys
import logging
import argparse
from astropy.io import fits
import matplotlib
matplotlib.use('Agg')
from astroquery.jplhorizons import Horizons
# only import if Python3 is used
if sys.version_info > (3, 0):
from builtins import str
from builtins import range
# pipeline-specific modules
import _pp_conf
import pp_extract
from catalog import *
from toolbox import *
from diagnostics import photometry as diag
# setup logging
logging.basicConfig(filename=_pp_conf.log_filename,
level=_pp_conf.log_level,
format=_pp_conf.log_formatline,
datefmt=_pp_conf.log_datefmt)
def curve_of_growth_analysis(filenames, parameters,
nodeblending=False, display=False,
diagnostics=False):
output = {}
obsparam = parameters['obsparam']
logging.info('starting photometry with parameters: %s' %
(', '.join([('%s: %s' % (var, str(val))) for
var, val in list(locals().items())])))
# re-extract sources for curve-of-growth analysis
aprads = parameters['aprad']
if not isinstance(aprads, list) and not isinstance(aprads, numpy.ndarray):
print('need a list of aprads...')
os.abort()
logging.info('run pp_extract using %d apertures' % len(aprads))
print('* extract sources from %d images using %d apertures' %
(len(filenames), len(aprads)))
extractparameters = {'sex_snr': parameters['sex_snr'],
'source_minarea': parameters['source_minarea'],
'paramfile': _pp_conf.rootpath
+ '/setup/twentyapertures.sexparam',
'aprad': aprads, 'telescope': parameters['telescope'],
'nodeblending': nodeblending,
'quiet': False}
extraction = pp_extract.extract_multiframe(filenames, extractparameters)
extraction = [e for e in extraction if len(e) > 0]
# curve-of-growth analysis
# arrays for accumulating source information as a function of aprad
background_flux = [] # numpy.zeros(len(aprads))
target_flux = [] # numpy.zeros(len(aprads))
background_snr = [] # numpy.zeros(len(aprads))
target_snr = [] # numpy.zeros(len(aprads))
for filename in filenames:
if display:
print('processing curve-of-growth for frame %s' % filename)
if not parameters['background_only']:
hdu = fits.open(filename, ignore_missing_end=True)
# pull target coordinates from Horizons
targetname = hdu[0].header[obsparam['object']]
if parameters['manobjectname'] is not None:
targetname = parameters['manobjectname'].translate(
_pp_conf.target2filename)
image = hdu[0].data
# derive MIDTIMJD, if not yet in the FITS header
obsparam = parameters['obsparam']
if not 'MIDTIMJD' in hdu[0].header:
exptime = float(hdu[0].header[obsparam['exptime']])
if obsparam['date_keyword'].find('|') == -1:
date = hdu[0].header[obsparam['date_keyword']]
date = dateobs_to_jd(date) + exptime/2./86400.
else:
date_key = obsparam['date_keyword'].split('|')[0]
time_key = obsparam['date_keyword'].split('|')[1]
date = hdu[0].header[date_key]+'T' +\
hdu[0].header[time_key]
date = dateobs_to_jd(date) + exptime/2./86400.
else:
date = hdu[0].header['MIDTIMJD']
# call HORIZONS to get target coordinates
obj = Horizons(targetname.replace('_', ' '),
epochs=date,
location=str(obsparam['observatory_code']))
try:
eph = obj.ephemerides()
n = len(eph)
except ValueError:
print('Target (%s) not a small body' % targetname)
logging.warning('Target (%s) not a small body' % targetname)
n = None
if n is None or n == 0:
                logging.warning('WARNING: No position from Horizons! ' +
                                'Name (%s) correct?' % targetname)
logging.warning('HORIZONS call: %s' % obj.uri)
logging.info('proceeding with background sources analysis')
parameters['background_only'] = True
else:
logging.info('ephemerides for %s pulled from Horizons' %
targetname)
target_ra, target_dec = eph[0]['RA'], eph[0]['DEC']
# pull data from LDAC file
ldac_filename = filename[:filename.find('.fit')]+'.ldac'
data = catalog('Sextractor_LDAC')
data.read_ldac(ldac_filename, maxflag=3)
if data.shape[0] == 0:
continue
# identify target and extract its curve-of-growth
n_target_identified = 0
if not parameters['background_only']:
residuals = numpy.sqrt((data['ra_deg']-target_ra)**2 +
(data['dec_deg']-target_dec)**2)
target_idx = numpy.argmin(residuals)
if residuals[target_idx] > _pp_conf.pos_epsilon/3600:
logging.warning(('WARNING: frame %s, large residual to ' +
'HORIZONS position of %s: %f arcsec; ' +
'ignore this frame') %
(filename, targetname,
residuals[numpy.argmin(residuals)]*3600.))
else:
target_flux.append(data[target_idx]['FLUX_'+_pp_conf.photmode] /
max(data[target_idx][
'FLUX_'+_pp_conf.photmode]))
target_snr.append(
data[target_idx]['FLUX_'+_pp_conf.photmode] /
data[target_idx]['FLUXERR_'+_pp_conf.photmode] /
max(data[target_idx]['FLUX_'+_pp_conf.photmode] /
data[target_idx]['FLUXERR_'+_pp_conf.photmode]))
n_target_identified += 1
# extract background source fluxes and snrs
# assume n_background_sources >> 1, do not reject target
if not parameters['target_only']:
# n_src = data.shape[0] # use all sources
n_src = 50 # use only 50 sources
for idx, src in enumerate(data.data[:n_src]):
if (numpy.any(numpy.isnan(src['FLUX_'+_pp_conf.photmode])) or
numpy.any(numpy.isnan(src['FLUXERR_'+_pp_conf.photmode]))
or src['FLAGS'] > 3):
continue
# create growth curve
background_flux.append(src['FLUX_'+_pp_conf.photmode] /
max(src['FLUX_'+_pp_conf.photmode]))
background_snr.append(src['FLUX_'+_pp_conf.photmode] /
src['FLUXERR_'+_pp_conf.photmode] /
max(src['FLUX_'+_pp_conf.photmode] /
src['FLUXERR_'+_pp_conf.photmode]))
# investigate curve-of-growth
logging.info('investigate curve-of-growth based on %d frames' %
len(filenames))
# combine results
n_target = len(target_flux)
if n_target > 0:
target_flux = (numpy.median(target_flux, axis=0),
numpy.std(target_flux, axis=0)/numpy.sqrt(n_target))
target_snr = numpy.median(target_snr, axis=0)
else:
target_flux = (numpy.zeros(len(aprads)), numpy.zeros(len(aprads)))
target_snr = numpy.zeros(len(aprads))
n_background = len(background_flux)
if n_background > 0:
background_flux = (numpy.median(background_flux, axis=0),
numpy.std(background_flux, axis=0) /
numpy.sqrt(n_background))
background_snr = numpy.median(background_snr, axis=0)
else:
background_flux = (numpy.zeros(len(aprads)), numpy.zeros(len(aprads)))
background_snr = numpy.zeros(len(aprads))
if n_target == 0:
        logging.info('No target fluxes available, using background sources '
                     'only')
parameters['background_only'] = True
if n_background == 0:
        logging.info('No background fluxes available, using target only')
parameters['target_only'] = True
# find optimum aperture radius
if parameters['target_only']:
aprad_strategy = 'smallest target aprad that meets fluxlimit criterion'
optimum_aprad_idx = numpy.argmin(numpy.fabs(target_flux[0] -
_pp_conf.fluxlimit_aprad))
elif parameters['background_only']:
aprad_strategy = 'smallest background aprad that meets fluxlimit ' + \
'criterion'
optimum_aprad_idx = numpy.argmin(numpy.fabs(background_flux[0] -
_pp_conf.fluxlimit_aprad))
else:
# flux_select: indices where target+background fluxes > fluxlimit
flux_select = numpy.where((target_flux[0] > _pp_conf.fluxlimit_aprad) &
(background_flux[0] > _pp_conf.fluxlimit_aprad))[0]
flux_res = numpy.fabs(target_flux[0][flux_select] -
background_flux[0][flux_select])
if numpy.min(flux_res) < _pp_conf.fluxmargin_aprad:
aprad_strategy = 'target+background fluxes > fluxlimit, ' + \
'flux difference < margin'
optimum_aprad_idx = flux_select[numpy.where(flux_res <
_pp_conf.fluxmargin_aprad)[0][0]]
else:
aprad_strategy = 'target+background fluxes > fluxlimit, ' + \
'flux difference minimal'
optimum_aprad_idx = flux_select[numpy.argmin(flux_res)]
optimum_aprad = parameters['aprad'][optimum_aprad_idx]
output['aprad_strategy'] = aprad_strategy
output['optimum_aprad'] = optimum_aprad
output['pos_epsilon'] = _pp_conf.pos_epsilon
output['fluxlimit_aprad'] = _pp_conf.fluxlimit_aprad
output['fluxmargin_aprad'] = _pp_conf.fluxmargin_aprad
output['n_target'] = len(target_flux[0])
output['n_bkg'] = len(background_flux[0])
output['target_flux'] = target_flux
output['target_snr'] = target_snr
output['background_flux'] = background_flux
output['background_snr'] = background_snr
output['parameters'] = parameters
# write results to file
outf = open('aperturephotometry_curveofgrowth.dat', 'w')
outf.writelines('# background target flux\n' +
'# rad flux sigma snr flux sigma snr residual\n')
for i in range(len(parameters['aprad'])):
outf.writelines(('%5.2f %5.3f %5.3f %4.2f %6.3f %5.3f %4.2f ' +
'%6.3f\n') %
(parameters['aprad'][i], background_flux[0][i],
background_flux[1][i], background_snr[i],
target_flux[0][i], target_flux[1][i],
target_snr[i],
target_flux[0][i]-background_flux[0][i]))
outf.close()
# extraction content
#
# -> see pp_extract.py
#
###
# output content
#
# { 'aprad_strategy' : optimum aperture finding strategy,
# 'optimum_aprad' : optimum aperature radius,
# 'pos_epsilon' : required positional uncertainty ("),
# 'fluxlimit_aprad' : min flux for both target and background,
# 'fluxmargin_aprad': max flux difference between target and background,
# 'n_target' : number of frames with target flux measurements,
# 'n_bkg' : number of frames with background measurements,
# 'target_flux' : target fluxes as a function of aprad,
# 'target_snr' : target snrs as a function of aprad,
# 'background_flux' : background fluxes as a function of aprad,
# 'background_snr' : background snrs as a function of aprad,
# 'parameters' : source extractor parameters
# }
###
# diagnostics
if diagnostics:
if display:
print('creating diagnostic output')
logging.info(' ~~~~~~~~~ creating diagnostic output')
diag.add_photometry(output, extraction)
# update image headers
for filename in filenames:
hdu = fits.open(filename, mode='update', ignore_missing_end=True)
hdu[0].header['APRAD'] = (optimum_aprad, 'aperture phot radius (px)')
hdu[0].header['APIDX'] = (optimum_aprad_idx, 'optimum aprad index')
hdu.flush()
hdu.close()
# display results
if display:
print('\n#################################### PHOTOMETRY SUMMARY:\n###')
print('### best-fit aperture radius %5.2f (px)' % (optimum_aprad))
print('###\n#####################################################\n')
logging.info('==> best-fit aperture radius: %3.1f (px)' % (optimum_aprad))
return output
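# Hedged illustration (not part of the original pipeline) of how the returned
# dictionary could be consumed by a caller; the key names follow the comment
# block above:
#
#     cog = curve_of_growth_analysis(filenames, parameters)
#     print('adopting aperture radius %.2f px (%s)'
#           % (cog['optimum_aprad'], cog['aprad_strategy']))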
def photometry(filenames, sex_snr, source_minarea, aprad,
manobjectname, background_only, target_only,
telescope, obsparam, nodeblending=False,
display=False,
diagnostics=False):
"""
wrapper for photometry analysis
"""
# photometry parameters
photpar = {'sex_snr': sex_snr,
'source_minarea': source_minarea,
'manobjectname': manobjectname,
'background_only': background_only,
'target_only': target_only,
'obsparam': obsparam,
'telescope': telescope,
'nodeblending': nodeblending,
'quiet': not display}
# do curve-of-growth analysis if aprad not provided
for filename in filenames:
hdu = fits.open(filename, mode='update',
ignore_missing_end=True)
hdu[0].header['PHOTMODE'] = (_pp_conf.photmode,
'PP photometry mode')
hdu.flush()
hdu.close()
if _pp_conf.photmode == 'APER':
if aprad is None:
# aperture radius list
aprads = numpy.linspace(obsparam['aprad_range'][0],
obsparam['aprad_range'][1], 20)
photpar['aprad'] = aprads
cog = curve_of_growth_analysis(filenames, photpar,
nodeblending=nodeblending,
display=display,
diagnostics=diagnostics)
aprad = cog['optimum_aprad']
else:
# add manually selected aprad to image headers
for filename in filenames:
hdu = fits.open(filename, mode='update',
ignore_missing_end=True)
hdu[0].header['APRAD'] = (aprad,
'manual aperture phot radius (px)')
hdu.flush()
hdu.close()
# run extract using (optimum) aprad
photpar['aprad'] = round(aprad, 2)
photpar['paramfile'] = (_pp_conf.rootpath +
'/setup/singleaperture.sexparam')
logging.info('extract sources using optimum aperture from %d images' %
len(filenames))
if display:
print(('* extract sources from %d images using aperture '
+ 'radius %4.2fpx') %
(len(filenames), aprad))
else:
photpar['aprad'] = None
photpar['paramfile'] = (_pp_conf.rootpath +
'/setup/singleaperture.sexparam')
logging.info('extract sources using ' + _pp_conf.photmode +
' photometry')
if display:
print(('* extract sources from %d images using '
+ _pp_conf.photmode + ' photometry') %
len(filenames))
photpar['photmode'] = _pp_conf.photmode
pp_extract.extract_multiframe(filenames, photpar)
logging.info('Done! -----------------------------------------------------')
if 'cog' in list(locals().keys()):
return cog
else:
return None
# MAIN
if __name__ == '__main__':
# define command line arguments
parser = argparse.ArgumentParser(description='automated photometry')
parser.add_argument('-snr', help='sextractor SNR threshold for ' +
'photometry catalog', default=2)
    parser.add_argument('-minarea', help='sextractor minimum source area for ' +
                        'photometry catalog', default=0)
parser.add_argument('-aprad', help='aperture radius for photometry (px)',
default=None)
parser.add_argument('-target',
help='object name override (e.g., 2015_AB123)',
default=None)
parser.add_argument('-background_only',
help='find aperture for background only',
action="store_true")
parser.add_argument('-target_only', help='find aperture for target only',
action="store_true")
parser.add_argument('images', help='images to process', nargs='+')
parser.add_argument('-nodeblending',
help='deactivate deblending in source extraction',
action="store_true")
args = parser.parse_args()
sex_snr = float(args.snr)
source_minarea = float(args.minarea)
aprad = float(args.aprad) if args.aprad is not None else None
manobjectname = args.target
background_only = args.background_only
target_only = args.target_only
nodeblending = args.nodeblending
filenames = args.images
# check if input filenames is actually a list
if len(filenames) == 1:
if filenames[0].find('.lst') > -1 or filenames[0].find('.list') > -1:
filenames = [filename[:-1] for filename in open(filenames[0], 'r')
.readlines()]
# obtain telescope information
hdu = fits.open(filenames[0], ignore_missing_end=True)
try:
telescope = hdu[0].header['TEL_KEYW']
except KeyError:
print('ERROR: cannot find telescope keyword in image header;' +
'has this image run through pp_prepare?')
sys.exit(0)
obsparam = _pp_conf.telescope_parameters[telescope]
if type(manobjectname) == str:
manobjectname = manobjectname.translate(_pp_conf.target2filename)
# set minarea from obsparam
if source_minarea == 0:
source_minarea = obsparam['source_minarea']
phot = photometry(filenames, sex_snr, source_minarea, aprad,
manobjectname, background_only, target_only,
telescope, obsparam,
nodeblending=nodeblending, display=True,
diagnostics=True)
|
gpl-3.0
|
tapomayukh/projects_in_python
|
classification/Classification_with_HMM/Single_Contact_Classification/Variable_Stiffness_Variable_Velocity/HMM/with 0.8s/hmm_crossvalidation_force_10_states.py
|
1
|
27332
|
# Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_HMM/Variable_Stiffness_Variable_Velocity/')
from data_variable_hshv2 import Fmat_original_hshv
from data_variable_hslv2 import Fmat_original_hslv
from data_variable_lshv2 import Fmat_original_lshv
from data_variable_lslv2 import Fmat_original_lslv
# Returns mu,sigma for 10 hidden-states from feature-vectors(123,35) for RF,SF,RM,SM models
def feature_to_mu_sigma(fvec):
index = 0
m,n = np.shape(fvec)
#print m,n
mu = np.matrix(np.zeros((10,1)))
sigma = np.matrix(np.zeros((10,1)))
DIVS = m/10
while (index < 10):
m_init = index*DIVS
temp_fvec = fvec[(m_init):(m_init+DIVS),0:]
#if index == 1:
#print temp_fvec
mu[index] = scp.mean(temp_fvec)
sigma[index] = scp.std(temp_fvec)
index = index+1
return mu,sigma
# Returns sequence given raw data
def create_seq(fvec):
m,n = np.shape(fvec)
#print m,n
seq = np.matrix(np.zeros((10,n)))
DIVS = m/10
for i in range(n):
index = 0
while (index < 10):
m_init = index*DIVS
temp_fvec = fvec[(m_init):(m_init+DIVS),i]
#if index == 1:
#print temp_fvec
seq[index,i] = scp.mean(temp_fvec)
index = index+1
return seq
if __name__ == '__main__':
# HMM - Implementation:
F = ghmm.Float() # emission domain of this model
# A - Transition Matrix
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.20, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.2, 0.30, 0.30, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.2, 0.50, 0.30],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.4, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
# pi - initial probabilities per state
pi = [0.1] * 10
# Confusion Matrix
cmat = np.zeros((4,4))
#############################################################################################################################################
# HSHV as testing set and Rest as training set
# Checking the Data-Matrix
mu_rf_hshv,sigma_rf_hshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hslv[0:81,0:15], Fmat_original_lshv[0:81,0:15], Fmat_original_lslv[0:81,0:15]))))
mu_rm_hshv,sigma_rm_hshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hslv[0:81,15:30], Fmat_original_lshv[0:81,15:16], Fmat_original_lslv[0:81,15:28]))))
mu_sf_hshv,sigma_sf_hshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hslv[0:81,30:45], Fmat_original_lshv[0:81,16:23], Fmat_original_lslv[0:81,28:37]))))
mu_sm_hshv,sigma_sm_hshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hslv[0:81,45:56], Fmat_original_lshv[0:81,23:32], Fmat_original_lslv[0:81,37:45]))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_hshv = np.zeros((10,2))
B_rm_hshv = np.zeros((10,2))
B_sf_hshv = np.zeros((10,2))
B_sm_hshv = np.zeros((10,2))
for num_states in range(10):
B_rf_hshv[num_states,0] = mu_rf_hshv[num_states]
B_rf_hshv[num_states,1] = sigma_rf_hshv[num_states]
B_rm_hshv[num_states,0] = mu_rm_hshv[num_states]
B_rm_hshv[num_states,1] = sigma_rm_hshv[num_states]
B_sf_hshv[num_states,0] = mu_sf_hshv[num_states]
B_sf_hshv[num_states,1] = sigma_sf_hshv[num_states]
B_sm_hshv[num_states,0] = mu_sm_hshv[num_states]
B_sm_hshv[num_states,1] = sigma_sm_hshv[num_states]
B_rf_hshv = B_rf_hshv.tolist()
B_rm_hshv = B_rm_hshv.tolist()
B_sf_hshv = B_sf_hshv.tolist()
B_sm_hshv = B_sm_hshv.tolist()
# generate RF, RM, SF, SM models from parameters
model_rf_hshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf_hshv, pi) # Will be Trained
model_rm_hshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm_hshv, pi) # Will be Trained
model_sf_hshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf_hshv, pi) # Will be Trained
model_sm_hshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm_hshv, pi) # Will be Trained
# For Training
total_seq_rf_hshv = np.matrix(np.column_stack((Fmat_original_hslv[0:81,0:15], Fmat_original_lshv[0:81,0:15], Fmat_original_lslv[0:81,0:15])))
total_seq_rm_hshv = np.matrix(np.column_stack((Fmat_original_hslv[0:81,15:30], Fmat_original_lshv[0:81,15:16], Fmat_original_lslv[0:81,15:28])))
total_seq_sf_hshv = np.matrix(np.column_stack((Fmat_original_hslv[0:81,30:45], Fmat_original_lshv[0:81,16:23], Fmat_original_lslv[0:81,28:37])))
total_seq_sm_hshv = np.matrix(np.column_stack((Fmat_original_hslv[0:81,45:56], Fmat_original_lshv[0:81,23:32], Fmat_original_lslv[0:81,37:45])))
train_seq_rf_hshv = (np.array(total_seq_rf_hshv).T).tolist()
train_seq_rm_hshv = (np.array(total_seq_rm_hshv).T).tolist()
train_seq_sf_hshv = (np.array(total_seq_sf_hshv).T).tolist()
train_seq_sm_hshv = (np.array(total_seq_sm_hshv).T).tolist()
#print train_seq_rf_hshv
final_ts_rf_hshv = ghmm.SequenceSet(F,train_seq_rf_hshv)
final_ts_rm_hshv = ghmm.SequenceSet(F,train_seq_rm_hshv)
final_ts_sf_hshv = ghmm.SequenceSet(F,train_seq_sf_hshv)
final_ts_sm_hshv = ghmm.SequenceSet(F,train_seq_sm_hshv)
model_rf_hshv.baumWelch(final_ts_rf_hshv)
model_rm_hshv.baumWelch(final_ts_rm_hshv)
model_sf_hshv.baumWelch(final_ts_sf_hshv)
model_sm_hshv.baumWelch(final_ts_sm_hshv)
# For Testing
total_seq_obj_hshv = Fmat_original_hshv[0:81,:]
rf_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
rm_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
sf_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
sm_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
k = 0
while (k < np.size(total_seq_obj_hshv,1)):
test_seq_obj_hshv = (np.array(total_seq_obj_hshv[0:81,k]).T).tolist()
new_test_seq_obj_hshv = np.array(sum(test_seq_obj_hshv,[]))
#print new_test_seq_obj_hshv
ts_obj_hshv = new_test_seq_obj_hshv
#print np.shape(ts_obj_hshv)
final_ts_obj_hshv = ghmm.EmissionSequence(F,ts_obj_hshv.tolist())
# Find Viterbi Path
path_rf_obj_hshv = model_rf_hshv.viterbi(final_ts_obj_hshv)
path_rm_obj_hshv = model_rm_hshv.viterbi(final_ts_obj_hshv)
path_sf_obj_hshv = model_sf_hshv.viterbi(final_ts_obj_hshv)
path_sm_obj_hshv = model_sm_hshv.viterbi(final_ts_obj_hshv)
obj_hshv = max(path_rf_obj_hshv[1],path_rm_obj_hshv[1],path_sf_obj_hshv[1],path_sm_obj_hshv[1])
if obj_hshv == path_rf_obj_hshv[1]:
rf_hshv[0,k] = 1
elif obj_hshv == path_rm_obj_hshv[1]:
rm_hshv[0,k] = 1
elif obj_hshv == path_sf_obj_hshv[1]:
sf_hshv[0,k] = 1
else:
sm_hshv[0,k] = 1
k = k+1
#print rf_hshv.T
cmat[0][0] = cmat[0][0] + np.sum(rf_hshv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_hshv[0,15:15])
cmat[0][2] = cmat[0][2] + np.sum(rf_hshv[0,15:26])
cmat[0][3] = cmat[0][3] + np.sum(rf_hshv[0,26:33])
cmat[1][0] = cmat[1][0] + np.sum(rm_hshv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_hshv[0,15:15])
cmat[1][2] = cmat[1][2] + np.sum(rm_hshv[0,15:26])
cmat[1][3] = cmat[1][3] + np.sum(rm_hshv[0,26:33])
cmat[2][0] = cmat[2][0] + np.sum(sf_hshv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_hshv[0,15:15])
cmat[2][2] = cmat[2][2] + np.sum(sf_hshv[0,15:26])
cmat[2][3] = cmat[2][3] + np.sum(sf_hshv[0,26:33])
cmat[3][0] = cmat[3][0] + np.sum(sm_hshv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_hshv[0,15:15])
cmat[3][2] = cmat[3][2] + np.sum(sm_hshv[0,15:26])
cmat[3][3] = cmat[3][3] + np.sum(sm_hshv[0,26:33])
#print cmat
#############################################################################################################################################
# HSLV as testing set and Rest as training set
mu_rf_hslv,sigma_rf_hslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:81,0:15], Fmat_original_lshv[0:81,0:15], Fmat_original_lslv[0:81,0:15]))))
mu_rm_hslv,sigma_rm_hslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:81,15:15], Fmat_original_lshv[0:81,15:16], Fmat_original_lslv[0:81,15:28]))))
mu_sf_hslv,sigma_sf_hslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:81,15:26], Fmat_original_lshv[0:81,16:23], Fmat_original_lslv[0:81,28:37]))))
mu_sm_hslv,sigma_sm_hslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:81,26:33], Fmat_original_lshv[0:81,23:32], Fmat_original_lslv[0:81,37:45]))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_hslv = np.zeros((10,2))
B_rm_hslv = np.zeros((10,2))
B_sf_hslv = np.zeros((10,2))
B_sm_hslv = np.zeros((10,2))
for num_states in range(10):
B_rf_hslv[num_states,0] = mu_rf_hslv[num_states]
B_rf_hslv[num_states,1] = sigma_rf_hslv[num_states]
B_rm_hslv[num_states,0] = mu_rm_hslv[num_states]
B_rm_hslv[num_states,1] = sigma_rm_hslv[num_states]
B_sf_hslv[num_states,0] = mu_sf_hslv[num_states]
B_sf_hslv[num_states,1] = sigma_sf_hslv[num_states]
B_sm_hslv[num_states,0] = mu_sm_hslv[num_states]
B_sm_hslv[num_states,1] = sigma_sm_hslv[num_states]
B_rf_hslv = B_rf_hslv.tolist()
B_rm_hslv = B_rm_hslv.tolist()
B_sf_hslv = B_sf_hslv.tolist()
B_sm_hslv = B_sm_hslv.tolist()
model_rf_hslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf_hslv, pi) # Will be Trained
model_rm_hslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm_hslv, pi) # Will be Trained
model_sf_hslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf_hslv, pi) # Will be Trained
model_sm_hslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm_hslv, pi) # Will be Trained
# For Training
total_seq_rf_hslv = np.matrix(np.column_stack((Fmat_original_hshv[0:81,0:15], Fmat_original_lshv[0:81,0:15], Fmat_original_lslv[0:81,0:15])))
total_seq_rm_hslv = np.matrix(np.column_stack((Fmat_original_hshv[0:81,15:15], Fmat_original_lshv[0:81,15:16], Fmat_original_lslv[0:81,15:28])))
total_seq_sf_hslv = np.matrix(np.column_stack((Fmat_original_hshv[0:81,15:26], Fmat_original_lshv[0:81,16:23], Fmat_original_lslv[0:81,28:37])))
total_seq_sm_hslv = np.matrix(np.column_stack((Fmat_original_hshv[0:81,26:33], Fmat_original_lshv[0:81,23:32], Fmat_original_lslv[0:81,37:45])))
train_seq_rf_hslv = (np.array(total_seq_rf_hslv).T).tolist()
train_seq_rm_hslv = (np.array(total_seq_rm_hslv).T).tolist()
train_seq_sf_hslv = (np.array(total_seq_sf_hslv).T).tolist()
train_seq_sm_hslv = (np.array(total_seq_sm_hslv).T).tolist()
#print train_seq_rf_hslv
final_ts_rf_hslv = ghmm.SequenceSet(F,train_seq_rf_hslv)
final_ts_rm_hslv = ghmm.SequenceSet(F,train_seq_rm_hslv)
final_ts_sf_hslv = ghmm.SequenceSet(F,train_seq_sf_hslv)
final_ts_sm_hslv = ghmm.SequenceSet(F,train_seq_sm_hslv)
model_rf_hslv.baumWelch(final_ts_rf_hslv)
model_rm_hslv.baumWelch(final_ts_rm_hslv)
model_sf_hslv.baumWelch(final_ts_sf_hslv)
model_sm_hslv.baumWelch(final_ts_sm_hslv)
# For Testing
total_seq_obj_hslv = Fmat_original_hslv[0:81,:]
rf_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
rm_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
sf_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
sm_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
k = 0
while (k < np.size(total_seq_obj_hslv,1)):
test_seq_obj_hslv = (np.array(total_seq_obj_hslv[0:81,k]).T).tolist()
new_test_seq_obj_hslv = np.array(sum(test_seq_obj_hslv,[]))
#print new_test_seq_obj_hslv
ts_obj_hslv = new_test_seq_obj_hslv
#print np.shape(ts_obj_hslv)
final_ts_obj_hslv = ghmm.EmissionSequence(F,ts_obj_hslv.tolist())
# Find Viterbi Path
path_rf_obj_hslv = model_rf_hslv.viterbi(final_ts_obj_hslv)
path_rm_obj_hslv = model_rm_hslv.viterbi(final_ts_obj_hslv)
path_sf_obj_hslv = model_sf_hslv.viterbi(final_ts_obj_hslv)
path_sm_obj_hslv = model_sm_hslv.viterbi(final_ts_obj_hslv)
obj_hslv = max(path_rf_obj_hslv[1],path_rm_obj_hslv[1],path_sf_obj_hslv[1],path_sm_obj_hslv[1])
if obj_hslv == path_rf_obj_hslv[1]:
rf_hslv[0,k] = 1
elif obj_hslv == path_rm_obj_hslv[1]:
rm_hslv[0,k] = 1
elif obj_hslv == path_sf_obj_hslv[1]:
sf_hslv[0,k] = 1
else:
sm_hslv[0,k] = 1
k = k+1
#print rf_hslv.T
cmat[0][0] = cmat[0][0] + np.sum(rf_hslv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_hslv[0,15:30])
cmat[0][2] = cmat[0][2] + np.sum(rf_hslv[0,30:45])
cmat[0][3] = cmat[0][3] + np.sum(rf_hslv[0,45:56])
cmat[1][0] = cmat[1][0] + np.sum(rm_hslv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_hslv[0,15:30])
cmat[1][2] = cmat[1][2] + np.sum(rm_hslv[0,30:45])
cmat[1][3] = cmat[1][3] + np.sum(rm_hslv[0,45:56])
cmat[2][0] = cmat[2][0] + np.sum(sf_hslv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_hslv[0,15:30])
cmat[2][2] = cmat[2][2] + np.sum(sf_hslv[0,30:45])
cmat[2][3] = cmat[2][3] + np.sum(sf_hslv[0,45:56])
cmat[3][0] = cmat[3][0] + np.sum(sm_hslv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_hslv[0,15:30])
cmat[3][2] = cmat[3][2] + np.sum(sm_hslv[0,30:45])
cmat[3][3] = cmat[3][3] + np.sum(sm_hslv[0,45:56])
#print cmat
############################################################################################################################################
# LSHV as testing set and Rest as training set
mu_rf_lshv,sigma_rf_lshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:81,0:15], Fmat_original_hslv[0:81,0:15], Fmat_original_lslv[0:81,0:15]))))
mu_rm_lshv,sigma_rm_lshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:81,15:15], Fmat_original_hslv[0:81,15:30], Fmat_original_lslv[0:81,15:28]))))
mu_sf_lshv,sigma_sf_lshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:81,15:26], Fmat_original_hslv[0:81,30:45], Fmat_original_lslv[0:81,28:37]))))
mu_sm_lshv,sigma_sm_lshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:81,26:33], Fmat_original_hslv[0:81,45:56], Fmat_original_lslv[0:81,37:45]))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_lshv = np.zeros((10,2))
B_rm_lshv = np.zeros((10,2))
B_sf_lshv = np.zeros((10,2))
B_sm_lshv = np.zeros((10,2))
for num_states in range(10):
B_rf_lshv[num_states,0] = mu_rf_lshv[num_states]
B_rf_lshv[num_states,1] = sigma_rf_lshv[num_states]
B_rm_lshv[num_states,0] = mu_rm_lshv[num_states]
B_rm_lshv[num_states,1] = sigma_rm_lshv[num_states]
B_sf_lshv[num_states,0] = mu_sf_lshv[num_states]
B_sf_lshv[num_states,1] = sigma_sf_lshv[num_states]
B_sm_lshv[num_states,0] = mu_sm_lshv[num_states]
B_sm_lshv[num_states,1] = sigma_sm_lshv[num_states]
B_rf_lshv = B_rf_lshv.tolist()
B_rm_lshv = B_rm_lshv.tolist()
B_sf_lshv = B_sf_lshv.tolist()
B_sm_lshv = B_sm_lshv.tolist()
model_rf_lshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf_lshv, pi) # Will be Trained
model_rm_lshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm_lshv, pi) # Will be Trained
model_sf_lshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf_lshv, pi) # Will be Trained
model_sm_lshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm_lshv, pi) # Will be Trained
# For Training
total_seq_rf_lshv = np.matrix(np.column_stack((Fmat_original_hshv[0:81,0:15], Fmat_original_hslv[0:81,0:15], Fmat_original_lslv[0:81,0:15])))
total_seq_rm_lshv = np.matrix(np.column_stack((Fmat_original_hshv[0:81,15:15], Fmat_original_hslv[0:81,15:30], Fmat_original_lslv[0:81,15:28])))
total_seq_sf_lshv = np.matrix(np.column_stack((Fmat_original_hshv[0:81,15:26], Fmat_original_hslv[0:81,30:45], Fmat_original_lslv[0:81,28:37])))
total_seq_sm_lshv = np.matrix(np.column_stack((Fmat_original_hshv[0:81,26:33], Fmat_original_hslv[0:81,45:56], Fmat_original_lslv[0:81,37:45])))
train_seq_rf_lshv = (np.array(total_seq_rf_lshv).T).tolist()
train_seq_rm_lshv = (np.array(total_seq_rm_lshv).T).tolist()
train_seq_sf_lshv = (np.array(total_seq_sf_lshv).T).tolist()
train_seq_sm_lshv = (np.array(total_seq_sm_lshv).T).tolist()
#print train_seq_rf_lshv
final_ts_rf_lshv = ghmm.SequenceSet(F,train_seq_rf_lshv)
final_ts_rm_lshv = ghmm.SequenceSet(F,train_seq_rm_lshv)
final_ts_sf_lshv = ghmm.SequenceSet(F,train_seq_sf_lshv)
final_ts_sm_lshv = ghmm.SequenceSet(F,train_seq_sm_lshv)
model_rf_lshv.baumWelch(final_ts_rf_lshv)
model_rm_lshv.baumWelch(final_ts_rm_lshv)
model_sf_lshv.baumWelch(final_ts_sf_lshv)
model_sm_lshv.baumWelch(final_ts_sm_lshv)
# For Testing
total_seq_obj_lshv = Fmat_original_lshv[0:81,:]
rf_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
rm_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
sf_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
sm_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
k = 0
while (k < np.size(total_seq_obj_lshv,1)):
test_seq_obj_lshv = (np.array(total_seq_obj_lshv[0:81,k]).T).tolist()
new_test_seq_obj_lshv = np.array(sum(test_seq_obj_lshv,[]))
#print new_test_seq_obj_lshv
ts_obj_lshv = new_test_seq_obj_lshv
#print np.shape(ts_obj_lshv)
final_ts_obj_lshv = ghmm.EmissionSequence(F,ts_obj_lshv.tolist())
# Find Viterbi Path
path_rf_obj_lshv = model_rf_lshv.viterbi(final_ts_obj_lshv)
path_rm_obj_lshv = model_rm_lshv.viterbi(final_ts_obj_lshv)
path_sf_obj_lshv = model_sf_lshv.viterbi(final_ts_obj_lshv)
path_sm_obj_lshv = model_sm_lshv.viterbi(final_ts_obj_lshv)
obj_lshv = max(path_rf_obj_lshv[1],path_rm_obj_lshv[1],path_sf_obj_lshv[1],path_sm_obj_lshv[1])
if obj_lshv == path_rf_obj_lshv[1]:
rf_lshv[0,k] = 1
elif obj_lshv == path_rm_obj_lshv[1]:
rm_lshv[0,k] = 1
elif obj_lshv == path_sf_obj_lshv[1]:
sf_lshv[0,k] = 1
else:
sm_lshv[0,k] = 1
k = k+1
#print rf_lshv.T
cmat[0][0] = cmat[0][0] + np.sum(rf_lshv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_lshv[0,15:16])
cmat[0][2] = cmat[0][2] + np.sum(rf_lshv[0,16:23])
cmat[0][3] = cmat[0][3] + np.sum(rf_lshv[0,23:32])
cmat[1][0] = cmat[1][0] + np.sum(rm_lshv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_lshv[0,15:16])
cmat[1][2] = cmat[1][2] + np.sum(rm_lshv[0,16:23])
cmat[1][3] = cmat[1][3] + np.sum(rm_lshv[0,23:32])
cmat[2][0] = cmat[2][0] + np.sum(sf_lshv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_lshv[0,15:16])
cmat[2][2] = cmat[2][2] + np.sum(sf_lshv[0,16:23])
cmat[2][3] = cmat[2][3] + np.sum(sf_lshv[0,23:32])
cmat[3][0] = cmat[3][0] + np.sum(sm_lshv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_lshv[0,15:16])
cmat[3][2] = cmat[3][2] + np.sum(sm_lshv[0,16:23])
cmat[3][3] = cmat[3][3] + np.sum(sm_lshv[0,23:32])
#print cmat
#############################################################################################################################################
# LSLV as testing set and Rest as training set
mu_rf_lslv,sigma_rf_lslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:81,0:15], Fmat_original_hslv[0:81,0:15], Fmat_original_lshv[0:81,0:15]))))
mu_rm_lslv,sigma_rm_lslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:81,15:15], Fmat_original_hslv[0:81,15:30], Fmat_original_lshv[0:81,15:16]))))
mu_sf_lslv,sigma_sf_lslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:81,15:26], Fmat_original_hslv[0:81,30:45], Fmat_original_lshv[0:81,16:23]))))
mu_sm_lslv,sigma_sm_lslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:81,26:33], Fmat_original_hslv[0:81,45:56], Fmat_original_lshv[0:81,23:32]))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_lslv = np.zeros((10,2))
B_rm_lslv = np.zeros((10,2))
B_sf_lslv = np.zeros((10,2))
B_sm_lslv = np.zeros((10,2))
for num_states in range(10):
B_rf_lslv[num_states,0] = mu_rf_lslv[num_states]
B_rf_lslv[num_states,1] = sigma_rf_lslv[num_states]
B_rm_lslv[num_states,0] = mu_rm_lslv[num_states]
B_rm_lslv[num_states,1] = sigma_rm_lslv[num_states]
B_sf_lslv[num_states,0] = mu_sf_lslv[num_states]
B_sf_lslv[num_states,1] = sigma_sf_lslv[num_states]
B_sm_lslv[num_states,0] = mu_sm_lslv[num_states]
B_sm_lslv[num_states,1] = sigma_sm_lslv[num_states]
B_rf_lslv = B_rf_lslv.tolist()
B_rm_lslv = B_rm_lslv.tolist()
B_sf_lslv = B_sf_lslv.tolist()
B_sm_lslv = B_sm_lslv.tolist()
model_rf_lslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf_lslv, pi) # Will be Trained
model_rm_lslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm_lslv, pi) # Will be Trained
model_sf_lslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf_lslv, pi) # Will be Trained
model_sm_lslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm_lslv, pi) # Will be Trained
# For Training
total_seq_rf_lslv = np.matrix(np.column_stack((Fmat_original_hshv[0:81,0:15], Fmat_original_hslv[0:81,0:15], Fmat_original_lshv[0:81,0:15])))
total_seq_rm_lslv = np.matrix(np.column_stack((Fmat_original_hshv[0:81,15:15], Fmat_original_hslv[0:81,15:30], Fmat_original_lshv[0:81,15:16])))
total_seq_sf_lslv = np.matrix(np.column_stack((Fmat_original_hshv[0:81,15:26], Fmat_original_hslv[0:81,30:45], Fmat_original_lshv[0:81,16:23])))
total_seq_sm_lslv = np.matrix(np.column_stack((Fmat_original_hshv[0:81,26:33], Fmat_original_hslv[0:81,45:56], Fmat_original_lshv[0:81,23:32])))
train_seq_rf_lslv = (np.array(total_seq_rf_lslv).T).tolist()
train_seq_rm_lslv = (np.array(total_seq_rm_lslv).T).tolist()
train_seq_sf_lslv = (np.array(total_seq_sf_lslv).T).tolist()
train_seq_sm_lslv = (np.array(total_seq_sm_lslv).T).tolist()
#print train_seq_rf_lslv
final_ts_rf_lslv = ghmm.SequenceSet(F,train_seq_rf_lslv)
final_ts_rm_lslv = ghmm.SequenceSet(F,train_seq_rm_lslv)
final_ts_sf_lslv = ghmm.SequenceSet(F,train_seq_sf_lslv)
final_ts_sm_lslv = ghmm.SequenceSet(F,train_seq_sm_lslv)
model_rf_lslv.baumWelch(final_ts_rf_lslv)
model_rm_lslv.baumWelch(final_ts_rm_lslv)
model_sf_lslv.baumWelch(final_ts_sf_lslv)
model_sm_lslv.baumWelch(final_ts_sm_lslv)
# For Testing
total_seq_obj_lslv = Fmat_original_lslv[0:81,:]
rf_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
rm_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
sf_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
sm_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
k = 0
while (k < np.size(total_seq_obj_lslv,1)):
test_seq_obj_lslv = (np.array(total_seq_obj_lslv[0:81,k]).T).tolist()
new_test_seq_obj_lslv = np.array(sum(test_seq_obj_lslv,[]))
#print new_test_seq_obj_lslv
ts_obj_lslv = new_test_seq_obj_lslv
#print np.shape(ts_obj_lslv)
final_ts_obj_lslv = ghmm.EmissionSequence(F,ts_obj_lslv.tolist())
# Find Viterbi Path
path_rf_obj_lslv = model_rf_lslv.viterbi(final_ts_obj_lslv)
path_rm_obj_lslv = model_rm_lslv.viterbi(final_ts_obj_lslv)
path_sf_obj_lslv = model_sf_lslv.viterbi(final_ts_obj_lslv)
path_sm_obj_lslv = model_sm_lslv.viterbi(final_ts_obj_lslv)
obj_lslv = max(path_rf_obj_lslv[1],path_rm_obj_lslv[1],path_sf_obj_lslv[1],path_sm_obj_lslv[1])
if obj_lslv == path_rf_obj_lslv[1]:
rf_lslv[0,k] = 1
elif obj_lslv == path_rm_obj_lslv[1]:
rm_lslv[0,k] = 1
elif obj_lslv == path_sf_obj_lslv[1]:
sf_lslv[0,k] = 1
else:
sm_lslv[0,k] = 1
k = k+1
#print rf_lslv.T
cmat[0][0] = cmat[0][0] + np.sum(rf_lslv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_lslv[0,15:28])
cmat[0][2] = cmat[0][2] + np.sum(rf_lslv[0,28:37])
cmat[0][3] = cmat[0][3] + np.sum(rf_lslv[0,37:45])
cmat[1][0] = cmat[1][0] + np.sum(rm_lslv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_lslv[0,15:28])
cmat[1][2] = cmat[1][2] + np.sum(rm_lslv[0,28:37])
cmat[1][3] = cmat[1][3] + np.sum(rm_lslv[0,37:45])
cmat[2][0] = cmat[2][0] + np.sum(sf_lslv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_lslv[0,15:28])
cmat[2][2] = cmat[2][2] + np.sum(sf_lslv[0,28:37])
cmat[2][3] = cmat[2][3] + np.sum(sf_lslv[0,37:45])
cmat[3][0] = cmat[3][0] + np.sum(sm_lslv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_lslv[0,15:28])
cmat[3][2] = cmat[3][2] + np.sum(sm_lslv[0,28:37])
cmat[3][3] = cmat[3][3] + np.sum(sm_lslv[0,37:45])
#print cmat
############################################################################################################################################
# Plot Confusion Matrix
Nlabels = 4
fig = pp.figure()
ax = fig.add_subplot(111)
figplot = ax.matshow(cmat, interpolation = 'nearest', origin = 'upper', extent=[0, Nlabels, 0, Nlabels])
ax.set_title('Performance of HMM Models')
pp.xlabel("Targets")
pp.ylabel("Predictions")
ax.set_xticks([0.5,1.5,2.5,3.5])
ax.set_xticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
ax.set_yticks([3.5,2.5,1.5,0.5])
ax.set_yticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
figbar = fig.colorbar(figplot)
i = 0
while (i < 4):
j = 0
while (j < 4):
pp.text(j+0.5,3.5-i,cmat[i][j])
j = j+1
i = i+1
pp.savefig('results_force_10_states.png')
pp.show()
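# Appended sketch (not part of the original script): the plot above labels rows
# as predictions and columns as targets, so dividing the diagonal of cmat by the
# column totals gives per-class recall, and the trace over the grand total gives
# overall accuracy across the four leave-one-condition-out folds.
column_totals = cmat.sum(axis=0)
per_class_recall = np.diag(cmat) / np.where(column_totals > 0, column_totals, 1.0)
overall_accuracy = np.trace(cmat) / cmat.sum()
print(per_class_recall)
print(overall_accuracy)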
|
mit
|
fabioticconi/scikit-learn
|
sklearn/manifold/isomap.py
|
50
|
7515
|
"""Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDTree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto', n_jobs=1):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.n_jobs = n_jobs
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_ = NearestNeighbors(n_neighbors=self.n_neighbors,
algorithm=self.neighbors_algorithm,
n_jobs=self.n_jobs)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter,
n_jobs=self.n_jobs)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance', n_jobs=self.n_jobs)
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
-----
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
# Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
# This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min(self.dist_matrix_[indices[i]] +
distances[i][:, None], 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
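# Illustrative usage sketch (appended by the editor, not part of the module):
# embed most of the digits dataset and project held-out points with transform().
# The split sizes and n_neighbors value are arbitrary choices for the example.
if __name__ == "__main__":
    from sklearn.datasets import load_digits
    digits = load_digits()
    X_train, X_test = digits.data[:1500], digits.data[1500:]
    iso = Isomap(n_neighbors=10, n_components=2)
    embedding = iso.fit_transform(X_train)   # training embedding, shape (1500, 2)
    projected = iso.transform(X_test)        # out-of-sample extension
    print("train embedding: %s, out-of-sample: %s"
          % (embedding.shape, projected.shape))
    print("reconstruction error: %.4f" % iso.reconstruction_error())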
|
bsd-3-clause
|
q1ang/scikit-learn
|
sklearn/feature_selection/variance_threshold.py
|
238
|
2594
|
# Author: Lars Buitinck <[email protected]>
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
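# Illustrative usage sketch (appended, not part of the module): drop the
# constant columns from the docstring example and map the surviving columns
# back to their original indices via get_support() from SelectorMixin.
if __name__ == "__main__":
    X_demo = np.array([[0., 2., 0., 3.],
                       [0., 1., 4., 3.],
                       [0., 1., 1., 3.]])
    selector = VarianceThreshold()
    X_reduced = selector.fit_transform(X_demo)
    print("kept columns: %s" % np.where(selector.get_support())[0])
    print("variances:    %s" % selector.variances_)
    print(X_reduced)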
|
bsd-3-clause
|
MIPS/external-chromium_org
|
chrome/test/nacl_test_injection/buildbot_chrome_nacl_stage.py
|
24
|
10036
|
#!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Do all the steps required to build and test against nacl."""
import optparse
import os.path
import re
import shutil
import subprocess
import sys
import find_chrome
# Copied from buildbot/buildbot_lib.py
def TryToCleanContents(path, file_name_filter=lambda fn: True):
"""
Remove the contents of a directory without touching the directory itself.
Ignores all failures.
"""
if os.path.exists(path):
for fn in os.listdir(path):
TryToCleanPath(os.path.join(path, fn), file_name_filter)
# Copied from buildbot/buildbot_lib.py
def TryToCleanPath(path, file_name_filter=lambda fn: True):
"""
Removes a file or directory.
Ignores all failures.
"""
if os.path.exists(path):
if file_name_filter(path):
print 'Trying to remove %s' % path
if os.path.isdir(path):
shutil.rmtree(path, ignore_errors=True)
else:
try:
os.remove(path)
except Exception:
pass
else:
print 'Skipping %s' % path
# TODO(ncbray): this is somewhat unsafe. We should fix the underlying problem.
def CleanTempDir():
# Only delete files and directories like:
# a) C:\temp\83C4.tmp
# b) /tmp/.org.chromium.Chromium.EQrEzl
file_name_re = re.compile(
r'[\\/]([0-9a-fA-F]+\.tmp|\.org\.chrom\w+\.Chrom\w+\..+)$')
file_name_filter = lambda fn: file_name_re.search(fn) is not None
path = os.environ.get('TMP', os.environ.get('TEMP', '/tmp'))
if len(path) >= 4 and os.path.isdir(path):
print
print "Cleaning out the temp directory."
print
TryToCleanContents(path, file_name_filter)
else:
print
print "Cannot find temp directory, not cleaning it."
print
def RunCommand(cmd, cwd, env):
sys.stdout.write('\nRunning %s\n\n' % ' '.join(cmd))
sys.stdout.flush()
retcode = subprocess.call(cmd, cwd=cwd, env=env)
if retcode != 0:
sys.stdout.write('\nFailed: %s\n\n' % ' '.join(cmd))
sys.exit(retcode)
def RunTests(name, cmd, nacl_dir, env):
sys.stdout.write('\n\nBuilding files needed for %s testing...\n\n' % name)
RunCommand(cmd + ['do_not_run_tests=1', '-j8'], nacl_dir, env)
sys.stdout.write('\n\nRunning %s tests...\n\n' % name)
RunCommand(cmd, nacl_dir, env)
def BuildAndTest(options):
# Refuse to run under cygwin.
if sys.platform == 'cygwin':
raise Exception('I do not work under cygwin, sorry.')
# By default, use the version of Python that is being used to run this script.
python = sys.executable
if sys.platform == 'darwin':
# Mac 10.5 bots tend to use a particularly old version of Python, so look for
# a newer version.
macpython27 = '/Library/Frameworks/Python.framework/Versions/2.7/bin/python'
if os.path.exists(macpython27):
python = macpython27
script_dir = os.path.dirname(os.path.abspath(__file__))
src_dir = os.path.dirname(os.path.dirname(os.path.dirname(script_dir)))
nacl_dir = os.path.join(src_dir, 'native_client')
# Decide platform specifics.
env = dict(os.environ)
if sys.platform in ['win32', 'cygwin']:
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
bits = 64
else:
bits = 32
msvs_path = ';'.join([
r'c:\Program Files\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files\Microsoft Visual Studio 8\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\VC',
r'c:\Program Files\Microsoft Visual Studio 8\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\Common7\Tools',
])
env['PATH'] += ';' + msvs_path
scons = [python, 'scons.py']
elif sys.platform == 'darwin':
bits = 32
scons = [python, 'scons.py']
else:
p = subprocess.Popen(
'uname -m | '
'sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/"',
shell=True, stdout=subprocess.PIPE)
(p_stdout, _) = p.communicate()
assert p.returncode == 0
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif p_stdout.find('64') >= 0:
bits = 64
else:
bits = 32
# xvfb-run has a 2-second overhead per invocation, so it is cheaper to wrap
# the entire build step rather than each test (browser_headless=1).
scons = ['xvfb-run', '--auto-servernum', python, 'scons.py']
if options.browser_path:
chrome_filename = options.browser_path
else:
chrome_filename = find_chrome.FindChrome(src_dir, [options.mode])
if chrome_filename is None:
raise Exception('Cannot find a chrome binary - specify one with '
'--browser_path?')
if options.jobs > 1:
scons.append('-j%d' % options.jobs)
scons.append('disable_tests=%s' % options.disable_tests)
if options.buildbot is not None:
scons.append('buildbot=%s' % (options.buildbot,))
# Clean the output of the previous build.
# Incremental builds can get wedged in weird ways, so we're trading speed
# for reliability.
shutil.rmtree(os.path.join(nacl_dir, 'scons-out'), True)
# check that the HOST (not target) is 64bit
# this is emulating what msvs_env.bat is doing
if '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
# 64bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 9.0\\Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 8.0\\Common7\\Tools\\')
else:
# 32bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 9.0\\'
'Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 8.0\\'
'Common7\\Tools\\')
# Run nacl/chrome integration tests.
# Note that we have to add nacl_irt_test to --mode in order to get
# inbrowser_test_runner to run.
# TODO(mseaborn): Change it so that inbrowser_test_runner is not a
# special case.
cmd = scons + ['--verbose', '-k', 'platform=x86-%d' % bits,
'--mode=opt-host,nacl,nacl_irt_test',
'chrome_browser_path=%s' % chrome_filename,
]
if not options.integration_bot and not options.morenacl_bot:
cmd.append('disable_flaky_tests=1')
cmd.append('chrome_browser_tests')
# Download the toolchain(s).
RunCommand([python,
os.path.join(nacl_dir, 'build', 'download_toolchains.py'),
'--no-arm-trusted', '--no-pnacl', 'TOOL_REVISIONS'],
nacl_dir, os.environ)
CleanTempDir()
if options.enable_newlib:
RunTests('nacl-newlib', cmd, nacl_dir, env)
if options.enable_glibc:
RunTests('nacl-glibc', cmd + ['--nacl_glibc'], nacl_dir, env)
def MakeCommandLineParser():
parser = optparse.OptionParser()
parser.add_option('-m', '--mode', dest='mode', default='Debug',
help='Debug/Release mode')
parser.add_option('-j', dest='jobs', default=1, type='int',
help='Number of parallel jobs')
parser.add_option('--enable_newlib', dest='enable_newlib', default=-1,
type='int', help='Run newlib tests?')
parser.add_option('--enable_glibc', dest='enable_glibc', default=-1,
type='int', help='Run glibc tests?')
# Deprecated, but passed to us by a script in the Chrome repo.
# Replaced by --enable_glibc=0
parser.add_option('--disable_glibc', dest='disable_glibc',
action='store_true', default=False,
help='Do not test using glibc.')
parser.add_option('--disable_tests', dest='disable_tests',
type='string', default='',
help='Comma-separated list of tests to omit')
builder_name = os.environ.get('BUILDBOT_BUILDERNAME', '')
is_integration_bot = 'nacl-chrome' in builder_name
parser.add_option('--integration_bot', dest='integration_bot',
type='int', default=int(is_integration_bot),
help='Is this an integration bot?')
is_morenacl_bot = (
'More NaCl' in builder_name or
'naclmore' in builder_name)
parser.add_option('--morenacl_bot', dest='morenacl_bot',
type='int', default=int(is_morenacl_bot),
help='Is this a morenacl bot?')
# Not used on the bots, but handy for running the script manually.
parser.add_option('--bits', dest='bits', action='store',
type='int', default=None,
help='32/64')
parser.add_option('--browser_path', dest='browser_path', action='store',
type='string', default=None,
help='Path to the chrome browser.')
parser.add_option('--buildbot', dest='buildbot', action='store',
type='string', default=None,
help='Value passed to scons as buildbot= option.')
return parser
def Main():
parser = MakeCommandLineParser()
options, args = parser.parse_args()
if options.integration_bot and options.morenacl_bot:
parser.error('ERROR: cannot be both an integration bot and a morenacl bot')
# Set defaults for enabling newlib.
if options.enable_newlib == -1:
options.enable_newlib = 1
# Set defaults for enabling glibc.
if options.enable_glibc == -1:
if options.integration_bot or options.morenacl_bot:
options.enable_glibc = 1
else:
options.enable_glibc = 0
if args:
parser.error('ERROR: invalid argument')
BuildAndTest(options)
if __name__ == '__main__':
Main()
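# Example manual invocations (illustrative only; the paths and test names are
# placeholders, not taken from any bot configuration). All flags used here are
# defined in MakeCommandLineParser() above:
#
#   python buildbot_chrome_nacl_stage.py --mode=Release -j 4
#   python buildbot_chrome_nacl_stage.py --bits=64 \
#       --browser_path=/path/to/chrome --disable_tests=flaky_test_name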
|
bsd-3-clause
|
luo66/scikit-learn
|
sklearn/feature_selection/rfe.py
|
64
|
17509
|
# Authors: Alexandre Gramfort <[email protected]>
# Vincent Michel <[email protected]>
# Gilles Louppe <[email protected]>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import warnings
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import check_cv
from ..cross_validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
absolute weights are the smallest are pruned from the current set of features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranking position of the i-th feature. Selected (i.e., estimated
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
The following example shows how to retrieve the 5 most informative
features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
estimator_params=None, verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.estimator_params = estimator_params
self.verbose = verbose
@property
def _estimator_type(self):
return self.estimator._estimator_type
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
return self._fit(X, y)
def _fit(self, X, y, step_score=None):
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features / 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
if step_score:
self.scores_ = []
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
if self.estimator_params:
estimator.set_params(**self.estimator_params)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
# Get coefs
if hasattr(estimator, 'coef_'):
coefs = estimator.coef_
elif hasattr(estimator, 'feature_importances_'):
coefs = estimator.feature_importances_
else:
raise RuntimeError('The classifier does not expose '
'"coef_" or "feature_importances_" '
'attributes')
# Get ranks
if coefs.ndim > 1:
ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(coefs))
# for sparse case ranks is matrix
ranks = np.ravel(ranks)
# Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
# Compute step score on the previous selection iteration
# because 'estimator' must use features
# that have not been eliminated yet
if step_score:
self.scores_.append(step_score(estimator, features))
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
features = np.arange(n_features)[support_]
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(X[:, features], y)
# Compute step score when only n_features_to_select features left
if step_score:
self.scores_.append(step_score(self.estimator_, features))
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
return self.estimator_.predict_log_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
``grid_scores_[i]`` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Notes
-----
The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1,
where step is the number of features removed at each iteration.
Examples
--------
The following example shows how to retrieve the 5 informative features,
not known a priori, in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None,
estimator_params=None, verbose=0):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. "
"The parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
# Initialization
cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
n_features_to_select = 1
# Determine the number of subsets of features
scores = []
# Cross-validation
for n, (train, test) in enumerate(cv):
X_train, y_train = _safe_split(self.estimator, X, y, train)
X_test, y_test = _safe_split(self.estimator, X, y, test, train)
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params,
verbose=self.verbose - 1)
rfe._fit(X_train, y_train, lambda estimator, features:
_score(estimator, X_test[:, features], y_test, scorer))
scores.append(np.array(rfe.scores_[::-1]).reshape(1, -1))
scores = np.sum(np.concatenate(scores, 0), 0)
# The index in 'scores' when 'n_features' features are selected
n_feature_index = np.ceil((n_features - n_features_to_select) /
float(self.step))
n_features_to_select = max(n_features_to_select,
n_features - ((n_feature_index -
np.argmax(scores)) *
self.step))
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(self.transform(X), y)
# Fixing a normalization error, n is equal to len(cv) - 1
# here, the scores are normalized by len(cv)
self.grid_scores_ = scores / len(cv)
return self
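# Illustrative usage sketch (appended, not part of the module): run RFECV on
# the same Friedman #1 data as the docstring example and inspect how the
# cross-validated score varies with the number of selected features.
if __name__ == "__main__":
    from sklearn.datasets import make_friedman1
    from sklearn.svm import SVR
    X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
    selector = RFECV(SVR(kernel="linear"), step=1, cv=5)
    selector.fit(X, y)
    print("optimal number of features: %d" % selector.n_features_)
    print("cross-validated scores per subset size: %s" % selector.grid_scores_)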
|
bsd-3-clause
|
chrsrds/scikit-learn
|
sklearn/ensemble/iforest.py
|
2
|
18270
|
# Authors: Nicolas Goix <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numbers
import numpy as np
from scipy.sparse import issparse
from warnings import warn
from ..tree import ExtraTreeRegressor
from ..utils import (
check_random_state,
check_array,
gen_batches,
get_chunk_n_rows,
)
from ..utils.fixes import _joblib_parallel_args
from ..utils.validation import check_is_fitted, _num_samples
from ..base import OutlierMixin
from .bagging import BaseBagging
__all__ = ["IsolationForest"]
class IsolationForest(BaseBagging, OutlierMixin):
"""Isolation Forest Algorithm
Return the anomaly score of each sample using the IsolationForest algorithm
The IsolationForest 'isolates' observations by randomly selecting a feature
and then randomly selecting a split value between the maximum and minimum
values of the selected feature.
Since recursive partitioning can be represented by a tree structure, the
number of splittings required to isolate a sample is equivalent to the path
length from the root node to the terminating node.
This path length, averaged over a forest of such random trees, is a
measure of normality and our decision function.
Random partitioning produces noticeably shorter paths for anomalies.
Hence, when a forest of random trees collectively produce shorter path
lengths for particular samples, they are highly likely to be anomalies.
Read more in the :ref:`User Guide <isolation_forest>`.
.. versionadded:: 0.18
Parameters
----------
n_estimators : int, optional (default=100)
The number of base estimators in the ensemble.
max_samples : int or float, optional (default="auto")
The number of samples to draw from X to train each base estimator.
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples.
- If "auto", then `max_samples=min(256, n_samples)`.
If max_samples is larger than the number of samples provided,
all samples will be used for all trees (no sampling).
contamination : 'auto' or float, optional (default='auto')
The amount of contamination of the data set, i.e. the proportion
of outliers in the data set. Used when fitting to define the threshold
on the scores of the samples.
- If 'auto', the threshold is determined as in the
original paper.
- If float, the contamination should be in the range [0, 0.5].
.. versionchanged:: 0.22
The default value of ``contamination`` changed from 0.1
to ``'auto'``.
max_features : int or float, optional (default=1.0)
The number of features to draw from X to train each base estimator.
- If int, then draw `max_features` features.
- If float, then draw `max_features * X.shape[1]` features.
bootstrap : boolean, optional (default=False)
If True, individual trees are fit on random subsets of the training
data sampled with replacement. If False, sampling without replacement
is performed.
n_jobs : int or None, optional (default=None)
The number of jobs to run in parallel for both `fit` and `predict`.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
behaviour : str, default='deprecated'
        This parameter has no effect, is deprecated, and will be removed.
.. versionadded:: 0.20
``behaviour`` is added in 0.20 for back-compatibility purpose.
.. deprecated:: 0.20
``behaviour='old'`` is deprecated in 0.20 and will not be possible
in 0.22.
.. deprecated:: 0.22
``behaviour`` parameter is deprecated in 0.22 and removed in
0.24.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest. See :term:`the Glossary <warm_start>`.
.. versionadded:: 0.21
Attributes
----------
    estimators_ : list of ExtraTreeRegressor
The collection of fitted sub-estimators.
estimators_samples_ : list of arrays
The subset of drawn samples (i.e., the in-bag samples) for each base
estimator.
max_samples_ : integer
The actual number of samples
offset_ : float
Offset used to define the decision function from the raw scores. We
have the relation: ``decision_function = score_samples - offset_``.
``offset_`` is defined as follows. When the contamination parameter is
set to "auto", the offset is equal to -0.5 as the scores of inliers are
close to 0 and the scores of outliers are close to -1. When a
contamination parameter different than "auto" is provided, the offset
is defined in such a way we obtain the expected number of outliers
(samples with decision function < 0) in training.
Notes
-----
The implementation is based on an ensemble of ExtraTreeRegressor. The
maximum depth of each tree is set to ``ceil(log_2(n))`` where
:math:`n` is the number of samples used to build the tree
(see (Liu et al., 2008) for more details).
References
----------
.. [1] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation forest."
Data Mining, 2008. ICDM'08. Eighth IEEE International Conference on.
.. [2] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation-based
anomaly detection." ACM Transactions on Knowledge Discovery from
Data (TKDD) 6.1 (2012): 3.
"""
def __init__(self,
n_estimators=100,
max_samples="auto",
contamination="auto",
max_features=1.,
bootstrap=False,
n_jobs=None,
behaviour='deprecated',
random_state=None,
verbose=0,
warm_start=False):
super().__init__(
base_estimator=ExtraTreeRegressor(
max_features=1,
splitter='random',
random_state=random_state),
# here above max_features has no links with self.max_features
bootstrap=bootstrap,
bootstrap_features=False,
n_estimators=n_estimators,
max_samples=max_samples,
max_features=max_features,
warm_start=warm_start,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
self.behaviour = behaviour
self.contamination = contamination
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by iforest")
def _parallel_args(self):
# ExtraTreeRegressor releases the GIL, so it's more efficient to use
# a thread-based backend rather than a process-based backend so as
# to avoid suffering from communication overhead and extra memory
# copies.
return _joblib_parallel_args(prefer='threads')
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
y : Ignored
not used, present for API consistency by convention.
Returns
-------
self : object
"""
if self.behaviour != 'deprecated':
if self.behaviour == 'new':
warn(
"'behaviour' is deprecated in 0.22 and will be removed "
"in 0.24. You should not pass or set this parameter.",
DeprecationWarning
)
else:
raise NotImplementedError(
"The old behaviour of IsolationForest is not implemented "
"anymore. Remove the 'behaviour' parameter."
)
X = check_array(X, accept_sparse=['csc'])
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
# ensure that max_sample is in [1, n_samples]:
n_samples = X.shape[0]
if isinstance(self.max_samples, str):
if self.max_samples == 'auto':
max_samples = min(256, n_samples)
else:
                raise ValueError('max_samples (%s) is not supported. '
                                 'Valid choices are: "auto", int or '
                                 'float' % self.max_samples)
elif isinstance(self.max_samples, numbers.Integral):
if self.max_samples > n_samples:
warn("max_samples (%s) is greater than the "
"total number of samples (%s). max_samples "
"will be set to n_samples for estimation."
% (self.max_samples, n_samples))
max_samples = n_samples
else:
max_samples = self.max_samples
else: # float
if not (0. < self.max_samples <= 1.):
raise ValueError("max_samples must be in (0, 1], got %r"
% self.max_samples)
max_samples = int(self.max_samples * X.shape[0])
self.max_samples_ = max_samples
max_depth = int(np.ceil(np.log2(max(max_samples, 2))))
super()._fit(X, y, max_samples,
max_depth=max_depth,
sample_weight=sample_weight)
if self.contamination == "auto":
# 0.5 plays a special role as described in the original paper.
# we take the opposite as we consider the opposite of their score.
self.offset_ = -0.5
return self
# else, define offset_ wrt contamination parameter
self.offset_ = np.percentile(self.score_samples(X),
100. * self.contamination)
return self
def predict(self, X):
"""Predict if a particular sample is an outlier or not.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
is_inlier : array, shape (n_samples,)
For each observation, tells whether or not (+1 or -1) it should
be considered as an inlier according to the fitted model.
"""
check_is_fitted(self, ["offset_"])
X = check_array(X, accept_sparse='csr')
is_inlier = np.ones(X.shape[0], dtype=int)
is_inlier[self.decision_function(X) < 0] = -1
return is_inlier
def decision_function(self, X):
"""Average anomaly score of X of the base classifiers.
The anomaly score of an input sample is computed as
the mean anomaly score of the trees in the forest.
The measure of normality of an observation given a tree is the depth
of the leaf containing this observation, which is equivalent to
the number of splittings required to isolate this point. In case of
several observations n_left in the leaf, the average path length of
        an n_left samples isolation tree is added.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
scores : array, shape (n_samples,)
The anomaly score of the input samples.
The lower, the more abnormal. Negative scores represent outliers,
positive scores represent inliers.
"""
# We subtract self.offset_ to make 0 be the threshold value for being
# an outlier:
return self.score_samples(X) - self.offset_
def score_samples(self, X):
"""Opposite of the anomaly score defined in the original paper.
The anomaly score of an input sample is computed as
the mean anomaly score of the trees in the forest.
The measure of normality of an observation given a tree is the depth
of the leaf containing this observation, which is equivalent to
the number of splittings required to isolate this point. In case of
several observations n_left in the leaf, the average path length of
        an n_left samples isolation tree is added.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The input samples.
Returns
-------
scores : array, shape (n_samples,)
The anomaly score of the input samples.
The lower, the more abnormal.
"""
# code structure from ForestClassifier/predict_proba
check_is_fitted(self, ["estimators_"])
# Check data
X = check_array(X, accept_sparse='csr')
if self.n_features_ != X.shape[1]:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is {0} and "
"input n_features is {1}."
"".format(self.n_features_, X.shape[1]))
# Take the opposite of the scores as bigger is better (here less
# abnormal)
return -self._compute_chunked_score_samples(X)
def _compute_chunked_score_samples(self, X):
n_samples = _num_samples(X)
if self._max_features == X.shape[1]:
subsample_features = False
else:
subsample_features = True
# We get as many rows as possible within our working_memory budget
# (defined by sklearn.get_config()['working_memory']) to store
# self._max_features in each row during computation.
#
# Note:
# - this will get at least 1 row, even if 1 row of score will
# exceed working_memory.
# - this does only account for temporary memory usage while loading
# the data needed to compute the scores -- the returned scores
# themselves are 1D.
chunk_n_rows = get_chunk_n_rows(row_bytes=16 * self._max_features,
max_n_rows=n_samples)
slices = gen_batches(n_samples, chunk_n_rows)
scores = np.zeros(n_samples, order="f")
for sl in slices:
# compute score on the slices of test samples:
scores[sl] = self._compute_score_samples(X[sl], subsample_features)
return scores
def _compute_score_samples(self, X, subsample_features):
"""Compute the score of each samples in X going through the extra trees.
Parameters
----------
X : array-like or sparse matrix
subsample_features : bool,
whether features should be subsampled
"""
n_samples = X.shape[0]
depths = np.zeros(n_samples, order="f")
for tree, features in zip(self.estimators_, self.estimators_features_):
X_subset = X[:, features] if subsample_features else X
leaves_index = tree.apply(X_subset)
node_indicator = tree.decision_path(X_subset)
n_samples_leaf = tree.tree_.n_node_samples[leaves_index]
depths += (
np.ravel(node_indicator.sum(axis=1))
+ _average_path_length(n_samples_leaf)
- 1.0
)
scores = 2 ** (
-depths
/ (len(self.estimators_)
* _average_path_length([self.max_samples_]))
)
return scores
def _average_path_length(n_samples_leaf):
"""The average path length in a n_samples iTree, which is equal to
the average path length of an unsuccessful BST search since the
latter has the same structure as an isolation tree.
Parameters
----------
n_samples_leaf : array-like, shape (n_samples,).
The number of training samples in each test sample leaf, for
        each estimator.
Returns
-------
average_path_length : array, same shape as n_samples_leaf
"""
n_samples_leaf = check_array(n_samples_leaf, ensure_2d=False)
n_samples_leaf_shape = n_samples_leaf.shape
n_samples_leaf = n_samples_leaf.reshape((1, -1))
average_path_length = np.zeros(n_samples_leaf.shape)
mask_1 = n_samples_leaf <= 1
mask_2 = n_samples_leaf == 2
not_mask = ~np.logical_or(mask_1, mask_2)
average_path_length[mask_1] = 0.
average_path_length[mask_2] = 1.
average_path_length[not_mask] = (
2.0 * (np.log(n_samples_leaf[not_mask] - 1.0) + np.euler_gamma)
- 2.0 * (n_samples_leaf[not_mask] - 1.0) / n_samples_leaf[not_mask]
)
return average_path_length.reshape(n_samples_leaf_shape)
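

# --- Hedged illustration (not part of the original module) -----------------
# _average_path_length implements c(n) = 2 * (ln(n - 1) + Euler-Mascheroni
# constant) - 2 * (n - 1) / n, the expected path length of an unsuccessful
# BST search, which normalises raw tree depths into the anomaly score
# 2 ** (-mean_depth / c(max_samples_)). The toy data below is an assumption
# chosen only to exercise the helper and the public API.
if __name__ == "__main__":
    # c(n) for a few leaf sizes: 0 for n <= 1, 1 for n == 2, ~3.7 for n == 10
    # and ~10.2 for n == 256.
    print(_average_path_length(np.array([1, 2, 10, 256])))

    rng_demo = np.random.RandomState(42)
    X_demo = np.r_[rng_demo.randn(100, 2), [[6.0, 6.0]]]  # one clear outlier
    clf_demo = IsolationForest(n_estimators=50, random_state=0).fit(X_demo)
    print(clf_demo.predict(X_demo[-1:]))            # typically [-1] (outlier)
    print(clf_demo.decision_function(X_demo[-1:]))  # negative for outliers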
|
bsd-3-clause
|
kyleam/pymc3
|
pymc/examples/ARM12_6.py
|
2
|
1807
|
import numpy as np
from pymc import *
import pandas as pd
data = pd.read_csv(get_data_file('pymc.examples', 'data/srrs2.dat'))
cty_data = pd.read_csv(get_data_file('pymc.examples', 'data/cty.dat'))
data = data[data.state == 'MN']
data['fips'] = data.stfips * 1000 + data.cntyfips
cty_data['fips'] = cty_data.stfips * 1000 + cty_data.ctfips
data['lradon'] = np.log(np.where(data.activity == 0, .1, data.activity))
data = data.merge(cty_data, 'inner', on='fips')
unique = data[['fips']].drop_duplicates()
unique['group'] = np.arange(len(unique))
unique.set_index('fips')
data = data.merge(unique, 'inner', on='fips')
obs_means = data.groupby('fips').lradon.mean()
n = len(obs_means)
lradon = np.array(data.lradon)
floor = np.array(data.floor)
group = np.array(data.group)
model = Model()
with model:
groupmean = Normal('groupmean', 0, 10. ** -2.)
# as recommended by "Prior distributions for variance parameters in
# hierarchical models"
groupsd = Uniform('groupsd', 0, 10.)
sd = Uniform('sd', 0, 10.)
floor_m = Normal('floor_m', 0, 5. ** -2.)
means = Normal('means', groupmean, groupsd ** -2., shape=n)
lr = Normal(
'lr', floor * floor_m + means[group], sd ** -2., observed=lradon)
def run(n=3000):
if n == "short":
n = 50
with model:
start = {'groupmean': obs_means.mean(),
'groupsd': obs_means.std(),
'sd': data.groupby('group').lradon.std().mean(),
'means': np.array(obs_means),
'floor_m': 0.,
}
start = find_MAP(start, [groupmean, sd, floor_m])
H = model.fastd2logp()
h = np.diag(H(start))
step = HamiltonianMC(model.vars, h)
trace = sample(n, step, start)
if __name__ == '__main__':
run()
|
apache-2.0
|
wvangeit/AllenSDK
|
doc_template/examples/multicell/multi.py
|
3
|
1070
|
from allensdk.model.biophys_sim.config import Config
from utils import Utils
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
config = Config().load('config.json')
# configure NEURON
utils = Utils(config)
h = utils.h
# configure model
manifest = config.manifest
utils.generate_cells()
utils.connect_cells()
# configure stimulus
utils.setup_iclamp_step(utils.cells[0], 0.27, 1020.0, 750.0)
h.dt = 0.025
h.tstop = 3000
# configure recording
vec = utils.record_values()
# run the model
h.finitialize()
h.run()
# save output voltage to text file
data = np.transpose(np.vstack((vec["t"],
vec["v"][0],
vec["v"][1],
vec["v"][2])))
np.savetxt('multicell.dat', data)
# use matplotlib to plot to png image
fig, axes = plt.subplots(nrows=3, ncols=1, sharex=True)
for i in range(len(utils.cells)):
axes[i].plot(vec["t"], vec["v"][i])
axes[i].set_title(utils.cells_data[i]["type"])
plt.tight_layout()
plt.savefig('multicell.png')
|
gpl-3.0
|
cl4rke/scikit-learn
|
sklearn/utils/random.py
|
234
|
10510
|
# Author: Hamzeh Alsalhi <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
        If not given the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
        if pop_size == 0:
raise ValueError("a must be non-empty")
    if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
        if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
        if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
            found = np.zeros(shape, dtype=int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
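

# Hedged illustration (not part of the original module): unlike
# np.random.choice, this backport threads ``random_state`` through
# check_random_state, so a fixed seed reproduces the same draw. The calls
# below are an assumption of typical usage, not doctests:
#
#   first = choice(5, size=3, replace=False, random_state=0)
#   second = choice(5, size=3, replace=False, random_state=0)
#   # first and second are identical arrays because the seed is shared.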
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = astype(classes[j], np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
if np.sum(class_prob_j) != 1.0:
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
            # Normalize probabilities for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
rng.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
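

# --- Hedged usage sketch (not part of the original module) -----------------
# random_choice_csc draws class labels column by column according to the
# given per-column distributions and stores only the nonzero labels in a
# CSC matrix. The class lists and probabilities below are assumptions made
# for the demo; exact binary fractions are used so the sums pass the strict
# equality check above.
if __name__ == "__main__":
    demo_classes = [np.array([0, 1, 2]), np.array([0, 5])]
    demo_probs = [np.array([0.5, 0.25, 0.25]), np.array([0.75, 0.25])]
    demo_matrix = random_choice_csc(n_samples=10,
                                    classes=demo_classes,
                                    class_probability=demo_probs,
                                    random_state=0)
    print(demo_matrix.shape)      # (10, 2): one column per output
    print(demo_matrix.toarray())  # zeros stay implicit; nonzero labels stored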
|
bsd-3-clause
|
joequant/zipline
|
tests/test_algorithm_gen.py
|
18
|
7339
|
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from nose.tools import (
timed,
nottest
)
from datetime import datetime
import pandas as pd
import pytz
from zipline.finance import trading
from zipline.algorithm import TradingAlgorithm
from zipline.finance import slippage
from zipline.utils import factory
from zipline.utils.factory import create_simulation_parameters
from zipline.utils.test_utils import (
setup_logger,
teardown_logger
)
from zipline.protocol import (
Event,
DATASOURCE_TYPE
)
DEFAULT_TIMEOUT = 15 # seconds
EXTENDED_TIMEOUT = 90
class RecordDateSlippage(slippage.FixedSlippage):
def __init__(self, spread):
super(RecordDateSlippage, self).__init__(spread=spread)
self.latest_date = None
def simulate(self, event, open_orders):
self.latest_date = event.dt
result = super(RecordDateSlippage, self).simulate(event, open_orders)
return result
class TestAlgo(TradingAlgorithm):
def __init__(self, asserter, *args, **kwargs):
super(TestAlgo, self).__init__(*args, **kwargs)
self.asserter = asserter
def initialize(self, window_length=100):
self.latest_date = None
self.set_slippage(RecordDateSlippage(spread=0.05))
self.stocks = [self.sid(8229)]
self.ordered = False
self.num_bars = 0
def handle_data(self, data):
self.num_bars += 1
self.latest_date = self.get_datetime()
if not self.ordered:
for stock in self.stocks:
self.order(stock, 100)
self.ordered = True
else:
self.asserter.assertGreaterEqual(
self.latest_date,
self.slippage.latest_date
)
class AlgorithmGeneratorTestCase(TestCase):
def setUp(self):
setup_logger(self)
def tearDown(self):
teardown_logger(self)
@nottest
def test_lse_algorithm(self):
lse = trading.TradingEnvironment(
bm_symbol='^FTSE',
exchange_tz='Europe/London'
)
with lse:
sim_params = factory.create_simulation_parameters(
start=datetime(2012, 5, 1, tzinfo=pytz.utc),
end=datetime(2012, 6, 30, tzinfo=pytz.utc)
)
algo = TestAlgo(self, identifiers=[8229], sim_params=sim_params)
trade_source = factory.create_daily_trade_source(
[8229],
200,
sim_params
)
algo.set_sources([trade_source])
gen = algo.get_generator()
results = list(gen)
self.assertEqual(len(results), 42)
# May 7, 2012 was an LSE holiday, confirm the 4th trading
# day was May 8.
self.assertEqual(results[4]['daily_perf']['period_open'],
datetime(2012, 5, 8, 8, 31, tzinfo=pytz.utc))
@timed(DEFAULT_TIMEOUT)
def test_generator_dates(self):
"""
        Ensure the pipeline of generators is in sync, at least as far as
their current dates.
"""
sim_params = factory.create_simulation_parameters(
start=datetime(2011, 7, 30, tzinfo=pytz.utc),
end=datetime(2012, 7, 30, tzinfo=pytz.utc)
)
algo = TestAlgo(self, identifiers=[8229], sim_params=sim_params)
trade_source = factory.create_daily_trade_source(
[8229],
sim_params
)
algo.set_sources([trade_source])
gen = algo.get_generator()
self.assertTrue(list(gen))
self.assertTrue(algo.slippage.latest_date)
self.assertTrue(algo.latest_date)
@timed(DEFAULT_TIMEOUT)
def test_handle_data_on_market(self):
"""
Ensure that handle_data is only called on market minutes.
i.e. events that come in at midnight should be processed at market
open.
"""
from zipline.finance.trading import SimulationParameters
sim_params = SimulationParameters(
period_start=datetime(2012, 7, 30, tzinfo=pytz.utc),
period_end=datetime(2012, 7, 30, tzinfo=pytz.utc),
data_frequency='minute'
)
algo = TestAlgo(self, identifiers=[8229], sim_params=sim_params)
midnight_custom_source = [Event({
'custom_field': 42.0,
'sid': 'custom_data',
'source_id': 'TestMidnightSource',
'dt': pd.Timestamp('2012-07-30', tz='UTC'),
'type': DATASOURCE_TYPE.CUSTOM
})]
minute_event_source = [Event({
'volume': 100,
'price': 200.0,
'high': 210.0,
'open_price': 190.0,
'low': 180.0,
'sid': 8229,
'source_id': 'TestMinuteEventSource',
'dt': pd.Timestamp('2012-07-30 9:31 AM', tz='US/Eastern').
tz_convert('UTC'),
'type': DATASOURCE_TYPE.TRADE
})]
algo.set_sources([midnight_custom_source, minute_event_source])
gen = algo.get_generator()
# Consume the generator
list(gen)
# Though the events had different time stamps, handle data should
# have only been called once, at the market open.
self.assertEqual(algo.num_bars, 1)
@timed(DEFAULT_TIMEOUT)
def test_progress(self):
"""
        Ensure the pipeline of generators is in sync, at least as far as
their current dates.
"""
sim_params = factory.create_simulation_parameters(
start=datetime(2008, 1, 1, tzinfo=pytz.utc),
end=datetime(2008, 1, 5, tzinfo=pytz.utc)
)
algo = TestAlgo(self, sim_params=sim_params)
trade_source = factory.create_daily_trade_source(
[8229],
sim_params
)
algo.set_sources([trade_source])
gen = algo.get_generator()
results = list(gen)
self.assertEqual(results[-2]['progress'], 1.0)
def test_benchmark_times_match_market_close_for_minutely_data(self):
"""
Benchmark dates should be adjusted so that benchmark events are
emitted at the end of each trading day when working with minutely
data.
Verification relies on the fact that there are no trades so
algo.datetime should be equal to the last benchmark time.
See https://github.com/quantopian/zipline/issues/241
"""
sim_params = create_simulation_parameters(num_days=1,
data_frequency='minute')
algo = TestAlgo(self, sim_params=sim_params, identifiers=[8229])
algo.run(source=[], overwrite_sim_params=False)
self.assertEqual(algo.datetime, sim_params.last_close)
|
apache-2.0
|