repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
kobejean/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator_input_test.py | 46 | 13101 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
from tensorflow.python.training import training_util
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner_impl
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def boston_input_fn_with_queue(num_epochs=None):
features, labels = boston_input_fn(num_epochs=num_epochs)
# Create a minimal queue runner.
fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32)
queue_runner = queue_runner_impl.QueueRunner(fake_queue,
[constant_op.constant(0)])
queue_runner_impl.add_queue_runner(queue_runner)
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features],
0), array_ops.concat([labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
class EstimatorInputTest(test.TestCase):
def testContinueTrainingDictionaryInput(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=50)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
del est
# Create another estimator object with the same output dir.
est2 = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
# Check we can evaluate and predict.
scores2 = est2.evaluate(
x=boston_input,
y=float64_target,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
self.assertAllClose(scores2['MSE'], scores['MSE'])
predictions = np.array(list(est2.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions,
float64_target['labels'])
self.assertAllClose(other_score, scores['MSE'])
def testBostonAll(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=100)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
predictions = np.array(list(est.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(scores['MSE'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testBostonAllDictionaryInput(self):
boston = base.load_boston()
est = estimator.Estimator(model_fn=linear_model_fn)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=100)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
predictions = np.array(list(est.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(other_score, scores['MSE'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisAll(self):
iris = base.load_iris()
est = estimator.SKCompat(
estimator.Estimator(model_fn=logistic_model_no_mode_fn))
est.fit(iris.data, iris.target, steps=100)
scores = est.score(
x=iris.data,
y=iris.target,
metrics={
('accuracy', 'class'): metric_ops.streaming_accuracy
})
predictions = est.predict(x=iris.data)
predictions_class = est.predict(x=iris.data, outputs=['class'])['class']
self.assertEqual(predictions['prob'].shape[0], iris.target.shape[0])
self.assertAllClose(predictions['class'], predictions_class)
self.assertAllClose(predictions['class'],
np.argmax(predictions['prob'], axis=1))
other_score = _sklearn.accuracy_score(iris.target, predictions['class'])
self.assertAllClose(scores['accuracy'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testIrisAllDictionaryInput(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
iris_data = {'input': iris.data}
iris_target = {'labels': iris.target}
est.fit(iris_data, iris_target, steps=100)
scores = est.evaluate(
x=iris_data,
y=iris_target,
metrics={
('accuracy', 'class'): metric_ops.streaming_accuracy
})
predictions = list(est.predict(x=iris_data))
predictions_class = list(est.predict(x=iris_data, outputs=['class']))
self.assertEqual(len(predictions), iris.target.shape[0])
classes_batch = np.array([p['class'] for p in predictions])
self.assertAllClose(classes_batch,
np.array([p['class'] for p in predictions_class]))
self.assertAllClose(classes_batch,
np.argmax(
np.array([p['prob'] for p in predictions]), axis=1))
other_score = _sklearn.accuracy_score(iris.target, classes_batch)
self.assertAllClose(other_score, scores['accuracy'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisInputFn(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisInputFnLabelsDict(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn_labels_dict, steps=100)
_ = est.evaluate(
input_fn=iris_input_fn_labels_dict,
steps=1,
metrics={
'accuracy':
metric_spec.MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='class',
label_key='labels')
})
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testTrainInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_eval_fn, steps=1)
def testPredictInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testPredictInputFnWithQueue(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn_with_queue, num_epochs=2)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0] * 2)
def testPredictConstInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
def input_fn():
features = array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
if __name__ == '__main__':
test.main()
| apache-2.0 |
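The test file above exercises the `tf.contrib.learn` Estimator contract: an `input_fn` returns a `(features, labels)` pair of tensors, and a `model_fn` returns `(predictions, loss, train_op)`. Below is a minimal, hedged sketch of that same pattern on toy data, assuming a TensorFlow 1.x environment where the now-deprecated `tf.contrib.learn` modules imported above are still available; `toy_input_fn` and `toy_model_fn` are illustrative names, not part of the original test.

```python
# Minimal sketch of the input_fn / model_fn contract used in the tests above.
# Assumes TensorFlow 1.x with tf.contrib.learn installed (deprecated API).
import numpy as np
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.python.framework import constant_op
from tensorflow.python.training import training_util


def toy_input_fn():
    # 10 examples with 3 float64 features each, and one regression target per example.
    features = constant_op.constant(np.random.rand(10, 3))
    labels = constant_op.constant(np.random.rand(10, 1))
    return features, labels


def toy_model_fn(features, labels, mode):
    # Same return shape as linear_model_fn above: (predictions, loss, train_op).
    prediction, loss = models.linear_regression_zero_init(features, labels)
    train_op = optimizers.optimize_loss(
        loss,
        training_util.get_global_step(),
        optimizer='Adagrad',
        learning_rate=0.1)
    return prediction, loss, train_op


est = estimator.Estimator(model_fn=toy_model_fn)
est.fit(input_fn=toy_input_fn, steps=5)
predictions = list(est.predict(input_fn=toy_input_fn))
```

As in `testPredictConstInputFn` above, the same `input_fn` can drive both training and prediction because the Estimator only consumes the features during `predict`.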
anirudhjayaraman/scikit-learn | benchmarks/bench_20newsgroups.py | 377 | 3555 | from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
| bsd-3-clause |
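The benchmark above times `fit` and `predict` for each selected estimator and reports accuracy; it is driven from the command line, e.g. `python bench_20newsgroups.py -e logistic_regression naive_bayes`. A minimal sketch of the same timing pattern is shown below, using a tiny synthetic count matrix so it runs without downloading the 20 newsgroups data; the data shapes and the choice of `MultinomialNB` are illustrative only.

```python
# Minimal sketch of the fit/predict timing loop used in the benchmark above,
# on synthetic bag-of-words counts instead of fetch_20newsgroups_vectorized.
from time import time

import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import MultinomialNB

rng = np.random.RandomState(0)
X_train = rng.randint(0, 5, size=(200, 50)).astype(np.float32)  # fake term counts
y_train = rng.randint(0, 3, size=200)                           # 3 classes
X_test = rng.randint(0, 5, size=(50, 50)).astype(np.float32)
y_test = rng.randint(0, 3, size=50)

clf = MultinomialNB()

t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0

t0 = time()
y_pred = clf.predict(X_test)
test_time = time() - t0

print("train: %.4fs  test: %.4fs  accuracy: %.4f"
      % (train_time, test_time, accuracy_score(y_test, y_pred)))
```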
Sentient07/scikit-learn | sklearn/decomposition/tests/test_kernel_pca.py | 74 | 8472 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import (assert_array_almost_equal, assert_less,
assert_equal, assert_not_equal,
assert_raises)
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.metrics.pairwise import rbf_kernel
def test_kernel_pca():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
for eigen_solver in ("auto", "dense", "arpack"):
for kernel in ("linear", "rbf", "poly", histogram):
# histogram kernel produces singular matrix inside linalg.solve
# XXX use a least-squares approximation?
inv = not callable(kernel)
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=inv)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# non-regression test: previously, gamma would be 0 by default,
# forcing all eigenvalues to 0 under the poly kernel
assert_not_equal(X_fit_transformed.size, 0)
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
if inv:
X_pred2 = kpca.inverse_transform(X_pred_transformed)
assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_invalid_parameters():
assert_raises(ValueError, KernelPCA, 10, fit_inverse_transform=True,
kernel='precomputed')
def test_kernel_pca_consistent_transform():
# X_fit_ needs to retain the old, unmodified copy of X
state = np.random.RandomState(0)
X = state.rand(10, 10)
kpca = KernelPCA(random_state=state).fit(X)
transformed1 = kpca.transform(X)
X_copy = X.copy()
X[:, 0] = 666
transformed2 = kpca.transform(X_copy)
assert_array_almost_equal(transformed1, transformed2)
def test_kernel_pca_sparse():
rng = np.random.RandomState(0)
X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
X_pred = sp.csr_matrix(rng.random_sample((2, 4)))
for eigen_solver in ("auto", "arpack"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=False)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
# X_pred2 = kpca.inverse_transform(X_pred_transformed)
# assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_linear_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
# for a linear kernel, kernel PCA should find the same projection as PCA
# modulo the sign (direction)
# fit only the first four components: fifth is near zero eigenvalue, so
# can be trimmed due to roundoff error
assert_array_almost_equal(
np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)),
np.abs(PCA(4).fit(X_fit).transform(X_pred)))
def test_kernel_pca_n_components():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
for c in [1, 2, 4]:
kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
shape = kpca.fit(X_fit).transform(X_pred).shape
assert_equal(shape, (2, c))
def test_remove_zero_eig():
X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
# n_components=None (default) => remove_zero_eig is True
kpca = KernelPCA()
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
kpca = KernelPCA(n_components=2)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 2))
kpca = KernelPCA(n_components=2, remove_zero_eig=True)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
def test_kernel_pca_precomputed():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\
fit(X_fit).transform(X_pred)
X_kpca2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T))
X_kpca_train = KernelPCA(
4, eigen_solver=eigen_solver,
kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
X_kpca_train2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T))
assert_array_almost_equal(np.abs(X_kpca),
np.abs(X_kpca2))
assert_array_almost_equal(np.abs(X_kpca_train),
np.abs(X_kpca_train2))
def test_kernel_pca_invalid_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((2, 4))
kpca = KernelPCA(kernel="tototiti")
assert_raises(ValueError, kpca.fit, X_fit)
def test_gridsearch_pipeline():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="rbf", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
grid_search.fit(X, y)
assert_equal(grid_search.best_score_, 1)
def test_gridsearch_pipeline_precomputed():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model using a precomputed kernel.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="precomputed", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(Perceptron__n_iter=np.arange(1, 5))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
X_kernel = rbf_kernel(X, gamma=2.)
grid_search.fit(X_kernel, y)
assert_equal(grid_search.best_score_, 1)
def test_nested_circles():
# Test the linear separability of the first 2D KPCA transform
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
# 2D nested circles are not linearly separable
train_score = Perceptron().fit(X, y).score(X, y)
assert_less(train_score, 0.8)
# Project the circles data into the first 2 components of a RBF Kernel
# PCA model.
# Note that the gamma value is data dependent. If this test breaks
# and the gamma value has to be updated, the Kernel PCA example will
# have to be updated too.
kpca = KernelPCA(kernel="rbf", n_components=2,
fit_inverse_transform=True, gamma=2.)
X_kpca = kpca.fit_transform(X)
# The data is perfectly linearly separable in that space
train_score = Perceptron().fit(X_kpca, y).score(X_kpca, y)
assert_equal(train_score, 1.0)
| bsd-3-clause |
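Several of the tests above (`test_kernel_pca_linear_kernel`, `test_kernel_pca_precomputed`) rely on the fact that a linear-kernel `KernelPCA`, a `KernelPCA` fed a precomputed Gram matrix, and plain `PCA` produce the same projection up to sign. A small sketch of that check follows; the random data and tolerance are illustrative.

```python
# Sketch of the linear-kernel / precomputed-kernel / PCA equivalence (up to sign)
# that the tests above assert.
import numpy as np
from sklearn.decomposition import PCA, KernelPCA

rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))

# Plain PCA and linear-kernel KernelPCA on the raw data.
proj_pca = PCA(n_components=4).fit(X_fit).transform(X_pred)
proj_linear = KernelPCA(n_components=4, kernel="linear").fit(X_fit).transform(X_pred)

# Precomputed kernel: pass the Gram matrices X_fit @ X_fit.T and X_pred @ X_fit.T.
kpca_pre = KernelPCA(n_components=4, kernel="precomputed").fit(np.dot(X_fit, X_fit.T))
proj_pre = kpca_pre.transform(np.dot(X_pred, X_fit.T))

# Components are only defined up to sign, so compare absolute values.
np.testing.assert_array_almost_equal(np.abs(proj_pca), np.abs(proj_linear))
np.testing.assert_array_almost_equal(np.abs(proj_linear), np.abs(proj_pre))
```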
harisbal/pandas | pandas/tests/io/test_pytables.py | 1 | 215736 | import pytest
import os
import tempfile
from contextlib import contextmanager
from warnings import catch_warnings, simplefilter
from distutils.version import LooseVersion
import datetime
from datetime import timedelta
import numpy as np
import pandas as pd
from pandas import (Series, DataFrame, Panel, MultiIndex, Int64Index,
RangeIndex, Categorical, bdate_range,
date_range, timedelta_range, Index, DatetimeIndex,
isna, compat, concat, Timestamp)
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.util.testing import (assert_panel_equal,
assert_frame_equal,
assert_series_equal,
set_timezone)
from pandas.compat import (is_platform_windows, is_platform_little_endian,
PY35, PY36, BytesIO, text_type,
range, lrange, u)
from pandas.io.formats.printing import pprint_thing
from pandas.core.dtypes.common import is_categorical_dtype
tables = pytest.importorskip('tables')
from pandas.io import pytables as pytables # noqa:E402
from pandas.io.pytables import (TableIterator, # noqa:E402
HDFStore, Term, read_hdf,
PossibleDataLossError, ClosedFileError)
_default_compressor = ('blosc' if LooseVersion(tables.__version__) >=
LooseVersion('2.2') else 'zlib')
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
# contextmanager to ensure the file cleanup
def safe_remove(path):
if path is not None:
try:
os.remove(path)
except:
pass
def safe_close(store):
try:
if store is not None:
store.close()
except:
pass
def create_tempfile(path):
""" create an unopened named temporary file """
return os.path.join(tempfile.gettempdir(), path)
@contextmanager
def ensure_clean_store(path, mode='a', complevel=None, complib=None,
fletcher32=False):
try:
# put in the temporary path if we don't have one already
if not len(os.path.dirname(path)):
path = create_tempfile(path)
store = HDFStore(path, mode=mode, complevel=complevel,
complib=complib, fletcher32=False)
yield store
finally:
safe_close(store)
if mode == 'w' or mode == 'a':
safe_remove(path)
@contextmanager
def ensure_clean_path(path):
"""
return essentially a named temporary file that is not opened
and deleted on exiting; if path is a list, then create and
return list of filenames
"""
try:
if isinstance(path, list):
filenames = [create_tempfile(p) for p in path]
yield filenames
else:
filenames = [create_tempfile(path)]
yield filenames[0]
finally:
for f in filenames:
safe_remove(f)
# set these parameters so we don't have file sharing
tables.parameters.MAX_NUMEXPR_THREADS = 1
tables.parameters.MAX_BLOSC_THREADS = 1
tables.parameters.MAX_THREADS = 1
def _maybe_remove(store, key):
"""For tests using tables, try removing the table to be sure there is
no content from previous tests using the same table name."""
try:
store.remove(key)
except:
pass
class Base(object):
@classmethod
def setup_class(cls):
# Pytables 3.0.0 deprecates lots of things
tm.reset_testing_mode()
@classmethod
def teardown_class(cls):
# Pytables 3.0.0 deprecates lots of things
tm.set_testing_mode()
def setup_method(self, method):
self.path = 'tmp.__%s__.h5' % tm.rands(10)
def teardown_method(self, method):
pass
@pytest.mark.single
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class TestHDFStore(Base):
def test_context(self):
path = create_tempfile(self.path)
try:
with HDFStore(path) as tbl:
raise ValueError('blah')
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl['a'] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl['a']) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self):
path = create_tempfile(self.path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
assert_series_equal(o, roundtrip('series', o))
o = tm.makeStringSeries()
assert_series_equal(o, roundtrip('string_series', o))
o = tm.makeDataFrame()
assert_frame_equal(o, roundtrip('frame', o))
with catch_warnings(record=True):
o = tm.makePanel()
assert_panel_equal(o, roundtrip('panel', o))
# table
df = DataFrame(dict(A=lrange(5), B=lrange(5)))
df.to_hdf(path, 'table', append=True)
result = read_hdf(path, 'table', where=['index>2'])
assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self):
# GH6166
# unconversion of long strings was being chopped in earlier
# versions of numpy < 1.7.2
df = DataFrame({'a': tm.rands_array(100, size=10)},
index=tm.rands_array(100, size=10))
with ensure_clean_store(self.path) as store:
store.append('df', df, data_columns=['a'])
result = store.select('df')
assert_frame_equal(df, result)
def test_api(self):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, 'df', append=True, format='table')
df.iloc[10:].to_hdf(path, 'df', append=True, format='table')
assert_frame_equal(read_hdf(path, 'df'), df)
# append to False
df.iloc[:10].to_hdf(path, 'df', append=False, format='table')
df.iloc[10:].to_hdf(path, 'df', append=True, format='table')
assert_frame_equal(read_hdf(path, 'df'), df)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, 'df', append=True)
df.iloc[10:].to_hdf(path, 'df', append=True, format='table')
assert_frame_equal(read_hdf(path, 'df'), df)
# append to False
df.iloc[:10].to_hdf(path, 'df', append=False, format='table')
df.iloc[10:].to_hdf(path, 'df', append=True)
assert_frame_equal(read_hdf(path, 'df'), df)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, 'df', append=False, format='fixed')
assert_frame_equal(read_hdf(path, 'df'), df)
df.to_hdf(path, 'df', append=False, format='f')
assert_frame_equal(read_hdf(path, 'df'), df)
df.to_hdf(path, 'df', append=False)
assert_frame_equal(read_hdf(path, 'df'), df)
df.to_hdf(path, 'df')
assert_frame_equal(read_hdf(path, 'df'), df)
with ensure_clean_store(self.path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, 'df')
store.append('df', df.iloc[:10], append=True, format='table')
store.append('df', df.iloc[10:], append=True, format='table')
assert_frame_equal(store.select('df'), df)
# append to False
_maybe_remove(store, 'df')
store.append('df', df.iloc[:10], append=False, format='table')
store.append('df', df.iloc[10:], append=True, format='table')
assert_frame_equal(store.select('df'), df)
# formats
_maybe_remove(store, 'df')
store.append('df', df.iloc[:10], append=False, format='table')
store.append('df', df.iloc[10:], append=True, format='table')
assert_frame_equal(store.select('df'), df)
_maybe_remove(store, 'df')
store.append('df', df.iloc[:10], append=False, format='table')
store.append('df', df.iloc[10:], append=True, format=None)
assert_frame_equal(store.select('df'), df)
with ensure_clean_path(self.path) as path:
# invalid
df = tm.makeDataFrame()
pytest.raises(ValueError, df.to_hdf, path,
'df', append=True, format='f')
pytest.raises(ValueError, df.to_hdf, path,
'df', append=True, format='fixed')
pytest.raises(TypeError, df.to_hdf, path,
'df', append=True, format='foo')
pytest.raises(TypeError, df.to_hdf, path,
'df', append=False, format='bar')
# File path doesn't exist
path = ""
pytest.raises(compat.FileNotFoundError,
read_hdf, path, 'df')
def test_api_default_format(self):
# default_format option
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
pd.set_option('io.hdf.default_format', 'fixed')
_maybe_remove(store, 'df')
store.put('df', df)
assert not store.get_storer('df').is_table
pytest.raises(ValueError, store.append, 'df2', df)
pd.set_option('io.hdf.default_format', 'table')
_maybe_remove(store, 'df')
store.put('df', df)
assert store.get_storer('df').is_table
_maybe_remove(store, 'df2')
store.append('df2', df)
assert store.get_storer('df').is_table
pd.set_option('io.hdf.default_format', None)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
pd.set_option('io.hdf.default_format', 'fixed')
df.to_hdf(path, 'df')
with HDFStore(path) as store:
assert not store.get_storer('df').is_table
pytest.raises(ValueError, df.to_hdf, path, 'df2', append=True)
pd.set_option('io.hdf.default_format', 'table')
df.to_hdf(path, 'df3')
with HDFStore(path) as store:
assert store.get_storer('df3').is_table
df.to_hdf(path, 'df4', append=True)
with HDFStore(path) as store:
assert store.get_storer('df4').is_table
pd.set_option('io.hdf.default_format', None)
def test_keys(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeStringSeries()
store['c'] = tm.makeDataFrame()
with catch_warnings(record=True):
store['d'] = tm.makePanel()
store['foo/bar'] = tm.makePanel()
assert len(store) == 5
expected = {'/a', '/b', '/c', '/d', '/foo/bar'}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(A=lrange(5), B=lrange(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self):
with ensure_clean_store(self.path) as store:
# GH 12221
assert list(store) == []
def test_repr(self):
with ensure_clean_store(self.path) as store:
repr(store)
store.info()
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeStringSeries()
store['c'] = tm.makeDataFrame()
with catch_warnings(record=True):
store['d'] = tm.makePanel()
store['foo/bar'] = tm.makePanel()
store.append('e', tm.makePanel())
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ['obj1']] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store['df'] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, 'bah')
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
store.append('df', df)
s = store.get_storer('df')
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeDataFrame()
store['foo/bar'] = tm.makeDataFrame()
assert 'a' in store
assert 'b' in store
assert 'c' not in store
assert 'foo/bar' in store
assert '/foo/bar' in store
assert '/foo/b' not in store
assert 'bar' not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store['node())'] = tm.makeDataFrame()
assert 'node())' in store
def test_versioning(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df[:10])
store.append('df1', df[10:])
assert store.root.a._v_attrs.pandas_version == '0.15.2'
assert store.root.b._v_attrs.pandas_version == '0.15.2'
assert store.root.df1._v_attrs.pandas_version == '0.15.2'
# write a file and wipe its versioning
_maybe_remove(store, 'df2')
store.append('df2', df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node('df2')._v_attrs.pandas_version = None
pytest.raises(Exception, store.select, 'df2')
def test_mode(self):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(self.path) as path:
# constructor
if mode in ['r', 'r+']:
pytest.raises(IOError, HDFStore, path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(self.path) as path:
# context
if mode in ['r', 'r+']:
def f():
with HDFStore(path, mode=mode) as store: # noqa
pass
pytest.raises(IOError, f)
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(self.path) as path:
# conv write
if mode in ['r', 'r+']:
pytest.raises(IOError, df.to_hdf,
path, 'df', mode=mode)
df.to_hdf(path, 'df', mode='w')
else:
df.to_hdf(path, 'df', mode=mode)
# conv read
if mode in ['w']:
pytest.raises(ValueError, read_hdf,
path, 'df', mode=mode)
else:
result = read_hdf(path, 'df', mode=mode)
assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', mode='w')
result = read_hdf(path, 'df')
assert_frame_equal(result, df)
check('r')
check('r+')
check('a')
check('w')
check_default_mode()
def test_reopen_handle(self):
with ensure_clean_path(self.path) as path:
store = HDFStore(path, mode='a')
store['a'] = tm.makeTimeSeries()
# invalid mode change
pytest.raises(PossibleDataLossError, store.open, 'w')
store.close()
assert not store.is_open
# truncation ok here
store.open('w')
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode='a')
store['a'] = tm.makeTimeSeries()
# reopen as read
store.open('r')
assert store.is_open
assert len(store) == 1
assert store._mode == 'r'
store.close()
assert not store.is_open
# reopen as append
store.open('a')
assert store.is_open
assert len(store) == 1
assert store._mode == 'a'
store.close()
assert not store.is_open
# reopen as append (again)
store.open('a')
assert store.is_open
assert len(store) == 1
assert store._mode == 'a'
store.close()
assert not store.is_open
def test_open_args(self):
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(path, mode='a', driver='H5FD_CORE',
driver_core_backing_store=0)
store['df'] = df
store.append('df2', df)
tm.assert_frame_equal(store['df'], df)
tm.assert_frame_equal(store['df2'], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
left = store.get('a')
right = store['a']
tm.assert_series_equal(left, right)
left = store.get('/a')
right = store['/a']
tm.assert_series_equal(left, right)
pytest.raises(KeyError, store.get, 'b')
@pytest.mark.parametrize('where, expected', [
('/', {
'': ({'first_group', 'second_group'}, set()),
'/first_group': (set(), {'df1', 'df2'}),
'/second_group': ({'third_group'}, {'df3', 's1'}),
'/second_group/third_group': (set(), {'df4'}),
}),
('/second_group', {
'/second_group': ({'third_group'}, {'df3', 's1'}),
'/second_group/third_group': (set(), {'df4'}),
})
])
def test_walk(self, where, expected):
# GH10143
objs = {
'df1': pd.DataFrame([1, 2, 3]),
'df2': pd.DataFrame([4, 5, 6]),
'df3': pd.DataFrame([6, 7, 8]),
'df4': pd.DataFrame([9, 10, 11]),
's1': pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
'a1': np.array([[1, 2, 3], [4, 5, 6]]),
'tb1': np.array([(1, 2, 3), (4, 5, 6)], dtype='i,i,i'),
'tb2': np.array([(7, 8, 9), (10, 11, 12)], dtype='i,i,i')
}
with ensure_clean_store('walk_groups.hdf', mode='w') as store:
store.put('/first_group/df1', objs['df1'])
store.put('/first_group/df2', objs['df2'])
store.put('/second_group/df3', objs['df3'])
store.put('/second_group/s1', objs['s1'])
store.put('/second_group/third_group/df4', objs['df4'])
# Create non-pandas objects
store._handle.create_array('/first_group', 'a1', objs['a1'])
store._handle.create_table('/first_group', 'tb1', obj=objs['tb1'])
store._handle.create_table('/second_group', 'tb2', obj=objs['tb2'])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = '/'.join([path, leaf])
obj = store.get(frame_path)
if 'df' in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self):
with ensure_clean_store(self.path) as store:
s = tm.makeTimeSeries()
store['a'] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, 'a')
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store['df'] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
pytest.raises(AttributeError, getattr, store, 'd')
for x in ['mode', 'path', 'handle', 'complib']:
pytest.raises(AttributeError, getattr, store, x)
# not stores
for x in ['mode', 'path', 'handle', 'complib']:
getattr(store, "_%s" % x)
def test_put(self):
with ensure_clean_store(self.path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store['a'] = ts
store['b'] = df[:10]
store['foo/bar/bah'] = df[:10]
store['foo'] = df[:10]
store['/foo'] = df[:10]
store.put('c', df[:10], format='table')
# not OK, not a table
pytest.raises(
ValueError, store.put, 'b', df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
# _maybe_remove(store, 'f')
# pytest.raises(ValueError, store.put, 'f', df[10:],
# append=True)
# can't put to a table (use append instead)
pytest.raises(ValueError, store.put, 'c', df[10:], append=True)
# overwrite table
store.put('c', df[:10], format='table', append=False)
tm.assert_frame_equal(df[:10], store['c'])
def test_put_string_index(self):
with ensure_clean_store(self.path) as store:
index = Index(
["I am a very long string index: %s" % i for i in range(20)])
s = Series(np.arange(20), index=index)
df = DataFrame({'A': s, 'B': s})
store['a'] = s
tm.assert_series_equal(store['a'], s)
store['b'] = df
tm.assert_frame_equal(store['b'], df)
# mixed length
index = Index(['abcdefghijklmnopqrstuvwxyz1234567890'] +
["I am a very long string index: %s" % i
for i in range(20)])
s = Series(np.arange(21), index=index)
df = DataFrame({'A': s, 'B': s})
store['a'] = s
tm.assert_series_equal(store['a'], s)
store['b'] = df
tm.assert_frame_equal(store['b'], df)
def test_put_compression(self):
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
store.put('c', df, format='table', complib='zlib')
tm.assert_frame_equal(store['c'], df)
# can't compress if format='fixed'
pytest.raises(ValueError, store.put, 'b', df,
format='fixed', complib='zlib')
@td.skip_if_windows_python_3
def test_put_compression_blosc(self):
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
# can't compress if format='fixed'
pytest.raises(ValueError, store.put, 'b', df,
format='fixed', complib='blosc')
store.put('c', df, format='table', complib='blosc')
tm.assert_frame_equal(store['c'], df)
def test_complibs_default_settings(self):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(self.path) as tmpfile:
df.to_hdf(tmpfile, 'df', complevel=9)
result = pd.read_hdf(tmpfile, 'df')
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode='r') as h5file:
for node in h5file.walk_nodes(where='/df', classname='Leaf'):
assert node.filters.complevel == 9
assert node.filters.complib == 'zlib'
# Set complib and check to see if compression is disabled
with ensure_clean_path(self.path) as tmpfile:
df.to_hdf(tmpfile, 'df', complib='zlib')
result = pd.read_hdf(tmpfile, 'df')
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode='r') as h5file:
for node in h5file.walk_nodes(where='/df', classname='Leaf'):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(self.path) as tmpfile:
df.to_hdf(tmpfile, 'df')
result = pd.read_hdf(tmpfile, 'df')
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode='r') as h5file:
for node in h5file.walk_nodes(where='/df', classname='Leaf'):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(self.path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append('dfc', df, complevel=9, complib='blosc')
store.append('df', df)
store.close()
with tables.open_file(tmpfile, mode='r') as h5file:
for node in h5file.walk_nodes(where='/df', classname='Leaf'):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where='/dfc', classname='Leaf'):
assert node.filters.complevel == 9
assert node.filters.complib == 'blosc'
def test_complibs(self):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if it's not available on this platform
if not tables.which_lib_version('lzo'):
all_complibs.remove('lzo')
# Remove bzip2 if it's not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(self.path) as tmpfile:
gname = 'foo'
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode='r')
for node in h5table.walk_nodes(where='/' + gname,
classname='Leaf'):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal)
def test_put_mixed_type(self):
df = tm.makeTimeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ['obj1']] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put('df', df)
expected = store.get('df')
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self):
with ensure_clean_store(self.path) as store:
# this is allowed, but you almost always don't want to do it
# (tables.NaturalNameWarning)
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df[:10])
store.append('df1', df[10:])
tm.assert_frame_equal(store['df1'], df)
_maybe_remove(store, 'df2')
store.put('df2', df[:10], format='table')
store.append('df2', df[10:])
tm.assert_frame_equal(store['df2'], df)
_maybe_remove(store, 'df3')
store.append('/df3', df[:10])
store.append('/df3', df[10:])
tm.assert_frame_equal(store['df3'], df)
# this is allowed, but you almost always don't want to do it
# (tables.NaturalNameWarning)
_maybe_remove(store, '/df3 foo')
store.append('/df3 foo', df[:10])
store.append('/df3 foo', df[10:])
tm.assert_frame_equal(store['df3 foo'], df)
# panel
wp = tm.makePanel()
_maybe_remove(store, 'wp1')
store.append('wp1', wp.iloc[:, :10, :])
store.append('wp1', wp.iloc[:, 10:, :])
assert_panel_equal(store['wp1'], wp)
# test using different order of items on the non-index axes
_maybe_remove(store, 'wp1')
wp_append1 = wp.iloc[:, :10, :]
store.append('wp1', wp_append1)
wp_append2 = wp.iloc[:, 10:, :].reindex(items=wp.items[::-1])
store.append('wp1', wp_append2)
assert_panel_equal(store['wp1'], wp)
# dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df['mixed_column'] = 'testing'
df.loc[2, 'mixed_column'] = np.nan
_maybe_remove(store, 'df')
store.append('df', df)
tm.assert_frame_equal(store['df'], df)
# uints - test storage of uints
uint_data = DataFrame({
'u08': Series(np.random.randint(0, high=255, size=5),
dtype=np.uint8),
'u16': Series(np.random.randint(0, high=65535, size=5),
dtype=np.uint16),
'u32': Series(np.random.randint(0, high=2**30, size=5),
dtype=np.uint32),
'u64': Series([2**58, 2**59, 2**60, 2**61, 2**62],
dtype=np.uint64)}, index=np.arange(5))
_maybe_remove(store, 'uints')
store.append('uints', uint_data)
tm.assert_frame_equal(store['uints'], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, 'uints')
# 64-bit indices not yet supported
store.append('uints', uint_data, data_columns=[
'u08', 'u16', 'u32'])
tm.assert_frame_equal(store['uints'], uint_data)
def test_append_series(self):
with ensure_clean_store(self.path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append('ss', ss)
result = store['ss']
tm.assert_series_equal(result, ss)
assert result.name is None
store.append('ts', ts)
result = store['ts']
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = 'foo'
store.append('ns', ns)
result = store['ns']
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select('ns', 'foo>60')
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select('ns', 'foo>70 and index<90')
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=['A'])
mi['B'] = np.arange(len(mi))
mi['C'] = 'foo'
mi.loc[3:5, 'C'] = 'bar'
mi.set_index(['C', 'B'], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append('mi', s)
tm.assert_series_equal(store['mi'], s)
def test_store_index_types(self):
# GH5386
# test storing various index types
with ensure_clean_store(self.path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df.index = index(len(df))
_maybe_remove(store, 'df')
store.put('df', df, format=format)
assert_frame_equal(df, store['df'])
for index in [tm.makeFloatIndex, tm.makeStringIndex,
tm.makeIntIndex, tm.makeDateIndex]:
check('table', index)
check('fixed', index)
# period index currently broken for table
# see GH7796 FIXME
check('fixed', tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
if compat.PY3:
check('table', index)
check('fixed', index)
else:
# only support for fixed types (and they have a perf warning)
pytest.raises(TypeError, check, 'table', index)
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
check('fixed', index)
@pytest.mark.skipif(not is_platform_little_endian(),
reason="reason platform is not little endian")
def test_encoding(self):
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(A='foo', B='bar'), index=range(5))
df.loc[2, 'A'] = np.nan
df.loc[3, 'B'] = np.nan
_maybe_remove(store, 'df')
store.append('df', df, encoding='ascii')
tm.assert_frame_equal(store['df'], df)
expected = df.reindex(columns=['A'])
result = store.select('df', Term('columns=A', encoding='ascii'))
tm.assert_frame_equal(result, expected)
def test_latin_encoding(self):
if compat.PY2:
tm.assert_raises_regex(
TypeError, r'\[unicode\] is not implemented as a table column')
return
values = [[b'E\xc9, 17', b'', b'a', b'b', b'c'],
[b'E\xc9, 17', b'a', b'b', b'c'],
[b'EE, 17', b'', b'a', b'b', b'c'],
[b'E\xc9, 17', b'\xf8\xfc', b'a', b'b', b'c'],
[b'', b'a', b'b', b'c'],
[b'\xf8\xfc', b'a', b'b', b'c'],
[b'A\xf8\xfc', b'', b'a', b'b', b'c'],
[np.nan, b'', b'b', b'c'],
[b'A\xf8\xfc', np.nan, b'', b'b', b'c']]
def _try_decode(x, encoding='latin-1'):
try:
return x.decode(encoding)
except AttributeError:
return x
# not sure how to remove latin-1 from code in python 2 and 3
values = [[_try_decode(x) for x in y] for y in values]
examples = []
for dtype in ['category', object]:
for val in values:
examples.append(pd.Series(val, dtype=dtype))
def roundtrip(s, key='data', encoding='latin-1', nan_rep=''):
with ensure_clean_path(self.path) as store:
s.to_hdf(store, key, format='table', encoding=encoding,
nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = s.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
assert_series_equal(s_nan, retr, check_dtype=False,
check_categorical=False)
else:
assert_series_equal(s_nan, retr)
for s in examples:
roundtrip(s)
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self):
with ensure_clean_store(self.path) as store:
df = DataFrame({'A': Series(np.random.randn(20)).astype('int32'),
'A1': np.random.randn(20),
'A2': np.random.randn(20),
'B': 'foo', 'C': 'bar',
'D': Timestamp("20010101"),
'E': datetime.datetime(2001, 1, 2, 0, 0)},
index=np.arange(20))
# some nans
_maybe_remove(store, 'df1')
df.loc[0:15, ['A1', 'B', 'D', 'E']] = np.nan
store.append('df1', df[:10])
store.append('df1', df[10:])
tm.assert_frame_equal(store['df1'], df)
# first column
df1 = df.copy()
df1.loc[:, 'A1'] = np.nan
_maybe_remove(store, 'df1')
store.append('df1', df1[:10])
store.append('df1', df1[10:])
tm.assert_frame_equal(store['df1'], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, 'A2'] = np.nan
_maybe_remove(store, 'df2')
store.append('df2', df2[:10])
store.append('df2', df2[10:])
tm.assert_frame_equal(store['df2'], df2)
# datetimes
df3 = df.copy()
df3.loc[:, 'E'] = np.nan
_maybe_remove(store, 'df3')
store.append('df3', df3[:10])
store.append('df3', df3[10:])
tm.assert_frame_equal(store['df3'], df3)
def test_append_all_nans(self):
with ensure_clean_store(self.path) as store:
df = DataFrame({'A1': np.random.randn(20),
'A2': np.random.randn(20)},
index=np.arange(20))
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, 'df')
store.append('df', df[:10], dropna=True)
store.append('df', df[10:], dropna=True)
tm.assert_frame_equal(store['df'], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, 'df2')
store.append('df2', df[:10], dropna=False)
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
# tests the option io.hdf.dropna_table
pd.set_option('io.hdf.dropna_table', False)
_maybe_remove(store, 'df3')
store.append('df3', df[:10])
store.append('df3', df[10:])
tm.assert_frame_equal(store['df3'], df)
pd.set_option('io.hdf.dropna_table', True)
_maybe_remove(store, 'df4')
store.append('df4', df[:10])
store.append('df4', df[10:])
tm.assert_frame_equal(store['df4'], df[-4:])
# nan some entire rows (strings are still written!)
df = DataFrame({'A1': np.random.randn(20),
'A2': np.random.randn(20),
'B': 'foo', 'C': 'bar'},
index=np.arange(20))
df.loc[0:15, :] = np.nan
_maybe_remove(store, 'df')
store.append('df', df[:10], dropna=True)
store.append('df', df[10:], dropna=True)
tm.assert_frame_equal(store['df'], df)
_maybe_remove(store, 'df2')
store.append('df2', df[:10], dropna=False)
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame({'A1': np.random.randn(20),
'A2': np.random.randn(20),
'B': 'foo', 'C': 'bar',
'D': Timestamp("20010101"),
'E': datetime.datetime(2001, 1, 2, 0, 0)},
index=np.arange(20))
df.loc[0:15, :] = np.nan
_maybe_remove(store, 'df')
store.append('df', df[:10], dropna=True)
store.append('df', df[10:], dropna=True)
tm.assert_frame_equal(store['df'], df)
_maybe_remove(store, 'df2')
store.append('df2', df[:10], dropna=False)
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{'col1': [0, np.nan, 2], 'col2': [1, np.nan, np.nan]})
with ensure_clean_path(self.path) as path:
df_with_missing.to_hdf(path, 'df_with_missing', format='table')
reloaded = read_hdf(path, 'df_with_missing')
tm.assert_frame_equal(df_with_missing, reloaded)
matrix = [[[np.nan, np.nan, np.nan], [1, np.nan, np.nan]],
[[np.nan, np.nan, np.nan], [np.nan, 5, 6]],
[[np.nan, np.nan, np.nan], [np.nan, 3, np.nan]]]
with catch_warnings(record=True):
panel_with_missing = Panel(matrix,
items=['Item1', 'Item2', 'Item3'],
major_axis=[1, 2],
minor_axis=['A', 'B', 'C'])
with ensure_clean_path(self.path) as path:
panel_with_missing.to_hdf(
path, 'panel_with_missing', format='table')
reloaded_panel = read_hdf(path, 'panel_with_missing')
tm.assert_panel_equal(panel_with_missing, reloaded_panel)
def test_append_frame_column_oriented(self):
with ensure_clean_store(self.path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df.iloc[:, :2], axes=['columns'])
store.append('df1', df.iloc[:, 2:])
tm.assert_frame_equal(store['df1'], df)
result = store.select('df1', 'columns=A')
expected = df.reindex(columns=['A'])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select(
'df1', ('columns=A', 'index=df.index[0:4]'))
expected = df.reindex(columns=['A'], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select('df1',
'columns=A and index>df.index[4]')
def test_append_with_different_block_ordering(self):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(self.path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df['index'] = range(10)
df['index'] += i * 10
df['int64'] = Series([1] * len(df), dtype='int64')
df['int16'] = Series([1] * len(df), dtype='int16')
if i % 2 == 0:
del df['int64']
df['int64'] = Series([1] * len(df), dtype='int64')
if i % 3 == 0:
a = df.pop('A')
df['A'] = a
df.set_index('index', inplace=True)
store.append('df', df)
# test a different ordering but with more fields (like an invalid
# combination)
with ensure_clean_store(self.path) as store:
df = DataFrame(np.random.randn(10, 2),
columns=list('AB'), dtype='float64')
df['int64'] = Series([1] * len(df), dtype='int64')
df['int16'] = Series([1] * len(df), dtype='int16')
store.append('df', df)
# store additional fields in different blocks
df['int16_2'] = Series([1] * len(df), dtype='int16')
pytest.raises(ValueError, store.append, 'df', df)
# store multiple additional fields in different blocks
df['float_3'] = Series([1.] * len(df), dtype='float64')
pytest.raises(ValueError, store.append, 'df', df)
def test_append_with_strings(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
wp = tm.makePanel()
wp2 = wp.rename(
minor_axis={x: "%s_extra" % x for x in wp.minor_axis})
def check_col(key, name, size):
assert getattr(store.get_storer(key)
.table.description, name).itemsize == size
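# check_col reads the itemsize straight off the underlying PyTables
# table description; min_itemsize below reserves that many bytes for
# the string column so later appends with longer values still fit.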
store.append('s1', wp, min_itemsize=20)
store.append('s1', wp2)
expected = concat([wp, wp2], axis=2)
expected = expected.reindex(
minor_axis=sorted(expected.minor_axis))
assert_panel_equal(store['s1'], expected)
check_col('s1', 'minor_axis', 20)
# test dict format
store.append('s2', wp, min_itemsize={'minor_axis': 20})
store.append('s2', wp2)
expected = concat([wp, wp2], axis=2)
expected = expected.reindex(
minor_axis=sorted(expected.minor_axis))
assert_panel_equal(store['s2'], expected)
check_col('s2', 'minor_axis', 20)
# apply min_itemsize to the wrong field; the next append still fails
store.append('s3', wp, min_itemsize={'major_axis': 20})
pytest.raises(ValueError, store.append, 's3', wp2)
# test truncation of bigger strings
store.append('s4', wp)
pytest.raises(ValueError, store.append, 's4', wp2)
# avoid truncation on elements
df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
store.append('df_big', df)
tm.assert_frame_equal(store.select('df_big'), df)
check_col('df_big', 'values_block_1', 15)
# appending smaller string ok
df2 = DataFrame([[124, 'asdqy'], [346, 'dggnhefbdfb']])
store.append('df_big', df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select('df_big'), expected)
check_col('df_big', 'values_block_1', 15)
# avoid truncation on elements
df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
store.append('df_big2', df, min_itemsize={'values': 50})
tm.assert_frame_equal(store.select('df_big2'), df)
check_col('df_big2', 'values_block_1', 50)
# bigger string on next append
store.append('df_new', df)
df_new = DataFrame(
[[124, 'abcdefqhij'], [346, 'abcdefghijklmnopqrtsuvwxyz']])
pytest.raises(ValueError, store.append, 'df_new', df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index('C')
store.append('ss', df['B'], min_itemsize={'index': 4})
tm.assert_series_equal(store.select('ss'), df['B'])
# same as above, with data_columns=True
store.append('ss2', df['B'], data_columns=True,
min_itemsize={'index': 4})
tm.assert_series_equal(store.select('ss2'), df['B'])
# min_itemsize in index without appending (GH 10381)
store.put('ss3', df, format='table',
min_itemsize={'index': 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C='longer').set_index('C')
store.append('ss3', df2)
tm.assert_frame_equal(store.select('ss3'),
pd.concat([df, df2]))
# same as above, with a Series
store.put('ss4', df['B'], format='table',
min_itemsize={'index': 6})
store.append('ss4', df2['B'])
tm.assert_series_equal(store.select('ss4'),
pd.concat([df['B'], df2['B']]))
# with nans
_maybe_remove(store, 'df')
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df.loc[1:4, 'string'] = np.nan
df['string2'] = 'bar'
df.loc[4:8, 'string2'] = np.nan
df['string3'] = 'bah'
df.loc[1:, 'string3'] = np.nan
store.append('df', df)
result = store.select('df')
tm.assert_frame_equal(result, df)
with ensure_clean_store(self.path) as store:
def check_col(key, name, size):
assert getattr(store.get_storer(key)
.table.description, name).itemsize == size
df = DataFrame(dict(A='foo', B='bar'), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, 'df')
store.append('df', df, min_itemsize={'A': 200})
check_col('df', 'A', 200)
assert store.get_storer('df').data_columns == ['A']
# a min_itemsize that creates a second data_column
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['B'], min_itemsize={'A': 200})
check_col('df', 'A', 200)
assert store.get_storer('df').data_columns == ['B', 'A']
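# note: naming a column in min_itemsize implicitly promotes it to a
# data_column, which is why 'A' shows up above even though only 'B'
# was passed in data_columns.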
# a min_itemsize keyed on 'values' sizes both the data_column and the
# remaining values block
_maybe_remove(store, 'df')
store.append('df', df, data_columns=[
'B'], min_itemsize={'values': 200})
check_col('df', 'B', 200)
check_col('df', 'values_block_0', 200)
assert store.get_storer('df').data_columns == ['B']
# infer the .typ on subsequent appends
_maybe_remove(store, 'df')
store.append('df', df[:5], min_itemsize=200)
store.append('df', df[5:], min_itemsize=200)
tm.assert_frame_equal(store['df'], df)
# invalid min_itemsize keys
df = DataFrame(['foo', 'foo', 'foo', 'barh',
'barh', 'barh'], columns=['A'])
_maybe_remove(store, 'df')
pytest.raises(ValueError, store.append, 'df',
df, min_itemsize={'foo': 20, 'foobar': 20})
def test_append_with_empty_string(self):
with ensure_clean_store(self.path) as store:
# with all empty strings (GH 12242)
df = DataFrame({'x': ['a', 'b', 'c', 'd', 'e', 'f', '']})
store.append('df', df[:-1], min_itemsize={'x': 1})
store.append('df', df[-1:], min_itemsize={'x': 1})
tm.assert_frame_equal(store.select('df'), df)
def test_to_hdf_with_min_itemsize(self):
with ensure_clean_path(self.path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index('C')
df.to_hdf(path, 'ss3', format='table', min_itemsize={'index': 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C='longer').set_index('C')
df2.to_hdf(path, 'ss3', append=True, format='table')
tm.assert_frame_equal(pd.read_hdf(path, 'ss3'),
pd.concat([df, df2]))
# same as above, with a Series
df['B'].to_hdf(path, 'ss4', format='table',
min_itemsize={'index': 6})
df2['B'].to_hdf(path, 'ss4', append=True, format='table')
tm.assert_series_equal(pd.read_hdf(path, 'ss4'),
pd.concat([df['B'], df2['B']]))
@pytest.mark.parametrize("format", ['fixed', 'table'])
def test_to_hdf_errors(self, format):
data = ['\ud800foo']
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(self.path) as path:
# GH 20835
ser.to_hdf(path, 'table', format=format, errors='surrogatepass')
result = pd.read_hdf(path, 'table', errors='surrogatepass')
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self):
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc('B')] = 1.
_maybe_remove(store, 'df')
store.append('df', df[:2], data_columns=['B'])
store.append('df', df[2:])
tm.assert_frame_equal(store['df'], df)
# check that we have indices created
assert(store._handle.root.df.table.cols.index.is_indexed is True)
assert(store._handle.root.df.table.cols.B.is_indexed is True)
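# data_columns are stored as their own columns in the table (and
# indexed by default), which is what makes the 'B>0' style queries
# below possible.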
# data column searching
result = store.select('df', 'B>0')
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select(
'df', 'B>0 and index>df.index[3]')
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new['string'] = 'foo'
df_new.loc[1:4, 'string'] = np.nan
df_new.loc[5:6, 'string'] = 'bar'
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'])
result = store.select('df', "string='foo'")
expected = df_new[df_new.string == 'foo']
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert getattr(store.get_storer(key)
.table.description, name).itemsize == size
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'],
min_itemsize={'string': 30})
check_col('df', 'string', 30)
_maybe_remove(store, 'df')
store.append(
'df', df_new, data_columns=['string'], min_itemsize=30)
check_col('df', 'string', 30)
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'],
min_itemsize={'values': 30})
check_col('df', 'string', 30)
with ensure_clean_store(self.path) as store:
df_new['string2'] = 'foobarbah'
df_new['string_block1'] = 'foobarbah1'
df_new['string_block2'] = 'foobarbah2'
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string', 'string2'],
min_itemsize={'string': 30, 'string2': 40,
'values': 50})
check_col('df', 'string', 30)
check_col('df', 'string2', 40)
check_col('df', 'values_block_1', 50)
with ensure_clean_store(self.path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc('A')] = 1.
df_new.iloc[0, df_new.columns.get_loc('B')] = -1.
df_new['string'] = 'foo'
sl = df_new.columns.get_loc('string')
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = 'bar'
df_new['string2'] = 'foo'
sl = df_new.columns.get_loc('string2')
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = 'bar'
_maybe_remove(store, 'df')
store.append(
'df', df_new, data_columns=['A', 'B', 'string', 'string2'])
result = store.select('df',
"string='foo' and string2='foo'"
" and A>0 and B<0")
expected = df_new[(df_new.string == 'foo') & (
df_new.string2 == 'foo') & (df_new.A > 0) & (df_new.B < 0)]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select('df', "string='foo' and string2='cool'")
expected = df_new[(df_new.string == 'foo') & (
df_new.string2 == 'cool')]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(self.path) as store:
# doc example
df_dc = df.copy()
df_dc['string'] = 'foo'
df_dc.loc[4:6, 'string'] = np.nan
df_dc.loc[7:9, 'string'] = 'bar'
df_dc['string2'] = 'cool'
df_dc['datetime'] = Timestamp('20010102')
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ['A', 'B', 'datetime']] = np.nan
_maybe_remove(store, 'df_dc')
store.append('df_dc', df_dc,
data_columns=['B', 'C', 'string',
'string2', 'datetime'])
result = store.select('df_dc', 'B>0')
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select(
'df_dc', ['B > 0', 'C > 0', 'string == foo'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (
df_dc.string == 'foo')]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(self.path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range('1/1/2000', periods=8)
df_dc = DataFrame(np.random.randn(8, 3), index=index,
columns=['A', 'B', 'C'])
df_dc['string'] = 'foo'
df_dc.loc[4:6, 'string'] = np.nan
df_dc.loc[7:9, 'string'] = 'bar'
df_dc.loc[:, ['B', 'C']] = df_dc.loc[:, ['B', 'C']].abs()
df_dc['string2'] = 'cool'
# on-disk operations
store.append('df_dc', df_dc, data_columns=[
'B', 'C', 'string', 'string2'])
result = store.select('df_dc', 'B>0')
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select(
'df_dc', ['B > 0', 'C > 0', 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) &
(df_dc.string == 'foo')]
tm.assert_frame_equal(result, expected)
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
# panel
# GH5717 not handling data_columns
np.random.seed(1234)
p = tm.makePanel()
store.append('p1', p)
tm.assert_panel_equal(store.select('p1'), p)
store.append('p2', p, data_columns=True)
tm.assert_panel_equal(store.select('p2'), p)
result = store.select('p2', where='ItemA>0')
expected = p.to_frame()
expected = expected[expected['ItemA'] > 0]
tm.assert_frame_equal(result.to_frame(), expected)
result = store.select(
'p2', where='ItemA>0 & minor_axis=["A","B"]')
expected = p.to_frame()
expected = expected[expected['ItemA'] > 0]
expected = expected[expected.reset_index(
level=['major']).index.isin(['A', 'B'])]
tm.assert_frame_equal(result.to_frame(), expected)
def test_create_table_index(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# index=False
wp = tm.makePanel()
store.append('p5', wp, index=False)
store.create_table_index('p5', columns=['major_axis'])
assert(col('p5', 'major_axis').is_indexed is True)
assert(col('p5', 'minor_axis').is_indexed is False)
# index=True
store.append('p5i', wp, index=True)
assert(col('p5i', 'major_axis').is_indexed is True)
assert(col('p5i', 'minor_axis').is_indexed is True)
# default optlevels
store.get_storer('p5').create_index()
assert(col('p5', 'major_axis').index.optlevel == 6)
assert(col('p5', 'minor_axis').index.kind == 'medium')
# let's change the indexing scheme
store.create_table_index('p5')
assert(col('p5', 'major_axis').index.optlevel == 6)
assert(col('p5', 'minor_axis').index.kind == 'medium')
store.create_table_index('p5', optlevel=9)
assert(col('p5', 'major_axis').index.optlevel == 9)
assert(col('p5', 'minor_axis').index.kind == 'medium')
store.create_table_index('p5', kind='full')
assert(col('p5', 'major_axis').index.optlevel == 9)
assert(col('p5', 'minor_axis').index.kind == 'full')
store.create_table_index('p5', optlevel=1, kind='light')
assert(col('p5', 'major_axis').index.optlevel == 1)
assert(col('p5', 'minor_axis').index.kind == 'light')
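# optlevel and kind are handed to the underlying PyTables column
# index; re-running create_table_index with new settings rebuilds the
# index, as the sequence of asserts above shows.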
# data columns
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df['string2'] = 'bar'
store.append('f', df, data_columns=['string', 'string2'])
assert(col('f', 'index').is_indexed is True)
assert(col('f', 'string').is_indexed is True)
assert(col('f', 'string2').is_indexed is True)
# specify index=columns
store.append(
'f2', df, index=['string'],
data_columns=['string', 'string2'])
assert(col('f2', 'index').is_indexed is False)
assert(col('f2', 'string').is_indexed is True)
assert(col('f2', 'string2').is_indexed is False)
# try to index a non-table
_maybe_remove(store, 'f2')
store.put('f2', df)
pytest.raises(TypeError, store.create_table_index, 'f2')
def test_append_diff_item_order(self):
with catch_warnings(record=True):
wp = tm.makePanel()
wp1 = wp.iloc[:, :10, :]
wp2 = wp.iloc[wp.items.get_indexer(['ItemC', 'ItemB', 'ItemA']),
10:, :]
with ensure_clean_store(self.path) as store:
store.put('panel', wp1, format='table')
pytest.raises(ValueError, store.put, 'panel', wp2,
append=True)
def test_append_hierarchical(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo', 'bar'])
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
with ensure_clean_store(self.path) as store:
store.append('mi', df)
result = store.select('mi')
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select('mi', columns=['A', 'B'])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
with ensure_clean_path('test.hdf') as path:
df.to_hdf(path, 'df', format='table')
result = read_hdf(path, 'df', columns=['A', 'B'])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples([('A', 'a'), ('A', 'b'),
('B', 'a'), ('B', 'b')],
names=['first', 'second'])
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(self.path) as store:
store.put('df', df)
tm.assert_frame_equal(store['df'], expected,
check_index_type=True,
check_column_type=True)
store.put('df1', df, format='table')
tm.assert_frame_equal(store['df1'], expected,
check_index_type=True,
check_column_type=True)
pytest.raises(ValueError, store.put, 'df2', df,
format='table', data_columns=['A'])
pytest.raises(ValueError, store.put, 'df3', df,
format='table', data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(self.path) as store:
store.append('df2', df)
store.append('df2', df)
tm.assert_frame_equal(store['df2'], concat((df, df)))
# non_index_axes name
df = DataFrame(np.arange(12).reshape(3, 4),
columns=Index(list('ABCD'), name='foo'))
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(self.path) as store:
store.put('df1', df, format='table')
tm.assert_frame_equal(store['df1'], expected,
check_index_type=True,
check_column_type=True)
def test_store_multiindex(self):
# validate multi-index names
# GH 5527
with ensure_clean_store(self.path) as store:
def make_index(names=None):
return MultiIndex.from_tuples([(datetime.datetime(2013, 12, d),
s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)],
names=names)
# no names
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12, 2)), columns=[
'a', 'b'], index=make_index())
store.append('df', df)
tm.assert_frame_equal(store.select('df'), df)
# partial names
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12, 2)), columns=[
'a', 'b'], index=make_index(['date', None, None]))
store.append('df', df)
tm.assert_frame_equal(store.select('df'), df)
# series
_maybe_remove(store, 's')
s = Series(np.zeros(12), index=make_index(['date', None, None]))
store.append('s', s)
xp = Series(np.zeros(12), index=make_index(
['date', 'level_1', 'level_2']))
tm.assert_series_equal(store.select('s'), xp)
# index level name duplicated with a column name
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12, 2)), columns=[
'a', 'b'], index=make_index(['date', 'a', 't']))
pytest.raises(ValueError, store.append, 'df', df)
# duplicate names within the index levels
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12, 2)), columns=['a', 'b'],
index=make_index(['date', 'date', 'date']))
pytest.raises(ValueError, store.append, 'df', df)
# fully named
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12, 2)), columns=[
'a', 'b'], index=make_index(['date', 's', 't']))
store.append('df', df)
tm.assert_frame_equal(store.select('df'), df)
def test_select_columns_in_where(self):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo_name', 'bar_name'])
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table')
expected = df[['A']]
tm.assert_frame_equal(store.select('df', columns=['A']), expected)
tm.assert_frame_equal(store.select(
'df', where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index,
name='A')
with ensure_clean_store(self.path) as store:
store.put('s', s, format='table')
tm.assert_series_equal(store.select('s', where="columns=['A']"), s)
def test_mi_data_columns(self):
# GH 14435
idx = pd.MultiIndex.from_arrays([date_range('2000-01-01', periods=5),
range(5)], names=['date', 'id'])
df = pd.DataFrame({'a': [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(self.path) as store:
store.append('df', df, data_columns=True)
actual = store.select('df', where='id == 1')
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self):
df = tm.makeDataFrame()
with ensure_clean_store(self.path) as store:
store.put('df', df)
pytest.raises(TypeError, store.select, 'df', columns=['A'])
pytest.raises(TypeError, store.select,
'df', where=[('columns=A')])
def test_append_misc(self):
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
store.append('df', df, chunksize=1)
result = store.select('df')
tm.assert_frame_equal(result, df)
store.append('df1', df, expectedrows=10)
result = store.select('df1')
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(self.path, mode='w') as store:
store.append('obj', obj, chunksize=c)
result = store.select('obj')
comparator(result, obj)
df = tm.makeDataFrame()
df['string'] = 'foo'
df['float322'] = 1.
df['float322'] = df['float322'].astype('float32')
df['bool'] = df['float322'] > 0
df['time1'] = Timestamp('20130101')
df['time2'] = Timestamp('20130102')
check(df, tm.assert_frame_equal)
with catch_warnings(record=True):
p = tm.makePanel()
check(p, assert_panel_equal)
# empty frame, GH4273
with ensure_clean_store(self.path) as store:
# 0 len
df_empty = DataFrame(columns=list('ABC'))
store.append('df', df_empty)
pytest.raises(KeyError, store.select, 'df')
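# appending a zero-length frame is a no-op: no node is created, so
# selecting the key raises KeyError until real data is appended.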
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list('ABC'))
store.append('df', df)
assert_frame_equal(store.select('df'), df)
store.append('df', df_empty)
assert_frame_equal(store.select('df'), df)
# store
df = DataFrame(columns=list('ABC'))
store.put('df2', df)
assert_frame_equal(store.select('df2'), df)
with catch_warnings(record=True):
# 0 len
p_empty = Panel(items=list('ABC'))
store.append('p', p_empty)
pytest.raises(KeyError, store.select, 'p')
# repeated append of 0/non-zero frames
p = Panel(np.random.randn(3, 4, 5), items=list('ABC'))
store.append('p', p)
assert_panel_equal(store.select('p'), p)
store.append('p', p_empty)
assert_panel_equal(store.select('p'), p)
# store
store.put('p2', p_empty)
assert_panel_equal(store.select('p2'), p_empty)
def test_append_raise(self):
with ensure_clean_store(self.path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df['invalid'] = [['a']] * len(df)
assert df.dtypes['invalid'] == np.object_
pytest.raises(TypeError, store.append, 'df', df)
# multiple invalid columns
df['invalid2'] = [['a']] * len(df)
df['invalid3'] = [['a']] * len(df)
pytest.raises(TypeError, store.append, 'df', df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df['invalid'] = s
assert df.dtypes['invalid'] == np.object_
pytest.raises(TypeError, store.append, 'df', df)
# an ndarray passed directly
pytest.raises(TypeError, store.append, 'df', np.arange(10))
# series directly
pytest.raises(TypeError, store.append,
'df', Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append('df', df)
df['foo'] = 'foo'
pytest.raises(ValueError, store.append, 'df', df)
def test_table_index_incompatible_dtypes(self):
df1 = DataFrame({'a': [1, 2, 3]})
df2 = DataFrame({'a': [4, 5, 6]},
index=date_range('1/1/2000', periods=3))
with ensure_clean_store(self.path) as store:
store.put('frame', df1, format='table')
pytest.raises(TypeError, store.put, 'frame', df2,
format='table', append=True)
def test_table_values_dtypes_roundtrip(self):
with ensure_clean_store(self.path) as store:
df1 = DataFrame({'a': [1, 2, 3]}, dtype='f8')
store.append('df_f8', df1)
assert_series_equal(df1.dtypes, store['df_f8'].dtypes)
df2 = DataFrame({'a': [1, 2, 3]}, dtype='i8')
store.append('df_i8', df2)
assert_series_equal(df2.dtypes, store['df_i8'].dtypes)
# incompatible dtype
pytest.raises(ValueError, store.append, 'df_i8', df1)
# check creation/storage/retrieval of float32 (a bit hacky to
# actually create them though)
df1 = DataFrame(
np.array([[1], [2], [3]], dtype='f4'), columns=['A'])
store.append('df_f4', df1)
assert_series_equal(df1.dtypes, store['df_f4'].dtypes)
assert df1.dtypes[0] == 'float32'
# check with mixed dtypes
df1 = DataFrame({c: Series(np.random.randint(5), dtype=c)
for c in ['float32', 'float64', 'int32',
'int64', 'int16', 'int8']})
df1['string'] = 'foo'
df1['float322'] = 1.
df1['float322'] = df1['float322'].astype('float32')
df1['bool'] = df1['float32'] > 0
df1['time1'] = Timestamp('20130101')
df1['time2'] = Timestamp('20130102')
store.append('df_mixed_dtypes1', df1)
result = store.select('df_mixed_dtypes1').get_dtype_counts()
expected = Series({'float32': 2, 'float64': 1, 'int32': 1,
'bool': 1, 'int16': 1, 'int8': 1,
'int64': 1, 'object': 1, 'datetime64[ns]': 2})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self):
# frame
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ['obj1']] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(self.path) as store:
store.append('df1_mixed', df)
tm.assert_frame_equal(store.select('df1_mixed'), df)
with catch_warnings(record=True):
# panel
wp = tm.makePanel()
wp['obj1'] = 'foo'
wp['obj2'] = 'bar'
wp['bool1'] = wp['ItemA'] > 0
wp['bool2'] = wp['ItemB'] > 0
wp['int1'] = 1
wp['int2'] = 2
wp = wp._consolidate()
with catch_warnings(record=True):
with ensure_clean_store(self.path) as store:
store.append('p1_mixed', wp)
assert_panel_equal(store.select('p1_mixed'), wp)
def test_unimplemented_dtypes_table_columns(self):
with ensure_clean_store(self.path) as store:
dtypes = [('date', datetime.date(2001, 1, 2))]
# py3 ok for unicode
if not compat.PY3:
dtypes.append(('unicode', u('\\u03c3')))
# currently not supported dtypes ####
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
pytest.raises(
TypeError, store.append, 'df1_%s' % n, df)
# frame
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['datetime1'] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(self.path) as store:
# this fails because we have a date in the object block......
pytest.raises(TypeError, store.append, 'df_unimplemented', df)
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion('1.15.0'),
reason=("Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"))
def test_calendar_roundtrip_issue(self):
# 8591
# doc example from tseries holiday section
weekmask_egypt = 'Sun Mon Tue Wed Thu'
holidays = ['2012-05-01',
datetime.datetime(2013, 5, 1), np.datetime64('2014-05-01')]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = (Series(dts.weekday, dts).map(
Series('Mon Tue Wed Thu Fri Sat Sun'.split())))
with ensure_clean_store(self.path) as store:
store.put('fixed', s)
result = store.select('fixed')
assert_series_equal(result, s)
store.append('table', s)
result = store.select('table')
assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self):
# GH 17618
time = pd.Timestamp('2000-01-01 01:00:00', tz='US/Eastern')
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(self.path) as store:
store.put('frame', df, format='fixed')
recons = store['frame']
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self):
# GH 3577
# append timedelta
df = DataFrame(dict(A=Timestamp('20130101'), B=[Timestamp(
'20130101') + timedelta(days=i, seconds=10) for i in range(10)]))
df['C'] = df['A'] - df['B']
df.loc[3:5, 'C'] = np.nan
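# timedelta64 data columns can be queried in where clauses either via
# a pd.Timedelta expression or a shorthand string like '-3D', both of
# which are exercised below.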
with ensure_clean_store(self.path) as store:
# table
_maybe_remove(store, 'df')
store.append('df', df, data_columns=True)
result = store.select('df')
assert_frame_equal(result, df)
result = store.select('df', where="C<100000")
assert_frame_equal(result, df)
result = store.select('df', where="C<pd.Timedelta('-3D')")
assert_frame_equal(result, df.iloc[3:])
result = store.select('df', "C<'-3D'")
assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select('df', "C<'-500000s'")
result = result.dropna(subset=['C'])
assert_frame_equal(result, df.iloc[6:])
result = store.select('df', "C<'-3.5D'")
result = result.iloc[1:]
assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, 'df2')
store.put('df2', df)
result = store.select('df2')
assert_frame_equal(result, df)
def test_remove(self):
with ensure_clean_store(self.path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store['a'] = ts
store['b'] = df
_maybe_remove(store, 'a')
assert len(store) == 1
tm.assert_frame_equal(df, store['b'])
_maybe_remove(store, 'b')
assert len(store) == 0
# nonexistence
pytest.raises(KeyError, store.remove, 'a_nonexistent_store')
# pathing
store['a'] = ts
store['b/foo'] = df
_maybe_remove(store, 'foo')
_maybe_remove(store, 'b/foo')
assert len(store) == 1
store['a'] = ts
store['b/foo'] = df
_maybe_remove(store, 'b')
assert len(store) == 1
# __delitem__
store['a'] = ts
store['b'] = df
del store['a']
del store['b']
assert len(store) == 0
def test_remove_where(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
# non-existence
crit1 = 'index>foo'
pytest.raises(KeyError, store.remove, 'a', [crit1])
# remove a selection from a table with a where criterion
wp = tm.makePanel(30)
store.put('wp', wp, format='table')
store.remove('wp', ["minor_axis=['A', 'D']"])
rs = store.select('wp')
expected = wp.reindex(minor_axis=['B', 'C'])
assert_panel_equal(rs, expected)
# empty where
_maybe_remove(store, 'wp')
store.put('wp', wp, format='table')
# returns the number of rows deleted (entire table)
n = store.remove('wp', [])
assert n == 120
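# remove() returns the number of rows deleted; an empty where list
# wipes the entire table.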
# non-empty where
_maybe_remove(store, 'wp')
store.put('wp', wp, format='table')
pytest.raises(ValueError, store.remove,
'wp', ['foo'])
def test_remove_startstop(self):
# GH #4835 and #6177
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
wp = tm.makePanel(30)
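# the panel is written as a flattened table of 30 major_axis x 4
# minor_axis = 120 rows; start/stop index into those rows, hence the
# // 4 when reindexing major_axis below.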
# start
_maybe_remove(store, 'wp1')
store.put('wp1', wp, format='t')
n = store.remove('wp1', start=32)
assert n == 120 - 32
result = store.select('wp1')
expected = wp.reindex(major_axis=wp.major_axis[:32 // 4])
assert_panel_equal(result, expected)
_maybe_remove(store, 'wp2')
store.put('wp2', wp, format='t')
n = store.remove('wp2', start=-32)
assert n == 32
result = store.select('wp2')
expected = wp.reindex(major_axis=wp.major_axis[:-32 // 4])
assert_panel_equal(result, expected)
# stop
_maybe_remove(store, 'wp3')
store.put('wp3', wp, format='t')
n = store.remove('wp3', stop=32)
assert n == 32
result = store.select('wp3')
expected = wp.reindex(major_axis=wp.major_axis[32 // 4:])
assert_panel_equal(result, expected)
_maybe_remove(store, 'wp4')
store.put('wp4', wp, format='t')
n = store.remove('wp4', stop=-32)
assert n == 120 - 32
result = store.select('wp4')
expected = wp.reindex(major_axis=wp.major_axis[-32 // 4:])
assert_panel_equal(result, expected)
# start and stop
_maybe_remove(store, 'wp5')
store.put('wp5', wp, format='t')
n = store.remove('wp5', start=16, stop=-16)
assert n == 120 - 32
result = store.select('wp5')
expected = wp.reindex(
major_axis=(wp.major_axis[:16 // 4]
.union(wp.major_axis[-16 // 4:])))
assert_panel_equal(result, expected)
_maybe_remove(store, 'wp6')
store.put('wp6', wp, format='t')
n = store.remove('wp6', start=16, stop=16)
assert n == 0
result = store.select('wp6')
expected = wp.reindex(major_axis=wp.major_axis)
assert_panel_equal(result, expected)
# with where
_maybe_remove(store, 'wp7')
# TODO: unused?
date = wp.major_axis.take(np.arange(0, 30, 3)) # noqa
crit = 'major_axis=date'
store.put('wp7', wp, format='t')
n = store.remove('wp7', where=[crit], stop=80)
assert n == 28
result = store.select('wp7')
expected = wp.reindex(major_axis=wp.major_axis.difference(
wp.major_axis[np.arange(0, 20, 3)]))
assert_panel_equal(result, expected)
def test_remove_crit(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
wp = tm.makePanel(30)
# group row removal
_maybe_remove(store, 'wp3')
date4 = wp.major_axis.take([0, 1, 2, 4, 5, 6, 8, 9, 10])
crit4 = 'major_axis=date4'
store.put('wp3', wp, format='t')
n = store.remove('wp3', where=[crit4])
assert n == 36
result = store.select('wp3')
expected = wp.reindex(
major_axis=wp.major_axis.difference(date4))
assert_panel_equal(result, expected)
# upper half
_maybe_remove(store, 'wp')
store.put('wp', wp, format='table')
date = wp.major_axis[len(wp.major_axis) // 2]
crit1 = 'major_axis>date'
crit2 = "minor_axis=['A', 'D']"
n = store.remove('wp', where=[crit1])
assert n == 56
n = store.remove('wp', where=[crit2])
assert n == 32
result = store['wp']
expected = wp.truncate(after=date).reindex(minor=['B', 'C'])
assert_panel_equal(result, expected)
# individual row elements
_maybe_remove(store, 'wp2')
store.put('wp2', wp, format='table')
date1 = wp.major_axis[1:3]
crit1 = 'major_axis=date1'
store.remove('wp2', where=[crit1])
result = store.select('wp2')
expected = wp.reindex(
major_axis=wp.major_axis.difference(date1))
assert_panel_equal(result, expected)
date2 = wp.major_axis[5]
crit2 = 'major_axis=date2'
store.remove('wp2', where=[crit2])
result = store['wp2']
expected = wp.reindex(
major_axis=(wp.major_axis
.difference(date1)
.difference(Index([date2]))
))
assert_panel_equal(result, expected)
date3 = [wp.major_axis[7], wp.major_axis[9]]
crit3 = 'major_axis=date3'
store.remove('wp2', where=[crit3])
result = store['wp2']
expected = wp.reindex(major_axis=wp.major_axis
.difference(date1)
.difference(Index([date2]))
.difference(Index(date3)))
assert_panel_equal(result, expected)
# corners
_maybe_remove(store, 'wp4')
store.put('wp4', wp, format='table')
n = store.remove(
'wp4', where="major_axis>wp.major_axis[-1]")
result = store.select('wp4')
assert_panel_equal(result, wp)
def test_invalid_terms(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df.loc[0:4, 'string'] = 'bar'
wp = tm.makePanel()
store.put('df', df, format='table')
store.put('wp', wp, format='table')
# some invalid terms
pytest.raises(ValueError, store.select,
'wp', "minor=['A', 'B']")
pytest.raises(ValueError, store.select,
'wp', ["index=['20121114']"])
pytest.raises(ValueError, store.select, 'wp', [
"index=['20121114', '20121114']"])
pytest.raises(TypeError, Term)
# more invalid
pytest.raises(
ValueError, store.select, 'df', 'df.index[3]')
pytest.raises(SyntaxError, store.select, 'df', 'index>')
pytest.raises(
ValueError, store.select, 'wp',
"major_axis<'20000108' & minor_axis['A', 'B']")
# from the docs
with ensure_clean_path(self.path) as path:
dfq = DataFrame(np.random.randn(10, 4), columns=list(
'ABCD'), index=date_range('20130101', periods=10))
dfq.to_hdf(path, 'dfq', format='table', data_columns=True)
# check ok
read_hdf(path, 'dfq',
where="index>Timestamp('20130104') & columns=['A', 'B']")
read_hdf(path, 'dfq', where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(self.path) as path:
dfq = DataFrame(np.random.randn(10, 4), columns=list(
'ABCD'), index=date_range('20130101', periods=10))
dfq.to_hdf(path, 'dfq', format='table')
pytest.raises(ValueError, read_hdf, path,
'dfq', where="A>0 or C>0")
def test_terms(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
wp = tm.makePanel()
wpneg = Panel.fromDict({-1: tm.makeDataFrame(),
0: tm.makeDataFrame(),
1: tm.makeDataFrame()})
store.put('wp', wp, format='table')
store.put('wpneg', wpneg, format='table')
# panel
result = store.select(
'wp',
"major_axis<'20000108' and minor_axis=['A', 'B']")
expected = wp.truncate(
after='20000108').reindex(minor=['A', 'B'])
assert_panel_equal(result, expected)
# with deprecation
result = store.select(
'wp', where=("major_axis<'20000108' "
"and minor_axis=['A', 'B']"))
expected = wp.truncate(
after='20000108').reindex(minor=['A', 'B'])
tm.assert_panel_equal(result, expected)
with catch_warnings(record=True):
# valid terms
terms = [('major_axis=20121114'),
('major_axis>20121114'),
(("major_axis=['20121114', '20121114']"),),
('major_axis=datetime.datetime(2012, 11, 14)'),
'major_axis> 20121114',
'major_axis >20121114',
'major_axis > 20121114',
(("minor_axis=['A', 'B']"),),
(("minor_axis=['A', 'B']"),),
((("minor_axis==['A', 'B']"),),),
(("items=['ItemA', 'ItemB']"),),
('items=ItemA'),
]
for t in terms:
store.select('wp', t)
with tm.assert_raises_regex(
TypeError, 'Only named functions are supported'):
store.select(
'wp',
'major_axis == (lambda x: x)("20130101")')
with catch_warnings(record=True):
# check USub node parsing
res = store.select('wpneg', 'items == -1')
expected = Panel({-1: wpneg[-1]})
tm.assert_panel_equal(res, expected)
with tm.assert_raises_regex(NotImplementedError,
'Unary addition '
'not supported'):
store.select('wpneg', 'items == +1')
def test_term_compat(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
store.append('wp', wp)
result = store.select(
'wp', where=("major_axis>20000102 "
"and minor_axis=['A', 'B']"))
expected = wp.loc[:, wp.major_axis >
Timestamp('20000102'), ['A', 'B']]
assert_panel_equal(result, expected)
store.remove('wp', 'major_axis>20000103')
result = store.select('wp')
expected = wp.loc[:, wp.major_axis <= Timestamp('20000103'), :]
assert_panel_equal(result, expected)
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
wp = Panel(np.random.randn(2, 5, 4),
items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
store.append('wp', wp)
# stringified datetimes
result = store.select(
'wp', 'major_axis>datetime.datetime(2000, 1, 2)')
expected = wp.loc[:, wp.major_axis > Timestamp('20000102')]
assert_panel_equal(result, expected)
result = store.select(
'wp', 'major_axis>datetime.datetime(2000, 1, 2)')
expected = wp.loc[:, wp.major_axis > Timestamp('20000102')]
assert_panel_equal(result, expected)
result = store.select(
'wp',
"major_axis=[datetime.datetime(2000, 1, 2, 0, 0), "
"datetime.datetime(2000, 1, 3, 0, 0)]")
expected = wp.loc[:, [Timestamp('20000102'),
Timestamp('20000103')]]
assert_panel_equal(result, expected)
result = store.select(
'wp', "minor_axis=['A', 'B']")
expected = wp.loc[:, :, ['A', 'B']]
assert_panel_equal(result, expected)
def test_same_name_scoping(self):
with ensure_clean_store(self.path) as store:
import pandas as pd
df = DataFrame(np.random.randn(20, 2),
index=pd.date_range('20130101', periods=20))
store.put('df', df, format='table')
expected = df[df.index > pd.Timestamp('20130105')]
import datetime # noqa
result = store.select('df', 'index>datetime.datetime(2013,1,5)')
assert_frame_equal(result, expected)
from datetime import datetime # noqa
# technically an error, but allow it
result = store.select('df', 'index>datetime.datetime(2013,1,5)')
assert_frame_equal(result, expected)
result = store.select('df', 'index>datetime(2013,1,5)')
assert_frame_equal(result, expected)
def test_series(self):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object),
dtype=object))
self._check_roundtrip(ts3, tm.assert_series_equal,
check_index_type=False)
def test_sparse_series(self):
s = tm.makeStringSeries()
s.iloc[3:5] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_series_equal,
check_series_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_series_equal,
check_series_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_series_equal,
check_series_type=True)
def test_sparse_frame(self):
s = tm.makeDataFrame()
s.iloc[3:5, 1:3] = np.nan
s.iloc[8:10, -2] = np.nan
ss = s.to_sparse()
self._check_double_roundtrip(ss, tm.assert_frame_equal,
check_frame_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_double_roundtrip(ss2, tm.assert_frame_equal,
check_frame_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_double_roundtrip(ss3, tm.assert_frame_equal,
check_frame_type=True)
def test_float_index(self):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal)
def test_tuple_index(self):
# GH #492
col = np.arange(10)
idx = [(0., 1.), (2., 3.), (4., 5.)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
self._check_roundtrip(DF, tm.assert_frame_equal)
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(self):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(l, r,
check_dtype=True,
check_index_type=True,
check_series_type=True)
with catch_warnings(record=True):
ser = Series(values, [0, 'y'])
self._check_roundtrip(ser, func)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func)
with catch_warnings(record=True):
ser = Series(values, ['y', 0])
self._check_roundtrip(ser, func)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), 'a'])
self._check_roundtrip(ser, func)
with catch_warnings(record=True):
ser = Series(values, [0, 'y'])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func)
ser = Series(values, ['y', 0])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime.date.today(), 'a'])
self._check_roundtrip(ser, func)
ser = Series(values, [1.23, 'b'])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime.datetime(
2012, 1, 1), datetime.datetime(2012, 1, 2)])
self._check_roundtrip(ser, func)
def test_timeseries_preepoch(self):
dr = bdate_range('1/1/1940', '1/1/1960')
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal)
except OverflowError:
pytest.skip('known failure on some windows platforms')
@pytest.mark.parametrize("compression", [
False, pytest.param(True, marks=td.skip_if_windows_python_3)
])
def test_frame(self, compression):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(df, tm.assert_frame_equal,
compression=compression)
self._check_roundtrip(df, tm.assert_frame_equal,
compression=compression)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(tdf, tm.assert_frame_equal,
compression=compression)
with ensure_clean_store(self.path) as store:
# df is not consolidated after adding a column; the roundtrip result is
df['foo'] = np.random.randn(len(df))
store['df'] = df
recons = store['df']
assert recons._data.is_consolidated()
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal)
def test_empty_series_frame(self):
s0 = Series()
s1 = Series(name='myseries')
df0 = DataFrame()
df1 = DataFrame(index=['a', 'b', 'c'])
df2 = DataFrame(columns=['d', 'e', 'f'])
self._check_roundtrip(s0, tm.assert_series_equal)
self._check_roundtrip(s1, tm.assert_series_equal)
self._check_roundtrip(df0, tm.assert_frame_equal)
self._check_roundtrip(df1, tm.assert_frame_equal)
self._check_roundtrip(df2, tm.assert_frame_equal)
def test_empty_series(self):
for dtype in [np.int64, np.float64, np.object, 'm8[ns]', 'M8[ns]']:
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal)
def test_can_serialize_dates(self):
rng = [x.date() for x in bdate_range('1/1/2000', '1/30/2000')]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal)
def test_store_hierarchical(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo', 'bar'])
frame = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self._check_roundtrip(frame, tm.assert_frame_equal)
self._check_roundtrip(frame.T, tm.assert_frame_equal)
self._check_roundtrip(frame['A'], tm.assert_series_equal)
# check that the names are stored
with ensure_clean_store(self.path) as store:
store['frame'] = frame
recons = store['frame']
tm.assert_frame_equal(recons, frame)
def test_store_index_name(self):
df = tm.makeDataFrame()
df.index.name = 'foo'
with ensure_clean_store(self.path) as store:
store['frame'] = df
recons = store['frame']
tm.assert_frame_equal(recons, df)
def test_store_index_name_with_tz(self):
# GH 13884
df = pd.DataFrame({'A': [1, 2]})
df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
df.index = df.index.tz_localize('UTC')
df.index.name = 'foo'
with ensure_clean_store(self.path) as store:
store.put('frame', df, format='table')
recons = store['frame']
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize('table_format', ['table', 'fixed'])
def test_store_index_name_numpy_str(self, table_format):
# GH #13492
idx = pd.Index(pd.to_datetime([datetime.date(2000, 1, 1),
datetime.date(2000, 1, 2)]),
name=u('cols\u05d2'))
idx1 = pd.Index(pd.to_datetime([datetime.date(2010, 1, 1),
datetime.date(2010, 1, 2)]),
name=u('rows\u05d0'))
df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format=table_format)
df2 = read_hdf(path, 'df')
assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == text_type
assert type(df2.columns.name) == text_type
def test_store_series_name(self):
df = tm.makeDataFrame()
series = df['A']
with ensure_clean_store(self.path) as store:
store['series'] = series
recons = store['series']
tm.assert_series_equal(recons, series)
@pytest.mark.parametrize("compression", [
False, pytest.param(True, marks=td.skip_if_windows_python_3)
])
def test_store_mixed(self, compression):
def _make_one():
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['int1'] = 1
df['int2'] = 2
return df._consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal)
self._check_roundtrip(df2, tm.assert_frame_equal)
with ensure_clean_store(self.path) as store:
store['obj'] = df1
tm.assert_frame_equal(store['obj'], df1)
store['obj'] = df2
tm.assert_frame_equal(store['obj'], df2)
# check that can store Series of all of these types
self._check_roundtrip(df1['obj1'], tm.assert_series_equal,
compression=compression)
self._check_roundtrip(df1['bool1'], tm.assert_series_equal,
compression=compression)
self._check_roundtrip(df1['int1'], tm.assert_series_equal,
compression=compression)
def test_wide(self):
with catch_warnings(record=True):
wp = tm.makePanel()
self._check_roundtrip(wp, assert_panel_equal)
@pytest.mark.filterwarnings(
"ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning"
)
def test_select_with_dups(self):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df.index = date_range('20130101 9:30', periods=10, freq='T')
with ensure_clean_store(self.path) as store:
store.append('df', df)
result = store.select('df')
expected = df
assert_frame_equal(result, expected, by_blocks=True)
result = store.select('df', columns=df.columns)
expected = df
assert_frame_equal(result, expected, by_blocks=True)
result = store.select('df', columns=['A'])
expected = df.loc[:, ['A']]
assert_frame_equal(result, expected)
# dups across dtypes
df = concat([DataFrame(np.random.randn(10, 4),
columns=['A', 'A', 'B', 'B']),
DataFrame(np.random.randint(0, 10, size=20)
.reshape(10, 2),
columns=['A', 'C'])],
axis=1)
df.index = date_range('20130101 9:30', periods=10, freq='T')
with ensure_clean_store(self.path) as store:
store.append('df', df)
result = store.select('df')
expected = df
assert_frame_equal(result, expected, by_blocks=True)
result = store.select('df', columns=df.columns)
expected = df
assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ['A']]
result = store.select('df', columns=['A'])
assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ['B', 'A']]
result = store.select('df', columns=['B', 'A'])
assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(self.path) as store:
store.append('df', df)
store.append('df', df)
expected = df.loc[:, ['B', 'A']]
expected = concat([expected, expected])
result = store.select('df', columns=['B', 'A'])
assert_frame_equal(result, expected, by_blocks=True)
@pytest.mark.filterwarnings(
"ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning"
)
def test_wide_table_dups(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
wp = tm.makePanel()
store.put('panel', wp, format='table')
store.put('panel', wp, format='table', append=True)
recons = store['panel']
assert_panel_equal(recons, wp)
def test_long(self):
def _check(left, right):
assert_panel_equal(left.to_panel(), right.to_panel())
with catch_warnings(record=True):
wp = tm.makePanel()
self._check_roundtrip(wp.to_frame(), _check)
def test_overwrite_node(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store['a'] = ts
tm.assert_series_equal(store['a'], ts)
def test_sparse_with_compression(self):
# GH 2931
# make sparse dataframe
arr = np.random.binomial(n=1, p=.01, size=(1000, 10))
df = DataFrame(arr).to_sparse(fill_value=0)
# case 1: store uncompressed
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression=False,
check_frame_type=True)
# case 2: store compressed (works)
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression='zlib',
check_frame_type=True)
# set one series to be completely sparse
df[0] = np.zeros(1000)
# case 3: store df with completely sparse series uncompressed
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression=False,
check_frame_type=True)
# case 4: store df with a completely sparse series compressed
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression='zlib',
check_frame_type=True)
def test_select(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
wp = tm.makePanel()
# put/select ok
_maybe_remove(store, 'wp')
store.put('wp', wp, format='table')
store.select('wp')
# non-table ok (where = None)
_maybe_remove(store, 'wp')
store.put('wp2', wp)
store.select('wp2')
# selection on the non-indexable with a large number of columns
wp = Panel(np.random.randn(100, 100, 100),
items=['Item%03d' % i for i in range(100)],
major_axis=date_range('1/1/2000', periods=100),
minor_axis=['E%03d' % i for i in range(100)])
_maybe_remove(store, 'wp')
store.append('wp', wp)
items = ['Item%03d' % i for i in range(80)]
result = store.select('wp', 'items=items')
expected = wp.reindex(items=items)
assert_panel_equal(expected, result)
# selecting a non-table with a where
# pytest.raises(ValueError, store.select,
# 'wp2', ('column', ['A', 'D']))
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df')
store.append('df', df)
result = store.select('df', columns=['A', 'B'])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select('df', [("columns=['A', 'B']")])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['A'])
result = store.select('df', ['A > 0'], columns=['A', 'B'])
expected = df[df.A > 0].reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# all as data columns
_maybe_remove(store, 'df')
store.append('df', df, data_columns=True)
result = store.select('df', ['A > 0'], columns=['A', 'B'])
expected = df[df.A > 0].reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# with a data column, but selecting different columns
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['A'])
result = store.select('df', ['A > 0'], columns=['C', 'D'])
expected = df[df.A > 0].reindex(columns=['C', 'D'])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self):
with ensure_clean_store(self.path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(dict(
ts=bdate_range('2012-01-01', periods=300),
A=np.random.randn(300)))
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['ts', 'A'])
result = store.select('df', "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp('2012-02-01')]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=['A', 'B'])
df['object'] = 'foo'
df.loc[4:5, 'object'] = 'bar'
df['boolv'] = df['A'] > 0
_maybe_remove(store, 'df')
store.append('df', df, data_columns=True)
expected = (df[df.boolv == True] # noqa
.reindex(columns=['A', 'boolv']))
for v in [True, 'true', 1]:
result = store.select('df', 'boolv == %s' % str(v),
columns=['A', 'boolv'])
tm.assert_frame_equal(expected, result)
expected = (df[df.boolv == False] # noqa
.reindex(columns=['A', 'boolv']))
for v in [False, 'false', 0]:
result = store.select(
'df', 'boolv == %s' % str(v), columns=['A', 'boolv'])
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, 'df_int')
store.append('df_int', df)
result = store.select(
'df_int', "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=['A'])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(dict(A=np.random.rand(
20), B=np.random.rand(20), index=np.arange(20, dtype='f8')))
_maybe_remove(store, 'df_float')
store.append('df_float', df)
result = store.select(
'df_float', "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=['A'])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(self.path) as store:
# floats w/o NaN
df = DataFrame(
dict(cols=range(11), values=range(11)), dtype='float64')
df['cols'] = (df['cols'] + 10).apply(str)
store.append('df1', df, data_columns=True)
result = store.select(
'df1', where='values>2.0')
expected = df[df['values'] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df['values'] > 2.0]
store.append('df2', df, data_columns=True, index=False)
result = store.select(
'df2', where='values>2.0')
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# a NaN that is not in the first row is fine too
df = DataFrame(
dict(cols=range(11), values=range(11)), dtype='float64')
df['cols'] = (df['cols'] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df['values'] > 2.0]
store.append('df4', df, data_columns=True)
result = store.select(
'df4', where='values>2.0')
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
expected = df[df['A'] > 0]
store.append('df', df, data_columns=True)
np_zero = np.float64(0) # noqa
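# local variables (here np_zero) can be referenced by name inside a
# where expression; the same mechanism drives the 'selector' queries
# in the next test.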
result = store.select('df', where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self):
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(ts=bdate_range('2012-01-01', periods=300),
A=np.random.randn(300),
B=range(300),
users=['a'] * 50 + ['b'] * 50 + ['c'] * 100 +
['a%03d' % i for i in range(100)]))
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['ts', 'A', 'B', 'users'])
# regular select
result = store.select('df', "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp('2012-02-01')]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select(
'df',
"ts>=Timestamp('2012-02-01') & users=['a','b','c']")
expected = df[(df.ts >= Timestamp('2012-02-01')) &
df.users.isin(['a', 'b', 'c'])]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ['a', 'b', 'c'] + ['a%03d' % i for i in range(60)]
result = store.select(
'df',
"ts>=Timestamp('2012-02-01') and users=selector")
expected = df[(df.ts >= Timestamp('2012-02-01')) &
df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select('df', 'B=selector')
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select('df', 'ts=selector')
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(self):
# single table
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, 'df')
store.append('df', df)
expected = store.select('df')
results = [s for s in store.select('df', iterator=True)]
result = concat(results)
tm.assert_frame_equal(expected, result)
results = [s for s in store.select('df', chunksize=100)]
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = [s for s in store.select('df', chunksize=150)]
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(self.path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, 'df_non_table')
pytest.raises(TypeError, read_hdf, path,
'df_non_table', chunksize=100)
pytest.raises(TypeError, read_hdf, path,
'df_non_table', iterator=True)
with ensure_clean_path(self.path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, 'df', format='table')
results = [s for s in read_hdf(path, 'df', chunksize=100)]
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, 'df'))
# multiple
with ensure_clean_store(self.path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append('df1', df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(
columns=lambda x: "%s_2" % x)
df2['foo'] = 'bar'
store.append('df2', df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(
['df1', 'df2'], selector='df1')
results = [s for s in store.select_as_multiple(
['df1', 'df2'], selector='df1', chunksize=150)]
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(self):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df', expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select('df')
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = "index >= '%s'" % beg_dt
result = store.select('df', where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = "index <= '%s'" % end_dt
result = store.select('df', where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
result = store.select('df', where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df', expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = [s for s in store.select('df', chunksize=chunksize)]
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = "index >= '%s'" % beg_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '%s'" % end_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(self):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df', expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = "index >= '%s'" % beg_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '%s'" % end_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) &
(expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df', expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = "index > '%s'" % end_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
assert 0 == len(results)
def test_select_iterator_many_empty_frames(self):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = int(1e4)
# with iterator, range limited to the first chunk
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100000, 'S')
_maybe_remove(store, 'df')
store.append('df', expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = "index >= '%s'" % beg_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '%s'" % end_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
# should be a single chunk; the pre-fix (GH 8014) behaviour returned 10
assert len(results) == 1
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) &
(expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and a where clause that selects
# *nothing*.
#
# To be consistent with Python iteration semantics this should
# return [], e.g. `for e in []: print(True)` never prints
# True.
where = "index <= '%s' & index >= '%s'" % (beg_dt, end_dt)
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
# should be []
assert len(results) == 0
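# The next two tests cover GH 3499: appending tables whose DatetimeIndex
# attributes (freq, name) conflict drops the conflicting attribute and
# emits an AttributeConflictWarning, hence the filterwarnings marks.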
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes(self):
# GH 3499, losing frequency info on index recreation
df = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2000-1-1', periods=3, freq='H'))))
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'data')
store.put('data', df, format='table')
result = store.get('data')
tm.assert_frame_equal(df, result)
for attr in ['freq', 'tz', 'name']:
for idx in ['index', 'columns']:
assert (getattr(getattr(df, idx), attr, None) ==
getattr(getattr(result, idx), attr, None))
# try to append a table with a different frequency
with catch_warnings(record=True):
df2 = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2002-1-1',
periods=3, freq='D'))))
store.append('data', df2)
assert store.get_storer('data').info['index']['freq'] is None
# this is ok
_maybe_remove(store, 'df2')
df2 = DataFrame(dict(
A=Series(lrange(3),
index=[Timestamp('20010101'), Timestamp('20010102'),
Timestamp('20020101')])))
store.append('df2', df2)
df3 = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2002-1-1', periods=3,
freq='D'))))
store.append('df2', df3)
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes2(self):
with ensure_clean_path(self.path) as path:
with catch_warnings(record=True):
df = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2000-1-1',
periods=3, freq='H'))))
df.to_hdf(path, 'data', mode='w', append=True)
df2 = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2002-1-1', periods=3,
freq='D'))))
df2.to_hdf(path, 'data', append=True)
idx = date_range('2000-1-1', periods=3, freq='H')
idx.name = 'foo'
df = DataFrame(dict(A=Series(lrange(3), index=idx)))
df.to_hdf(path, 'data', mode='w', append=True)
assert read_hdf(path, 'data').index.name == 'foo'
with catch_warnings(record=True):
idx2 = date_range('2001-1-1', periods=3, freq='H')
idx2.name = 'bar'
df2 = DataFrame(dict(A=Series(lrange(3), index=idx2)))
df2.to_hdf(path, 'data', append=True)
assert read_hdf(path, 'data').index.name is None
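# Panel is deprecated, so the Panel-based selection test below runs inside
# catch_warnings to keep the FutureWarning out of the test output.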
def test_panel_select(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
wp = tm.makePanel()
store.put('wp', wp, format='table')
date = wp.major_axis[len(wp.major_axis) // 2]
crit1 = ('major_axis>=date')
crit2 = ("minor_axis=['A', 'D']")
result = store.select('wp', [crit1, crit2])
expected = wp.truncate(before=date).reindex(minor=['A', 'D'])
assert_panel_equal(result, expected)
result = store.select(
'wp', ['major_axis>="20000124"',
("minor_axis=['A', 'B']")])
expected = wp.truncate(
before='20000124').reindex(minor=['A', 'B'])
assert_panel_equal(result, expected)
def test_frame_select(self):
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
store.put('frame', df, format='table')
date = df.index[len(df) // 2]
crit1 = Term('index>=date')
assert crit1.env.scope['date'] == date
crit2 = ("columns=['A', 'D']")
crit3 = ('columns=A')
result = store.select('frame', [crit1, crit2])
expected = df.loc[date:, ['A', 'D']]
tm.assert_frame_equal(result, expected)
result = store.select('frame', [crit3])
expected = df.loc[:, ['A']]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append('df_time', df)
pytest.raises(
ValueError, store.select, 'df_time', "index>0")
# can't select if not written as table
# store['frame'] = df
# pytest.raises(ValueError, store.select,
# 'frame', [crit1, crit2])
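# where strings support &, | and parentheses for combining terms; ~ (invert)
# only works for column filters -- inverting a value condition raises
# NotImplementedError because numexpr does not support it (asserted below).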
def test_frame_select_complex(self):
# select via complex criteria
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df.loc[df.index[0:4], 'string'] = 'bar'
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table', data_columns=['string'])
# empty
result = store.select('df', 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index > df.index[3]) & (df.string == 'bar')]
tm.assert_frame_equal(result, expected)
result = store.select('df', 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index > df.index[3]) & (df.string == 'foo')]
tm.assert_frame_equal(result, expected)
# or
result = store.select('df', 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index > df.index[3]) | (df.string == 'bar')]
tm.assert_frame_equal(result, expected)
result = store.select('df', '(index>df.index[3] & '
'index<=df.index[6]) | string="bar"')
expected = df.loc[((df.index > df.index[3]) & (
df.index <= df.index[6])) | (df.string == 'bar')]
tm.assert_frame_equal(result, expected)
# invert
result = store.select('df', 'string!="bar"')
expected = df.loc[df.string != 'bar']
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
pytest.raises(NotImplementedError,
store.select, 'df', '~(string="bar")')
# invert ok for filters
result = store.select('df', "~(columns=['A','B'])")
expected = df.loc[:, df.columns.difference(['A', 'B'])]
tm.assert_frame_equal(result, expected)
# in
result = store.select(
'df', "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index > df.index[3]].reindex(columns=[
'A', 'B'])
tm.assert_frame_equal(result, expected)
def test_frame_select_complex2(self):
with ensure_clean_path(['parms.hdf', 'hist.hdf']) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({'A': [1, 1, 2, 2, 3]})
parms.to_hdf(pp, 'df', mode='w',
format='table', data_columns=['A'])
selection = read_hdf(pp, 'df', where='A=[2,3]')
hist = DataFrame(np.random.randn(25, 1),
columns=['data'],
index=MultiIndex.from_tuples(
[(i, j) for i in range(5)
for j in range(5)],
names=['l1', 'l2']))
hist.to_hdf(hh, 'df', mode='w', format='table')
expected = read_hdf(hh, 'df', where='l1=[2, 3, 4]')
# scope with list-like
l = selection.index.tolist() # noqa
store = HDFStore(hh)
result = store.select('df', where='l1=l')
assert_frame_equal(result, expected)
store.close()
result = read_hdf(hh, 'df', where='l1=l')
assert_frame_equal(result, expected)
# index
index = selection.index # noqa
result = read_hdf(hh, 'df', where='l1=index')
assert_frame_equal(result, expected)
result = read_hdf(hh, 'df', where='l1=selection.index')
assert_frame_equal(result, expected)
result = read_hdf(hh, 'df', where='l1=selection.index.tolist()')
assert_frame_equal(result, expected)
result = read_hdf(hh, 'df', where='l1=list(selection.index)')
assert_frame_equal(result, expected)
# scope with index
store = HDFStore(hh)
result = store.select('df', where='l1=index')
assert_frame_equal(result, expected)
result = store.select('df', where='l1=selection.index')
assert_frame_equal(result, expected)
result = store.select('df', where='l1=selection.index.tolist()')
assert_frame_equal(result, expected)
result = store.select('df', where='l1=list(selection.index)')
assert_frame_equal(result, expected)
store.close()
def test_invalid_filtering(self):
# can't use more than one filter (atm)
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table')
# not implemented
pytest.raises(NotImplementedError, store.select,
'df', "columns=['A'] | columns=['B']")
# in theory we could deal with this
pytest.raises(NotImplementedError, store.select,
'df', "columns=['A','B'] & columns=['C']")
def test_string_select(self):
# GH 2973
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
# test string ==/!=
df['x'] = 'none'
df.loc[2:7, 'x'] = ''
store.append('df', df, data_columns=['x'])
result = store.select('df', 'x=none')
expected = df[df.x == 'none']
assert_frame_equal(result, expected)
try:
result = store.select('df', 'x!=none')
expected = df[df.x != 'none']
assert_frame_equal(result, expected)
except Exception as detail:
pprint_thing("[{0}]".format(detail))
pprint_thing(store)
pprint_thing(expected)
df2 = df.copy()
df2.loc[df2.x == '', 'x'] = np.nan
store.append('df2', df2, data_columns=['x'])
result = store.select('df2', 'x!=none')
expected = df2[isna(df2.x)]
assert_frame_equal(result, expected)
# int ==/!=
df['int'] = 1
df.loc[2:7, 'int'] = 2
store.append('df3', df, data_columns=['int'])
result = store.select('df3', 'int=2')
expected = df[df.int == 2]
assert_frame_equal(result, expected)
result = store.select('df3', 'int!=2')
expected = df[df.int != 2]
assert_frame_equal(result, expected)
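# select_column reads a single stored column (the index or a data column)
# back as a Series without materialising the whole table; columns that are
# not data-indexable raise, as asserted below.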
def test_read_column(self):
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
# GH 17912
# HDFStore.select_column should raise a KeyError
# exception if the key is not a valid store
with pytest.raises(KeyError,
message='No object named index in the file'):
store.select_column('df', 'index')
store.append('df', df)
# error
pytest.raises(KeyError, store.select_column, 'df', 'foo')
def f():
store.select_column('df', 'index', where=['index>5'])
pytest.raises(Exception, f)
# valid
result = store.select_column('df', 'index')
tm.assert_almost_equal(result.values, Series(df.index).values)
assert isinstance(result, Series)
# not a data indexable column
pytest.raises(
ValueError, store.select_column, 'df', 'values_block_0')
# a data column
df2 = df.copy()
df2['string'] = 'foo'
store.append('df2', df2, data_columns=['string'])
result = store.select_column('df2', 'string')
tm.assert_almost_equal(result.values, df2['string'].values)
# a data column with NaNs, result excludes the NaNs
df3 = df.copy()
df3['string'] = 'foo'
df3.loc[4:6, 'string'] = np.nan
store.append('df3', df3, data_columns=['string'])
result = store.select_column('df3', 'string')
tm.assert_almost_equal(result.values, df3['string'].values)
# start/stop
result = store.select_column('df3', 'string', start=2)
tm.assert_almost_equal(result.values, df3['string'].values[2:])
result = store.select_column('df3', 'string', start=-2)
tm.assert_almost_equal(result.values, df3['string'].values[-2:])
result = store.select_column('df3', 'string', stop=2)
tm.assert_almost_equal(result.values, df3['string'].values[:2])
result = store.select_column('df3', 'string', stop=-2)
tm.assert_almost_equal(result.values, df3['string'].values[:-2])
result = store.select_column('df3', 'string', start=2, stop=-2)
tm.assert_almost_equal(result.values, df3['string'].values[2:-2])
result = store.select_column('df3', 'string', start=-2, stop=2)
tm.assert_almost_equal(result.values, df3['string'].values[-2:2])
# GH 10392 - make sure column name is preserved
df4 = DataFrame({'A': np.random.randn(10), 'B': 'foo'})
store.append('df4', df4, data_columns=True)
expected = df4['B']
result = store.select_column('df4', 'B')
tm.assert_series_equal(result, expected)
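# select_as_coordinates returns an Index of row positions; feeding it back
# in as `where` re-selects exactly those rows, which also makes it usable
# as a shared selector across multiple synchronized tables (see below).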
def test_coordinates(self):
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
store.append('df', df)
# all
c = store.select_as_coordinates('df')
assert((c.values == np.arange(len(df.index))).all())
# get coordinates back & test vs frame
_maybe_remove(store, 'df')
df = DataFrame(dict(A=lrange(5), B=lrange(5)))
store.append('df', df)
c = store.select_as_coordinates('df', ['index<3'])
assert((c.values == np.arange(3)).all())
result = store.select('df', where=c)
expected = df.loc[0:2, :]
tm.assert_frame_equal(result, expected)
c = store.select_as_coordinates('df', ['index>=3', 'index<=4'])
assert((c.values == np.arange(2) + 3).all())
result = store.select('df', where=c)
expected = df.loc[3:4, :]
tm.assert_frame_equal(result, expected)
assert isinstance(c, Index)
# multiple tables
_maybe_remove(store, 'df1')
_maybe_remove(store, 'df2')
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
store.append('df1', df1, data_columns=['A', 'B'])
store.append('df2', df2)
c = store.select_as_coordinates('df1', ['A>0', 'B>0'])
df1_result = store.select('df1', c)
df2_result = store.select('df2', c)
result = concat([df1_result, df2_result], axis=1)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# pass array/mask as the coordinates
with ensure_clean_store(self.path) as store:
df = DataFrame(np.random.randn(1000, 2),
index=date_range('20000101', periods=1000))
store.append('df', df)
c = store.select_column('df', 'index')
where = c[DatetimeIndex(c).month == 5].index
expected = df.iloc[where]
# locations
result = store.select('df', where=where)
tm.assert_frame_equal(result, expected)
# boolean
result = store.select('df', where=where)
tm.assert_frame_equal(result, expected)
# invalid
pytest.raises(ValueError, store.select, 'df',
where=np.arange(len(df), dtype='float64'))
pytest.raises(ValueError, store.select, 'df',
where=np.arange(len(df) + 1))
pytest.raises(ValueError, store.select, 'df',
where=np.arange(len(df)), start=5)
pytest.raises(ValueError, store.select, 'df',
where=np.arange(len(df)), start=5, stop=10)
# selection with filter
selection = date_range('20000101', periods=500)
result = store.select('df', where='index in selection')
expected = df[df.index.isin(selection)]
tm.assert_frame_equal(result, expected)
# list
df = DataFrame(np.random.randn(10, 2))
store.append('df2', df)
result = store.select('df2', where=[0, 3, 5])
expected = df.iloc[[0, 3, 5]]
tm.assert_frame_equal(result, expected)
# boolean
where = [True] * 10
where[-2] = False
result = store.select('df2', where=where)
expected = df.loc[where]
tm.assert_frame_equal(result, expected)
# start/stop
result = store.select('df2', start=5, stop=10)
expected = df[5:10]
tm.assert_frame_equal(result, expected)
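# append_to_multiple splits one wide frame column-wise across several
# tables; the `selector` table carries the indexable columns that
# select_as_multiple later uses to filter and recombine the pieces.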
def test_append_to_multiple(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df2['foo'] = 'bar'
df = concat([df1, df2], axis=1)
with ensure_clean_store(self.path) as store:
# exceptions
pytest.raises(ValueError, store.append_to_multiple,
{'df1': ['A', 'B'], 'df2': None}, df,
selector='df3')
pytest.raises(ValueError, store.append_to_multiple,
{'df1': None, 'df2': None}, df, selector='df3')
pytest.raises(
ValueError, store.append_to_multiple, 'df1', df, 'df1')
# regular operation
store.append_to_multiple(
{'df1': ['A', 'B'], 'df2': None}, df, selector='df1')
result = store.select_as_multiple(
['df1', 'df2'], where=['A>0', 'B>0'], selector='df1')
expected = df[(df.A > 0) & (df.B > 0)]
tm.assert_frame_equal(result, expected)
def test_append_to_multiple_dropna(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df1.iloc[1, df1.columns.get_indexer(['A', 'B'])] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(self.path) as store:
# dropna=True should guarantee rows are synchronized
store.append_to_multiple(
{'df1': ['A', 'B'], 'df2': None}, df, selector='df1',
dropna=True)
result = store.select_as_multiple(['df1', 'df2'])
expected = df.dropna()
tm.assert_frame_equal(result, expected)
tm.assert_index_equal(store.select('df1').index,
store.select('df2').index)
@pytest.mark.xfail(run=False,
reason="append_to_multiple_dropna_false "
"is not raising as failed")
def test_append_to_multiple_dropna_false(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df1.iloc[1, df1.columns.get_indexer(['A', 'B'])] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(self.path) as store:
# dropna=False shouldn't synchronize row indexes
store.append_to_multiple(
{'df1a': ['A', 'B'], 'df2a': None}, df, selector='df1a',
dropna=False)
with pytest.raises(ValueError):
store.select_as_multiple(['df1a', 'df2a'])
assert not store.select('df1a').index.equals(
store.select('df2a').index)
def test_select_as_multiple(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df2['foo'] = 'bar'
with ensure_clean_store(self.path) as store:
# no tables stored
pytest.raises(Exception, store.select_as_multiple,
None, where=['A>0', 'B>0'], selector='df1')
store.append('df1', df1, data_columns=['A', 'B'])
store.append('df2', df2)
# exceptions
pytest.raises(Exception, store.select_as_multiple,
None, where=['A>0', 'B>0'], selector='df1')
pytest.raises(Exception, store.select_as_multiple,
[None], where=['A>0', 'B>0'], selector='df1')
pytest.raises(KeyError, store.select_as_multiple,
['df1', 'df3'], where=['A>0', 'B>0'],
selector='df1')
pytest.raises(KeyError, store.select_as_multiple,
['df3'], where=['A>0', 'B>0'], selector='df1')
pytest.raises(KeyError, store.select_as_multiple,
['df1', 'df2'], where=['A>0', 'B>0'],
selector='df4')
# default select
result = store.select('df1', ['A>0', 'B>0'])
expected = store.select_as_multiple(
['df1'], where=['A>0', 'B>0'], selector='df1')
tm.assert_frame_equal(result, expected)
expected = store.select_as_multiple(
'df1', where=['A>0', 'B>0'], selector='df1')
tm.assert_frame_equal(result, expected)
# multiple
result = store.select_as_multiple(
['df1', 'df2'], where=['A>0', 'B>0'], selector='df1')
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# multiple (diff selector)
result = store.select_as_multiple(
['df1', 'df2'], where='index>df2.index[4]', selector='df2')
expected = concat([df1, df2], axis=1)
expected = expected[5:]
tm.assert_frame_equal(result, expected)
# test exception for tables with differing row counts
store.append('df3', tm.makeTimeDataFrame(nper=50))
pytest.raises(ValueError, store.select_as_multiple,
['df1', 'df3'], where=['A>0', 'B>0'],
selector='df1')
@pytest.mark.skipif(
LooseVersion(tables.__version__) < LooseVersion('3.1.0'),
reason=("tables version does not support fix for nan selection "
"bug: GH 4858"))
def test_nan_selection_bug_4858(self):
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(cols=range(6), values=range(6)),
dtype='float64')
df['cols'] = (df['cols'] + 10).apply(str)
df.iloc[0] = np.nan
expected = DataFrame(dict(cols=['13.0', '14.0', '15.0'], values=[
3., 4., 5.]), index=[3, 4, 5])
# write w/o the index on that particular column
store.append('df', df, data_columns=True, index=['cols'])
result = store.select('df', where='values>2.0')
assert_frame_equal(result, expected)
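# start/stop select by stored row number (0-based, stop exclusive); they
# compose with any columns/where term, and an out-of-range window simply
# returns an empty frame, as the tests below assert.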
def test_start_stop_table(self):
with ensure_clean_store(self.path) as store:
# table
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
store.append('df', df)
result = store.select(
'df', "columns=['A']", start=0, stop=5)
expected = df.loc[0:4, ['A']]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select(
'df', "columns=['A']", start=30, stop=40)
assert len(result) == 0
expected = df.loc[30:40, ['A']]
tm.assert_frame_equal(result, expected)
def test_start_stop_multiple(self):
# GH 16209
with ensure_clean_store(self.path) as store:
df = DataFrame({"foo": [1, 2], "bar": [1, 2]})
store.append_to_multiple({'selector': ['foo'], 'data': None}, df,
selector='selector')
result = store.select_as_multiple(['selector', 'data'],
selector='selector', start=0,
stop=1)
expected = df.loc[[0], ['foo', 'bar']]
tm.assert_frame_equal(result, expected)
def test_start_stop_fixed(self):
with ensure_clean_store(self.path) as store:
# fixed, GH 8287
df = DataFrame(dict(A=np.random.rand(20),
B=np.random.rand(20)),
index=pd.date_range('20130101', periods=20))
store.put('df', df)
result = store.select(
'df', start=0, stop=5)
expected = df.iloc[0:5, :]
tm.assert_frame_equal(result, expected)
result = store.select(
'df', start=5, stop=10)
expected = df.iloc[5:10, :]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select(
'df', start=30, stop=40)
expected = df.iloc[30:40, :]
tm.assert_frame_equal(result, expected)
# series
s = df.A
store.put('s', s)
result = store.select('s', start=0, stop=5)
expected = s.iloc[0:5]
tm.assert_series_equal(result, expected)
result = store.select('s', start=5, stop=10)
expected = s.iloc[5:10]
tm.assert_series_equal(result, expected)
# sparse; not implemented
df = tm.makeDataFrame()
df.iloc[3:5, 1:3] = np.nan
df.iloc[8:10, -2] = np.nan
dfs = df.to_sparse()
store.put('dfs', dfs)
with pytest.raises(NotImplementedError):
store.select('dfs', start=0, stop=5)
def test_select_filter_corner(self):
df = DataFrame(np.random.randn(50, 100))
df.index = ['%.3d' % c for c in df.index]
df.columns = ['%.3d' % c for c in df.columns]
with ensure_clean_store(self.path) as store:
store.put('frame', df, format='table')
crit = 'columns=df.columns[:75]'
result = store.select('frame', [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75]])
crit = 'columns=df.columns[:75:2]'
result = store.select('frame', [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75:2]])
def test_path_pathlib(self):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, 'df'),
lambda p: pd.read_hdf(p, 'df'))
tm.assert_frame_equal(df, result)
@pytest.mark.parametrize('start, stop', [(0, 2), (1, 2), (None, None)])
def test_contiguous_mixed_data_table(self, start, stop):
# GH 17021
# ValueError when reading a contiguous mixed-data table featuring a VLArray
df = DataFrame({'a': Series([20111010, 20111011, 20111012]),
'b': Series(['ab', 'cd', 'ab'])})
with ensure_clean_store(self.path) as store:
store.append('test_dataset', df)
result = store.select('test_dataset', start=start, stop=stop)
assert_frame_equal(df[start:stop], result)
def test_path_pathlib_hdfstore(self):
df = tm.makeDataFrame()
def writer(path):
with pd.HDFStore(path) as store:
df.to_hdf(store, 'df')
def reader(path):
with pd.HDFStore(path) as store:
return pd.read_hdf(store, 'df')
result = tm.round_trip_pathlib(writer, reader)
tm.assert_frame_equal(df, result)
def test_pickle_path_localpath(self):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, 'df'),
lambda p: pd.read_hdf(p, 'df'))
tm.assert_frame_equal(df, result)
def test_path_localpath_hdfstore(self):
df = tm.makeDataFrame()
def writer(path):
with pd.HDFStore(path) as store:
df.to_hdf(store, 'df')
def reader(path):
with pd.HDFStore(path) as store:
return pd.read_hdf(store, 'df')
result = tm.round_trip_localpath(writer, reader)
tm.assert_frame_equal(df, result)
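# Round-trip helpers shared by the remaining tests: write `obj` into a
# fresh store (optionally with the default compressor), read it back and
# hand both objects to `comparator`.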
def _check_roundtrip(self, obj, comparator, compression=False, **kwargs):
options = {}
if compression:
options['complib'] = _default_compressor
with ensure_clean_store(self.path, 'w', **options) as store:
store['obj'] = obj
retrieved = store['obj']
comparator(retrieved, obj, **kwargs)
def _check_double_roundtrip(self, obj, comparator, compression=False,
**kwargs):
options = {}
if compression:
options['complib'] = compression or _default_compressor
with ensure_clean_store(self.path, 'w', **options) as store:
store['obj'] = obj
retrieved = store['obj']
comparator(retrieved, obj, **kwargs)
store['obj'] = retrieved
again = store['obj']
comparator(again, obj, **kwargs)
def _check_roundtrip_table(self, obj, comparator, compression=False):
options = {}
if compression:
options['complib'] = _default_compressor
with ensure_clean_store(self.path, 'w', **options) as store:
store.put('obj', obj, format='table')
retrieved = store['obj']
comparator(retrieved, obj)
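# Open/close semantics (gh-4409): whether two handles may point at the same
# file depends on PyTables' file-open policy (a second open raises
# ValueError under the strict policy), and any operation on a closed store
# raises ClosedFileError.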
def test_multiple_open_close(self):
# gh-4409: open & close multiple times
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, 'df', mode='w', format='table')
# single
store = HDFStore(path)
assert 'CLOSED' not in store.info()
assert store.is_open
store.close()
assert 'CLOSED' in store.info()
assert not store.is_open
with ensure_clean_path(self.path) as path:
if pytables._table_file_open_policy_is_strict:
# multiples
store1 = HDFStore(path)
def f():
HDFStore(path)
pytest.raises(ValueError, f)
store1.close()
else:
# multiples
store1 = HDFStore(path)
store2 = HDFStore(path)
assert 'CLOSED' not in store1.info()
assert 'CLOSED' not in store2.info()
assert store1.is_open
assert store2.is_open
store1.close()
assert 'CLOSED' in store1.info()
assert not store1.is_open
assert 'CLOSED' not in store2.info()
assert store2.is_open
store2.close()
assert 'CLOSED' in store1.info()
assert 'CLOSED' in store2.info()
assert not store1.is_open
assert not store2.is_open
# nested close
store = HDFStore(path, mode='w')
store.append('df', df)
store2 = HDFStore(path)
store2.append('df2', df)
store2.close()
assert 'CLOSED' in store2.info()
assert not store2.is_open
store.close()
assert 'CLOSED' in store.info()
assert not store.is_open
# double closing
store = HDFStore(path, mode='w')
store.append('df', df)
store2 = HDFStore(path)
store.close()
assert 'CLOSED' in store.info()
assert not store.is_open
store2.close()
assert 'CLOSED' in store2.info()
assert not store2.is_open
# ops on a closed store
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, 'df', mode='w', format='table')
store = HDFStore(path)
store.close()
pytest.raises(ClosedFileError, store.keys)
pytest.raises(ClosedFileError, lambda: 'df' in store)
pytest.raises(ClosedFileError, lambda: len(store))
pytest.raises(ClosedFileError, lambda: store['df'])
pytest.raises(AttributeError, lambda: store.df)
pytest.raises(ClosedFileError, store.select, 'df')
pytest.raises(ClosedFileError, store.get, 'df')
pytest.raises(ClosedFileError, store.append, 'df2', df)
pytest.raises(ClosedFileError, store.put, 'df3', df)
pytest.raises(ClosedFileError, store.get_storer, 'df2')
pytest.raises(ClosedFileError, store.remove, 'df2')
def f():
store.select('df')
tm.assert_raises_regex(ClosedFileError, 'file is not open', f)
def test_pytables_native_read(self, datapath):
with ensure_clean_store(
datapath('io', 'data', 'legacy_hdf/pytables_native.h5'),
mode='r') as store:
d2 = store['detector/readout']
assert isinstance(d2, DataFrame)
@pytest.mark.skipif(PY35 and is_platform_windows(),
reason="native2 read fails oddly on windows / 3.5")
def test_pytables_native2_read(self, datapath):
with ensure_clean_store(
datapath('io', 'data', 'legacy_hdf', 'pytables_native2.h5'),
mode='r') as store:
str(store)
d1 = store['detector']
assert isinstance(d1, DataFrame)
def test_legacy_table_read(self, datapath):
# legacy table types
with ensure_clean_store(
datapath('io', 'data', 'legacy_hdf', 'legacy_table.h5'),
mode='r') as store:
with catch_warnings():
simplefilter("ignore", pd.io.pytables.IncompatibilityWarning)
store.select('df1')
store.select('df2')
store.select('wp1')
# force the frame
store.select('df2', typ='legacy_frame')
# old version warning
pytest.raises(
Exception, store.select, 'wp1', 'minor_axis=B')
df2 = store.select('df2')
result = store.select('df2', 'index>df2.index[2]')
expected = df2[df2.index > df2.index[2]]
assert_frame_equal(expected, result)
def test_copy(self):
with catch_warnings(record=True):
def do_copy(f, new_f=None, keys=None,
propindexes=True, **kwargs):
try:
store = HDFStore(f, 'r')
if new_f is None:
import tempfile
fd, new_f = tempfile.mkstemp()
tstore = store.copy(
new_f, keys=keys, propindexes=propindexes, **kwargs)
# check keys
if keys is None:
keys = store.keys()
assert set(keys) == set(tstore.keys())
# check indices & nrows
for k in tstore.keys():
if tstore.get_storer(k).is_table:
new_t = tstore.get_storer(k)
orig_t = store.get_storer(k)
assert orig_t.nrows == new_t.nrows
# check propindexes
if propindexes:
for a in orig_t.axes:
if a.is_indexed:
assert new_t[a.name].is_indexed
finally:
safe_close(store)
safe_close(tstore)
try:
os.close(fd)
except (OSError, NameError):  # fd may be unbound or already closed
pass
safe_remove(new_f)
# new table
df = tm.makeDataFrame()
try:
path = create_tempfile(self.path)
st = HDFStore(path)
st.append('df', df, data_columns=['A'])
st.close()
do_copy(f=path)
do_copy(f=path, propindexes=False)
finally:
safe_remove(path)
def test_store_datetime_fractional_secs(self):
with ensure_clean_store(self.path) as store:
dt = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456)
series = Series([0], [dt])
store['a'] = series
assert store['a'].index[0] == dt
def test_tseries_indices_series(self):
with ensure_clean_store(self.path) as store:
idx = tm.makeDateIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store['a'] = ser
result = store['a']
tm.assert_series_equal(result, ser)
assert result.index.freq == ser.index.freq
tm.assert_class_equal(result.index, ser.index, obj="series index")
idx = tm.makePeriodIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store['a'] = ser
result = store['a']
tm.assert_series_equal(result, ser)
assert result.index.freq == ser.index.freq
tm.assert_class_equal(result.index, ser.index, obj="series index")
def test_tseries_indices_frame(self):
with ensure_clean_store(self.path) as store:
idx = tm.makeDateIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
store['a'] = df
result = store['a']
assert_frame_equal(result, df)
assert result.index.freq == df.index.freq
tm.assert_class_equal(result.index, df.index,
obj="dataframe index")
idx = tm.makePeriodIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), idx)
store['a'] = df
result = store['a']
assert_frame_equal(result, df)
assert result.index.freq == df.index.freq
tm.assert_class_equal(result.index, df.index,
obj="dataframe index")
def test_unicode_index(self):
unicode_values = [u('\u03c3'), u('\u03c3\u03c3')]
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
s = Series(np.random.randn(len(unicode_values)), unicode_values)
self._check_roundtrip(s, tm.assert_series_equal)
def test_unicode_longer_encoded(self):
# GH 11234
char = '\u0394'
df = pd.DataFrame({'A': [char]})
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table', encoding='utf-8')
result = store.get('df')
tm.assert_frame_equal(result, df)
df = pd.DataFrame({'A': ['a', char], 'B': ['b', 'b']})
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table', encoding='utf-8')
result = store.get('df')
tm.assert_frame_equal(result, df)
def test_store_datetime_mixed(self):
df = DataFrame(
{'a': [1, 2, 3], 'b': [1., 2., 3.], 'c': ['a', 'b', 'c']})
ts = tm.makeTimeSeries()
df['d'] = ts.index[:3]
self._check_roundtrip(df, tm.assert_frame_equal)
# def test_cant_write_multiindex_table(self):
# # for now, #1848
# df = DataFrame(np.random.randn(10, 4),
# index=[np.arange(5).repeat(2),
# np.tile(np.arange(2), 5)])
# pytest.raises(Exception, store.put, 'foo', df, format='table')
def test_append_with_diff_col_name_types_raises_value_error(self):
df = DataFrame(np.random.randn(10, 1))
df2 = DataFrame({'a': np.random.randn(10)})
df3 = DataFrame({(1, 2): np.random.randn(10)})
df4 = DataFrame({('1', 2): np.random.randn(10)})
df5 = DataFrame({('1', 2, object): np.random.randn(10)})
with ensure_clean_store(self.path) as store:
name = 'df_%s' % tm.rands(10)
store.append(name, df)
for d in (df2, df3, df4, df5):
with pytest.raises(ValueError):
store.append(name, d)
def test_query_with_nested_special_character(self):
df = DataFrame({'a': ['a', 'a', 'c', 'b',
'test & test', 'c', 'b', 'e'],
'b': [1, 2, 3, 4, 5, 6, 7, 8]})
expected = df[df.a == 'test & test']
with ensure_clean_store(self.path) as store:
store.append('test', df, format='table', data_columns=True)
result = store.select('test', 'a = "test & test"')
tm.assert_frame_equal(expected, result)
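# Categorical round trips: the codes go into the table itself while the
# categories are written under a sibling /meta node (checked explicitly
# below), and appends must use a compatible set of categories.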
def test_categorical(self):
with ensure_clean_store(self.path) as store:
# Basic
_maybe_remove(store, 's')
s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[
'a', 'b', 'c', 'd'], ordered=False))
store.append('s', s, format='table')
result = store.select('s')
tm.assert_series_equal(s, result)
_maybe_remove(store, 's_ordered')
s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[
'a', 'b', 'c', 'd'], ordered=True))
store.append('s_ordered', s, format='table')
result = store.select('s_ordered')
tm.assert_series_equal(s, result)
_maybe_remove(store, 'df')
df = DataFrame({"s": s, "vals": [1, 2, 3, 4, 5, 6]})
store.append('df', df, format='table')
result = store.select('df')
tm.assert_frame_equal(result, df)
# Dtypes
s = Series([1, 1, 2, 2, 3, 4, 5]).astype('category')
store.append('si', s)
result = store.select('si')
tm.assert_series_equal(result, s)
s = Series([1, 1, np.nan, 2, 3, 4, 5]).astype('category')
store.append('si2', s)
result = store.select('si2')
tm.assert_series_equal(result, s)
# Multiple
df2 = df.copy()
df2['s2'] = Series(list('abcdefg')).astype('category')
store.append('df2', df2)
result = store.select('df2')
tm.assert_frame_equal(result, df2)
# Make sure the metadata is OK
info = store.info()
assert '/df2 ' in info
# assert '/df2/meta/values_block_0/meta' in info
assert '/df2/meta/values_block_1/meta' in info
# unordered
s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[
'a', 'b', 'c', 'd'], ordered=False))
store.append('s2', s, format='table')
result = store.select('s2')
tm.assert_series_equal(result, s)
# Query
store.append('df3', df, data_columns=['s'])
expected = df[df.s.isin(['b', 'c'])]
result = store.select('df3', where=['s in ["b","c"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(['b', 'c'])]
result = store.select('df3', where=['s = ["b","c"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(['d'])]
result = store.select('df3', where=['s in ["d"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(['f'])]
result = store.select('df3', where=['s in ["f"]'])
tm.assert_frame_equal(result, expected)
# Appending with same categories is ok
store.append('df3', df)
df = concat([df, df])
expected = df[df.s.isin(['b', 'c'])]
result = store.select('df3', where=['s in ["b","c"]'])
tm.assert_frame_equal(result, expected)
# Appending must have the same categories
df3 = df.copy()
df3['s'].cat.remove_unused_categories(inplace=True)
with pytest.raises(ValueError):
store.append('df3', df3)
# Remove, and make sure meta data is removed (its a recursive
# removal so should be).
result = store.select('df3/meta/s/meta')
assert result is not None
store.remove('df3')
with pytest.raises(KeyError):
store.select('df3/meta/s/meta')
def test_categorical_conversion(self):
# GH13322
# Check that read_hdf with categorical columns doesn't return rows if
# the where criterion isn't met.
obsids = ['ESP_012345_6789', 'ESP_987654_3210']
imgids = ['APF00006np', 'APF0001imm']
data = [4.3, 9.8]
# Test without categories
df = DataFrame(dict(obsids=obsids, imgids=imgids, data=data))
# We are expecting an empty DataFrame matching types of df
expected = df.iloc[[], :]
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table', data_columns=True)
result = read_hdf(path, 'df', where='obsids=B')
tm.assert_frame_equal(result, expected)
# Test with categories
df.obsids = df.obsids.astype('category')
df.imgids = df.imgids.astype('category')
# We are expecting an empty DataFrame matching types of df
expected = df.iloc[[], :]
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table', data_columns=True)
result = read_hdf(path, 'df', where='obsids=B')
tm.assert_frame_equal(result, expected)
def test_categorical_nan_only_columns(self):
# GH18413
# Check that categorical columns containing only NaN values survive a
# round trip through to_hdf/read_hdf.
df = pd.DataFrame({
'a': ['a', 'b', 'c', np.nan],
'b': [np.nan, np.nan, np.nan, np.nan],
'c': [1, 2, 3, 4],
'd': pd.Series([None] * 4, dtype=object)
})
df['a'] = df.a.astype('category')
df['b'] = df.b.astype('category')
df['d'] = df.d.astype('category')
expected = df
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table', data_columns=True)
result = read_hdf(path, 'df')
tm.assert_frame_equal(result, expected)
def test_duplicate_column_name(self):
df = DataFrame(columns=["a", "a"], data=[[0, 0]])
with ensure_clean_path(self.path) as path:
pytest.raises(ValueError, df.to_hdf,
path, 'df', format='fixed')
df.to_hdf(path, 'df', format='table')
other = read_hdf(path, 'df')
tm.assert_frame_equal(df, other)
assert df.equals(other)
assert other.equals(df)
def test_round_trip_equals(self):
# GH 9330
df = DataFrame({"B": [1, 2], "A": ["x", "y"]})
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table')
other = read_hdf(path, 'df')
tm.assert_frame_equal(df, other)
assert df.equals(other)
assert other.equals(df)
def test_preserve_timedeltaindex_type(self):
# GH9635
# Storing TimedeltaIndexed DataFrames in fixed stores did not preserve
# the type of the index.
df = DataFrame(np.random.normal(size=(10, 5)))
df.index = timedelta_range(
start='0s', periods=10, freq='1s', name='example')
with ensure_clean_store(self.path) as store:
store['df'] = df
assert_frame_equal(store['df'], df)
def test_columns_multiindex_modified(self):
# BUG: 7212
# read_hdf store.select modified the passed columns parameters
# when multi-indexed.
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
df.index.name = 'letters'
df = df.set_index(keys='E', append=True)
data_columns = df.index.names + df.columns.tolist()
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df',
mode='a',
append=True,
data_columns=data_columns,
index=False)
cols2load = list('BCD')
cols2load_original = list(cols2load)
df_loaded = read_hdf(path, 'df', columns=cols2load) # noqa
assert cols2load_original == cols2load
@ignore_natural_naming_warning
def test_to_hdf_with_object_column_names(self):
# GH9057
# Writing HDF5 table format should only work for string-like
# column types
types_should_fail = [tm.makeIntIndex, tm.makeFloatIndex,
tm.makeDateIndex, tm.makeTimedeltaIndex,
tm.makePeriodIndex]
types_should_run = [tm.makeStringIndex, tm.makeCategoricalIndex]
if compat.PY3:
types_should_run.append(tm.makeUnicodeIndex)
else:
# TODO: Add back to types_should_fail
# https://github.com/pandas-dev/pandas/issues/20907
pass
for index in types_should_fail:
df = DataFrame(np.random.randn(10, 2), columns=index(2))
with ensure_clean_path(self.path) as path:
with catch_warnings(record=True):
with tm.assert_raises_regex(
ValueError, ("cannot have non-object label "
"DataIndexableCol")):
df.to_hdf(path, 'df', format='table',
data_columns=True)
for index in types_should_run:
df = DataFrame(np.random.randn(10, 2), columns=index(2))
with ensure_clean_path(self.path) as path:
with catch_warnings(record=True):
df.to_hdf(path, 'df', format='table', data_columns=True)
result = pd.read_hdf(
path, 'df', where="index = [{0}]".format(df.index[0]))
assert len(result)
def test_read_hdf_open_store(self):
# GH10330
# No check for non-string path_or_buf, and no test of an open store
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
df.index.name = 'letters'
df = df.set_index(keys='E', append=True)
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', mode='w')
direct = read_hdf(path, 'df')
store = HDFStore(path, mode='r')
indirect = read_hdf(store, 'df')
tm.assert_frame_equal(direct, indirect)
assert store.is_open
store.close()
def test_read_hdf_iterator(self):
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
df.index.name = 'letters'
df = df.set_index(keys='E', append=True)
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', mode='w', format='t')
direct = read_hdf(path, 'df')
iterator = read_hdf(path, 'df', iterator=True)
assert isinstance(iterator, TableIterator)
indirect = next(iterator.__iter__())
tm.assert_frame_equal(direct, indirect)
iterator.store.close()
def test_read_hdf_errors(self):
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
pytest.raises(IOError, read_hdf, path, 'key')
df.to_hdf(path, 'df')
store = HDFStore(path, mode='r')
store.close()
pytest.raises(IOError, read_hdf, store, 'df')
def test_read_hdf_generic_buffer_errors(self):
pytest.raises(NotImplementedError, read_hdf, BytesIO(b''), 'df')
def test_invalid_complib(self):
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
with pytest.raises(ValueError):
df.to_hdf(path, 'df', complib='foolib')
# GH10443
def test_read_nokey(self):
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
# Categorical dtype not supported for "fixed" format. So no need
# to test with that dtype in the dataframe here.
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', mode='a')
reread = read_hdf(path)
assert_frame_equal(df, reread)
df.to_hdf(path, 'df2', mode='a')
pytest.raises(ValueError, read_hdf, path)
def test_read_nokey_table(self):
# GH13231
df = DataFrame({'i': range(5),
'c': Series(list('abacd'), dtype='category')})
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', mode='a', format='table')
reread = read_hdf(path)
assert_frame_equal(df, reread)
df.to_hdf(path, 'df2', mode='a', format='table')
pytest.raises(ValueError, read_hdf, path)
def test_read_nokey_empty(self):
with ensure_clean_path(self.path) as path:
store = HDFStore(path)
store.close()
pytest.raises(ValueError, read_hdf, path)
@td.skip_if_no('pathlib')
def test_read_from_pathlib_path(self):
# GH11773
from pathlib import Path
expected = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as filename:
path_obj = Path(filename)
expected.to_hdf(path_obj, 'df', mode='a')
actual = read_hdf(path_obj, 'df')
tm.assert_frame_equal(expected, actual)
@td.skip_if_no('py.path')
def test_read_from_py_localpath(self):
# GH11773
from py.path import local as LocalPath
expected = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as filename:
path_obj = LocalPath(filename)
expected.to_hdf(path_obj, 'df', mode='a')
actual = read_hdf(path_obj, 'df')
tm.assert_frame_equal(expected, actual)
def test_query_long_float_literal(self):
# GH 14241
df = pd.DataFrame({'A': [1000000000.0009,
1000000000.0011,
1000000000.0015]})
with ensure_clean_store(self.path) as store:
store.append('test', df, format='table', data_columns=True)
cutoff = 1000000000.0006
result = store.select('test', "A < %.4f" % cutoff)
assert result.empty
cutoff = 1000000000.0010
result = store.select('test', "A > %.4f" % cutoff)
expected = df.loc[[1, 2], :]
tm.assert_frame_equal(expected, result)
exact = 1000000000.0011
result = store.select('test', 'A == %.4f' % exact)
expected = df.loc[[1], :]
tm.assert_frame_equal(expected, result)
def test_query_compare_column_type(self):
# GH 15492
df = pd.DataFrame({'date': ['2014-01-01', '2014-01-02'],
'real_date': date_range('2014-01-01', periods=2),
'float': [1.1, 1.2],
'int': [1, 2]},
columns=['date', 'real_date', 'float', 'int'])
with ensure_clean_store(self.path) as store:
store.append('test', df, format='table', data_columns=True)
ts = pd.Timestamp('2014-01-01') # noqa
result = store.select('test', where='real_date > ts')
expected = df.loc[[1], :]
tm.assert_frame_equal(expected, result)
for op in ['<', '>', '==']:
# comparing non-strings to a string column always fails
for v in [2.1, True, pd.Timestamp('2014-01-01'),
pd.Timedelta(1, 's')]:
query = 'date {op} v'.format(op=op)
with pytest.raises(TypeError):
result = store.select('test', where=query)
# strings compared to other columns must be convertible to the column type
v = 'a'
for col in ['int', 'float', 'real_date']:
query = '{col} {op} v'.format(op=op, col=col)
with pytest.raises(ValueError):
result = store.select('test', where=query)
for v, col in zip(['1', '1.1', '2014-01-01'],
['int', 'float', 'real_date']):
query = '{col} {op} v'.format(op=op, col=col)
result = store.select('test', where=query)
if op == '==':
expected = df.loc[[0], :]
elif op == '>':
expected = df.loc[[1], :]
else:
expected = df.loc[[], :]
tm.assert_frame_equal(expected, result)
@pytest.mark.parametrize('format', ['fixed', 'table'])
def test_read_hdf_series_mode_r(self, format):
# GH 16583
# Tests that reading a Series saved to an HDF file
# still works if a mode='r' argument is supplied
series = tm.makeFloatSeries()
with ensure_clean_path(self.path) as path:
series.to_hdf(path, key='data', format=format)
result = pd.read_hdf(path, key='data', mode='r')
tm.assert_series_equal(result, series)
@pytest.mark.skipif(not PY36, reason="Need python 3.6")
def test_fspath(self):
with tm.ensure_clean('foo.h5') as path:
with pd.HDFStore(path) as store:
assert os.fspath(store) == str(path)
def test_read_py2_hdf_file_in_py3(self, datapath):
# GH 16781
# tests reading a PeriodIndex DataFrame written in Python2 in Python3
# the file was generated in Python 2.7 like so:
#
# df = pd.DataFrame([1.,2,3], index=pd.PeriodIndex(
# ['2015-01-01', '2015-01-02', '2015-01-05'], freq='B'))
# df.to_hdf('periodindex_0.20.1_x86_64_darwin_2.7.13.h5', 'p')
expected = pd.DataFrame([1., 2, 3], index=pd.PeriodIndex(
['2015-01-01', '2015-01-02', '2015-01-05'], freq='B'))
with ensure_clean_store(
datapath('io', 'data', 'legacy_hdf',
'periodindex_0.20.1_x86_64_darwin_2.7.13.h5'),
mode='r') as store:
result = store['p']
assert_frame_equal(result, expected)
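# Complex values (GH10447): complex64/complex128 round-trip through both
# fixed and table formats, but cannot be made data columns, and a complex
# Series only stores in table format with index=False, as the *_error
# tests at the end of this class show.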
class TestHDFComplexValues(Base):
# GH10447
def test_complex_fixed(self):
df = DataFrame(np.random.rand(4, 5).astype(np.complex64),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
df = DataFrame(np.random.rand(4, 5).astype(np.complex128),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
def test_complex_table(self):
df = DataFrame(np.random.rand(4, 5).astype(np.complex64),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
df = DataFrame(np.random.rand(4, 5).astype(np.complex128),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table', mode='w')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
def test_complex_mixed_fixed(self):
complex64 = np.array([1.0 + 1.0j, 1.0 + 1.0j,
1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64)
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j],
dtype=np.complex128)
df = DataFrame({'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'd'],
'C': complex64,
'D': complex128,
'E': [1.0, 2.0, 3.0, 4.0]},
index=list('abcd'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
def test_complex_mixed_table(self):
complex64 = np.array([1.0 + 1.0j, 1.0 + 1.0j,
1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64)
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j],
dtype=np.complex128)
df = DataFrame({'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'd'],
'C': complex64,
'D': complex128,
'E': [1.0, 2.0, 3.0, 4.0]},
index=list('abcd'))
with ensure_clean_store(self.path) as store:
store.append('df', df, data_columns=['A', 'B'])
result = store.select('df', where='A>2')
assert_frame_equal(df.loc[df.A > 2], result)
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
def test_complex_across_dimensions_fixed(self):
with catch_warnings(record=True):
complex128 = np.array(
[1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
s = Series(complex128, index=list('abcd'))
df = DataFrame({'A': s, 'B': s})
p = Panel({'One': df, 'Two': df})
objs = [s, df, p]
comps = [tm.assert_series_equal, tm.assert_frame_equal,
tm.assert_panel_equal]
for obj, comp in zip(objs, comps):
with ensure_clean_path(self.path) as path:
obj.to_hdf(path, 'obj', format='fixed')
reread = read_hdf(path, 'obj')
comp(obj, reread)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
def test_complex_across_dimensions(self):
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
s = Series(complex128, index=list('abcd'))
df = DataFrame({'A': s, 'B': s})
with catch_warnings(record=True):
p = Panel({'One': df, 'Two': df})
objs = [df, p]
comps = [tm.assert_frame_equal, tm.assert_panel_equal]
for obj, comp in zip(objs, comps):
with ensure_clean_path(self.path) as path:
obj.to_hdf(path, 'obj', format='table')
reread = read_hdf(path, 'obj')
comp(obj, reread)
def test_complex_indexing_error(self):
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j],
dtype=np.complex128)
df = DataFrame({'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'd'],
'C': complex128},
index=list('abcd'))
with ensure_clean_store(self.path) as store:
pytest.raises(TypeError, store.append,
'df', df, data_columns=['C'])
def test_complex_series_error(self):
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
s = Series(complex128, index=list('abcd'))
with ensure_clean_path(self.path) as path:
pytest.raises(TypeError, s.to_hdf, path, 'obj', format='t')
with ensure_clean_path(self.path) as path:
s.to_hdf(path, 'obj', format='t', index=False)
reread = read_hdf(path, 'obj')
tm.assert_series_equal(s, reread)
def test_complex_append(self):
df = DataFrame({'a': np.random.randn(100).astype(np.complex128),
'b': np.random.randn(100)})
with ensure_clean_store(self.path) as store:
store.append('df', df, data_columns=['b'])
store.append('df', df)
result = store.select('df')
assert_frame_equal(pd.concat([df, df], axis=0), result)
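# Timezone round trips: tz-aware values must come back with their zone
# intact for both pytz and dateutil zones; _compare_with_tz below checks
# tz equality element-wise on top of assert_frame_equal.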
class TestTimezones(Base):
def _compare_with_tz(self, a, b):
tm.assert_frame_equal(a, b)
# compare the zones on each element
for c in a.columns:
for i in a.index:
a_e = a.loc[i, c]
b_e = b.loc[i, c]
if not (a_e == b_e and a_e.tz == b_e.tz):
raise AssertionError(
"invalid tz comparison [%s] [%s]" % (a_e, b_e))
def test_append_with_timezones_dateutil(self):
from datetime import timedelta
# use maybe_get_tz instead of dateutil.tz.gettz to handle Windows
# filename issues.
from pandas._libs.tslibs.timezones import maybe_get_tz
gettz = lambda x: maybe_get_tz('dateutil/' + x)
# as columns
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A=[Timestamp('20130102 2:00:00', tz=gettz(
'US/Eastern')) + timedelta(hours=1) * i for i in range(5)]))
store.append('df_tz', df, data_columns=['A'])
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
# select with tz aware
expected = df[df.A >= df.A[3]]
result = store.select('df_tz', where='A>=df.A[3]')
self._compare_with_tz(result, expected)
# ensure we include dates in DST and STD time here.
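            # '20130102' is in US/Eastern standard time (UTC-5) and '20130603'
            # is in daylight saving time (UTC-4), so both offsets round-trip
            # through the store below.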
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A=Timestamp('20130102',
tz=gettz('US/Eastern')),
B=Timestamp('20130603',
tz=gettz('US/Eastern'))),
index=range(5))
store.append('df_tz', df)
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
df = DataFrame(dict(A=Timestamp('20130102',
tz=gettz('US/Eastern')),
B=Timestamp('20130102', tz=gettz('EET'))),
index=range(5))
pytest.raises(ValueError, store.append, 'df_tz', df)
# this is ok
_maybe_remove(store, 'df_tz')
store.append('df_tz', df, data_columns=['A', 'B'])
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
# can't append with diff timezone
df = DataFrame(dict(A=Timestamp('20130102',
tz=gettz('US/Eastern')),
B=Timestamp('20130102', tz=gettz('CET'))),
index=range(5))
pytest.raises(ValueError, store.append, 'df_tz', df)
# as index
with ensure_clean_store(self.path) as store:
# GH 4098 example
df = DataFrame(dict(A=Series(lrange(3), index=date_range(
'2000-1-1', periods=3, freq='H', tz=gettz('US/Eastern')))))
_maybe_remove(store, 'df')
store.put('df', df)
result = store.select('df')
assert_frame_equal(result, df)
_maybe_remove(store, 'df')
store.append('df', df)
result = store.select('df')
assert_frame_equal(result, df)
def test_append_with_timezones_pytz(self):
from datetime import timedelta
# as columns
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A=[Timestamp('20130102 2:00:00',
tz='US/Eastern') +
timedelta(hours=1) * i
for i in range(5)]))
store.append('df_tz', df, data_columns=['A'])
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
# select with tz aware
self._compare_with_tz(store.select(
'df_tz', where='A>=df.A[3]'), df[df.A >= df.A[3]])
_maybe_remove(store, 'df_tz')
# ensure we include dates in DST and STD time here.
df = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),
B=Timestamp('20130603', tz='US/Eastern')),
index=range(5))
store.append('df_tz', df)
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
df = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),
B=Timestamp('20130102', tz='EET')),
index=range(5))
pytest.raises(ValueError, store.append, 'df_tz', df)
# this is ok
_maybe_remove(store, 'df_tz')
store.append('df_tz', df, data_columns=['A', 'B'])
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
# can't append with diff timezone
df = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),
B=Timestamp('20130102', tz='CET')),
index=range(5))
pytest.raises(ValueError, store.append, 'df_tz', df)
# as index
with ensure_clean_store(self.path) as store:
# GH 4098 example
df = DataFrame(dict(A=Series(lrange(3), index=date_range(
'2000-1-1', periods=3, freq='H', tz='US/Eastern'))))
_maybe_remove(store, 'df')
store.put('df', df)
result = store.select('df')
assert_frame_equal(result, df)
_maybe_remove(store, 'df')
store.append('df', df)
result = store.select('df')
assert_frame_equal(result, df)
def test_tseries_select_index_column(self):
# GH7777
# selecting a UTC datetimeindex column did
# not preserve UTC tzinfo set before storing
# check that no tz still works
rng = date_range('1/1/2000', '1/30/2000')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store.append('frame', frame)
result = store.select_column('frame', 'index')
assert rng.tz == DatetimeIndex(result.values).tz
# check utc
rng = date_range('1/1/2000', '1/30/2000', tz='UTC')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store.append('frame', frame)
result = store.select_column('frame', 'index')
assert rng.tz == result.dt.tz
# double check non-utc
rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store.append('frame', frame)
result = store.select_column('frame', 'index')
assert rng.tz == result.dt.tz
def test_timezones_fixed(self):
with ensure_clean_store(self.path) as store:
# index
rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
store['df'] = df
result = store['df']
assert_frame_equal(result, df)
# as data
# GH11411
_maybe_remove(store, 'df')
df = DataFrame({'A': rng,
'B': rng.tz_convert('UTC').tz_localize(None),
'C': rng.tz_convert('CET'),
'D': range(len(rng))}, index=rng)
store['df'] = df
result = store['df']
assert_frame_equal(result, df)
def test_fixed_offset_tz(self):
rng = date_range('1/1/2000 00:00:00-07:00', '1/30/2000 00:00:00-07:00')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store['frame'] = frame
recons = store['frame']
tm.assert_index_equal(recons.index, rng)
assert rng.tz == recons.index.tz
@td.skip_if_windows
def test_store_timezone(self):
# GH2852
# issue storing datetime.date with a timezone as it resets when read
# back in a new timezone
# original method
with ensure_clean_store(self.path) as store:
today = datetime.date(2013, 9, 10)
df = DataFrame([1, 2, 3], index=[today, today, today])
store['obj1'] = df
result = store['obj1']
assert_frame_equal(result, df)
# with tz setting
with ensure_clean_store(self.path) as store:
with set_timezone('EST5EDT'):
today = datetime.date(2013, 9, 10)
df = DataFrame([1, 2, 3], index=[today, today, today])
store['obj1'] = df
with set_timezone('CST6CDT'):
result = store['obj1']
assert_frame_equal(result, df)
def test_legacy_datetimetz_object(self, datapath):
# legacy from < 0.17.0
# 8260
expected = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),
B=Timestamp('20130603', tz='CET')),
index=range(5))
with ensure_clean_store(
datapath('io', 'data', 'legacy_hdf', 'datetimetz_object.h5'),
mode='r') as store:
result = store['df']
assert_frame_equal(result, expected)
def test_dst_transitions(self):
        # make sure we are not failing on transitions
with ensure_clean_store(self.path) as store:
times = pd.date_range("2013-10-26 23:00", "2013-10-27 01:00",
tz="Europe/London",
freq="H",
ambiguous='infer')
for i in [times, times + pd.Timedelta('10min')]:
_maybe_remove(store, 'df')
df = DataFrame({'A': range(len(i)), 'B': i}, index=i)
store.append('df', df)
result = store.select('df')
assert_frame_equal(result, df)
| bsd-3-clause |
cpcloud/dynd-python | dynd/benchmarks/benchmark_random.py | 8 | 1751 | from operator import add
from dynd import nd, ndt
import matplotlib
import matplotlib.pyplot
from benchrun import Benchmark, median
from benchtime import Timer, CUDATimer
#size = [10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000]
size = [10, 100, 1000, 10000, 100000, 1000000, 10000000]
class UniformBenchmark(Benchmark):
parameters = ('size',)
size = size
def __init__(self, cuda = False):
Benchmark.__init__(self)
self.cuda = cuda
@median
def run(self, size):
if self.cuda:
dst_tp = ndt.type('cuda_device[{} * float64]'.format(size))
else:
dst_tp = ndt.type('{} * float64'.format(size))
dst = nd.empty(dst_tp)
with CUDATimer() if self.cuda else Timer() as timer:
nd.uniform(dst_tp = dst_tp)
return timer.elapsed_time()
class NumPyUniformBenchmark(Benchmark):
parameters = ('size',)
size = size
@median
def run(self, size):
import numpy as np
with Timer() as timer:
np.random.uniform(size = size)
return timer.elapsed_time()
class PyCUDAUniformBenchmark(Benchmark):
parameters = ('size',)
size = size
def __init__(self, gen):
Benchmark.__init__(self)
self.gen = gen
@median
def run(self, size):
import numpy as np
with CUDATimer() as timer:
self.gen.gen_uniform(size, np.float64)
return timer.elapsed_time()
if __name__ == '__main__':
cuda = True
benchmark = UniformBenchmark(cuda = cuda)
benchmark.plot_result(loglog = True)
benchmark = NumPyUniformBenchmark()
benchmark.plot_result(loglog = True)
if cuda:
from pycuda import curandom
benchmark = PyCUDAUniformBenchmark(curandom.XORWOWRandomNumberGenerator())
benchmark.plot_result(loglog = True)
matplotlib.pyplot.show() | bsd-2-clause |
omrihar/1_npfi | fig2.py | 1 | 8444 | #!/usr/bin/env python2
# encoding: utf-8
'''
fig2.py
Written by:
Omri Har-Shemesh, Computational Science Lab, University of Amsterdam
[email protected]
Last updated on 25 September 2015
Description:
Figure 2 in Ref.[1]
References:
[1] O. Har-Shemesh, R. Quax, B. Miñano, A.G. Hoekstra, P.M.A. Sloot, Non-parametric
estimation of Fisher information from real data, arxiv:1507.00964[stat.CO]
Functions:
Dependencies:
numpy
matplotlib
timeit
cPickle
os
gzip
npfi.py
'''
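# A rough usage sketch (not part of the original script; assumes npfi.py from
# Ref.[1] is importable and mirrors the parameter values used in the __main__
# block below; "fig2_demo.pklz" is a hypothetical cache file name):
#
#     ss = np.linspace(0.1, 1, 10)
#     data = simulate_data(ss, N=10000, rep=100, e=0.05, zero=1e-10,
#                          G=100, alpha=3, fname="fig2_demo.pklz")
#     plot_data(data, fname="fig2.pdf")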
from __future__ import division
import numpy as np
from numpy.random import normal
import matplotlib.pyplot as plt
import os
import gzip
import cPickle as pickle
import timeit
from npfi import npfi, get_pdfs_from_data
def simulate_data(ss, N, rep, e, zero, G, alpha, fname):
""" Simulates the data for the plot
Args:
ss: An array of sigma values to estimate the FI at.
N: Number of data points for each PDF.
rep: Number of repetitions of the whole simulation.
e: The value of the epsilon parameter.
zero: What should npfi consider as zero
G: G for DEFT
alpha: alpha for DEFT
fname: Name of the file where the simulation data will be stored.
Returns:
data: A dictionary with all simulated data, which was also stored to
the file.
"""
# All list containers we need to store the values we compute
FI_deft_median, FI_deft_5, FI_deft_95 = [], [], []
FI_kde_median, FI_kde_5, FI_kde_95 = [], [], []
err_deft_median, err_deft_5, err_deft_95 = [], [], []
err_kde_median, err_kde_5, err_kde_95 = [], [], []
FI_deft_values_all, FI_kde_values_all = [], []
dss = []
# Go over all sigma values in ss
for i, s in enumerate(ss):
real_FI = 2 / s ** 2
ds = s / (e * np.sqrt(N)) # Choose ds according to desired epsilon
# If ds >= s we have a problem of sampling with negative std
while ds >= s:
ds *= 0.9
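        # Worked example: with s = 0.5, N = 10000 and e = 0.05,
        # ds = 0.5 / (0.05 * sqrt(10000)) = 0.1, comfortably below s.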
dss.append(ds)
# Estimate the FI for rep repetitions
FI_deft_values, FI_kde_values = [], []
for j in range(rep):
sim_data = [normal(size=N, scale=s),
normal(size=N, scale=s-ds),
normal(size=N, scale=s+ds)]
pdfs_deft, bbox_deft = get_pdfs_from_data(sim_data, method="deft", G=G,
alpha=alpha, bbox="adjust")
pdfs_kde, bbox_kde = get_pdfs_from_data(sim_data, method="gaussian_kde")
FI_deft, a, b = npfi(pdfs_deft, ds, bounds=bbox_deft,
logarithmic=False, zero=zero, N=N)
FI_kde, a, b = npfi(pdfs_kde, ds, bounds=bbox_kde,
logarithmic=True, zero=zero, N=N)
FI_deft_values.append(FI_deft)
FI_kde_values.append(FI_kde)
# More convenient to use as numpy arrays
FI_deft_values = np.array(FI_deft_values)
FI_kde_values = np.array(FI_kde_values)
FI_deft_values_all.append(FI_deft_values)
FI_kde_values_all.append(FI_kde_values)
# Compute statistics from the values we obtained
FI_deft_median.append(np.median(FI_deft_values))
FI_deft_5.append(np.percentile(FI_deft_values, 5))
FI_deft_95.append(np.percentile(FI_deft_values, 95))
FI_kde_median.append(np.median(FI_kde_values))
FI_kde_5.append(np.percentile(FI_kde_values, 5))
FI_kde_95.append(np.percentile(FI_kde_values, 95))
# Compute relative error statistics
err_deft_values = (FI_deft_values - real_FI) / real_FI
err_deft_median.append(np.median(err_deft_values))
err_deft_5.append(np.percentile(err_deft_values, 5))
err_deft_95.append(np.percentile(err_deft_values, 95))
err_kde_values = (FI_kde_values - real_FI) / real_FI
err_kde_median.append(np.median(err_kde_values))
err_kde_5.append(np.percentile(err_kde_values, 5))
err_kde_95.append(np.percentile(err_kde_values, 95))
if __debug__:
print("Finished %d from %d values" % (i+1, len(ss)))
f = gzip.open(fname, "wb")
data = dict(ss=ss, dss=dss, FI_deft_values_all=FI_deft_values_all,
FI_kde_values_all=FI_kde_values_all,
FI_deft_median=FI_deft_median, FI_kde_median=FI_kde_median,
FI_deft_5=FI_deft_5, FI_deft_95=FI_deft_95,
FI_kde_5=FI_kde_5, FI_kde_95=FI_kde_95,
err_deft_median=err_deft_median, err_kde_median=err_kde_median,
err_deft_5=err_deft_5, err_deft_95=err_deft_95,
err_kde_5=err_kde_5, err_kde_95=err_kde_95)
pickle.dump(data, f)
f.close()
return data
def plot_data(data, fname=None):
""" Plots the data, either using plt.show or saves to a file.
Args:
data: The data produced by sim_data
fname: If None, plot to screen, else save figure as fname.
Returns: Nothing
"""
x = data['ss']
xx = np.linspace(data['ss'][0], data['ss'][-1]*1.05, 1000)
# Analytic curve
y = 2.0 / (x ** 2)
yy = 2.0 / (xx ** 2)
# Get the data to plot
y1 = np.array(data['FI_deft_median'])
y1_rel_err = np.array(data['err_deft_median'])
y2 = np.array(data['FI_kde_median'])
y2_rel_err = np.array(data['err_kde_median'])
y1_err = [np.array(y1-data['FI_deft_5']), np.array(data['FI_deft_95'])-y1]
y2_err = [np.array(y2-data['FI_kde_5']), np.array(data['FI_kde_95'])-y2]
y1_err_spread = [np.array(y1_rel_err-data['err_deft_5']), np.array(data['err_deft_95'])-y1_rel_err]
y2_err_spread = [np.array(y2_rel_err-data['err_kde_5']), np.array(data['err_kde_95'])-y2_rel_err]
# Some plotting settings
plt.style.use("publication")
fig = plt.figure()
fig.set_size_inches(5, 5)
    # Should we skip the first value because its FI is too high? 0 means no,
    # 1 means skip 1, etc...
skip_first = 1
y1_err = [y1_err[0][skip_first:], y1_err[1][skip_first:]]
y2_err = [y2_err[0][skip_first:], y2_err[1][skip_first:]]
y1_err_spread = [y1_err_spread[0][skip_first:], y1_err_spread[1][skip_first:]]
y2_err_spread = [y2_err_spread[0][skip_first:], y2_err_spread[1][skip_first:]]
# Upper plot (showing FI values)
ax1 = fig.add_subplot(211)
ax1.plot(xx, yy, "k", lw=2.0, label="True value")
lw = 1.5
deft_color = "#00a442"
ax1.errorbar(x[skip_first:], y1[skip_first:], y1_err, fmt="o", color=deft_color, lw=lw, label="FI (DEFT)")
ax1.errorbar(x[skip_first:], y2[skip_first:], y2_err, fmt="x", color="#08519c", lw=lw, label="FI (KDE)")
ax1.set_xlim(0.1, 1.05)
ax1.set_ylabel("$g_{\sigma\sigma}$")
ax1.legend(loc='upper right', prop={"size": 8}, numpoints=1)
ax1.set_ylim(0,100)
plt.setp(ax1.get_xticklabels(), visible=False)
ax1.get_xaxis().set_tick_params(direction='in', top=False)
# Relative errors plot
ax2 = fig.add_subplot(212, sharex=ax1)
ax2.errorbar(x[skip_first:], y1_rel_err[skip_first:], y1_err_spread, fmt="o", lw=lw, color=deft_color, label="DEFT Relative Error")
ax2.errorbar(x[skip_first:], y2_rel_err[skip_first:], y2_err_spread, fmt="x", lw=lw, color="#08519c", label="KDE Relative Error")
ax2.get_xaxis().set_tick_params(top=False)
ax2.set_xlim(0.1, 1.05)
ax2.set_ylim(-0.2, 1.2)
ax2.set_xlabel("$\sigma$")
ax2.set_ylabel(r"$\frac{FI-g_{\sigma\sigma}}{g_{\sigma\sigma}}$")
ax2.legend(loc='upper right', prop={"size": 8}, numpoints=1)
if fname is None:
plt.show()
else:
plt.savefig(fname, dpi=700, bbox_inches="tight")
if __name__ == '__main__':
start_time = timeit.default_timer()
# Parameters of the plot
ss = np.linspace(0.1, 1, 10)
N = 10000
rep = 100
e = 0.05
zero = np.power(10.0, -10)
G = 100
alpha = 3
seed = 100
np.random.seed(seed)
fname = "fig2_data_N_%d_rep_%d_e_%.4f_seed_%d.pklz" % (N, rep, e, seed)
if os.path.isfile(fname):
print("Found file!")
f = gzip.open(fname, "rb")
data = pickle.load(f)
f.close()
else:
print("Didn't find file, simulating...")
data = simulate_data(ss, N, rep, e, zero, G, alpha, fname)
if __debug__:
print("Obtaining the data took %.2f seconds" % (timeit.default_timer()-start_time))
plot_data(data)
| mit |
EarToEarOak/RTLSDR-Scanner | rtlsdr_scanner/dialogs_help.py | 1 | 5039 | #
# rtlsdr_scan
#
# http://eartoearoak.com/software/rtlsdr-scanner
#
# Copyright 2012 - 2015 Al Brown
#
# A frequency scanning GUI for the OsmoSDR rtl-sdr library at
# http://sdr.osmocom.org/trac/wiki/rtl-sdr
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import multiprocessing
import platform
import sys
from PIL import Image
import matplotlib
import numpy
import serial
import wx
from rtlsdr_scanner.utils_wx import load_bitmap
from rtlsdr_scanner.version import VERSION
class DialogSysInfo(wx.Dialog):
def __init__(self, parent):
wx.Dialog.__init__(self, parent=parent, title="System Information")
textVersions = wx.TextCtrl(self,
style=wx.TE_MULTILINE |
wx.TE_READONLY |
wx.TE_DONTWRAP |
wx.TE_NO_VSCROLL)
buttonOk = wx.Button(self, wx.ID_OK)
self.__populate_versions(textVersions)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(textVersions, 1, flag=wx.ALL, border=10)
sizer.Add(buttonOk, 0, flag=wx.ALL | wx.ALIGN_RIGHT, border=10)
self.SetSizerAndFit(sizer)
self.Centre()
def __populate_versions(self, control):
imageType = 'Pillow'
try:
imageVer = Image.PILLOW_VERSION
except AttributeError:
imageType = 'PIL'
imageVer = Image.VERSION
visvisVer = 'Not installed'
if not hasattr(sys, 'frozen'):
try:
import visvis as vv
visvisVer = vv.__version__
except ImportError:
pass
versions = ('Hardware:\n'
'\tProcessor: {}, {} cores\n\n'
'Software:\n'
'\tOS: {}, {}\n'
'\tPython: {}\n'
'\tmatplotlib: {}\n'
'\tNumPy: {}\n'
'\t{}: {}\n'
'\tpySerial: {}\n'
'\tvisvis: {}\n'
'\twxPython: {}\n'
).format(platform.processor(), multiprocessing.cpu_count(),
platform.platform(), platform.machine(),
platform.python_version(),
matplotlib.__version__,
numpy.version.version,
imageType, imageVer,
serial.VERSION,
visvisVer,
wx.version())
control.SetValue(versions)
dc = wx.WindowDC(control)
extent = list(dc.GetMultiLineTextExtent(versions, control.GetFont()))
extent[0] += wx.SystemSettings.GetMetric(wx.SYS_VSCROLL_X) * 2
extent[1] += wx.SystemSettings.GetMetric(wx.SYS_HSCROLL_Y) * 2
control.SetMinSize((extent[0], extent[1]))
self.Layout()
class DialogAbout(wx.Dialog):
def __init__(self, parent):
wx.Dialog.__init__(self, parent=parent, title="About")
bitmapIcon = wx.StaticBitmap(self, bitmap=load_bitmap('icon'))
textAbout = wx.StaticText(self, label="A simple spectrum analyser for "
"scanning\n with a RTL-SDR compatible USB "
"device", style=wx.ALIGN_CENTRE)
textLink = wx.HyperlinkCtrl(self, wx.ID_ANY,
label="http://eartoearoak.com/software/rtlsdr-scanner",
url="http://eartoearoak.com/software/rtlsdr-scanner")
textVersion = wx.StaticText(self,
label='v' + '.'.join([str(x) for x in VERSION]))
buttonOk = wx.Button(self, wx.ID_OK)
grid = wx.GridBagSizer(10, 10)
grid.Add(bitmapIcon, pos=(0, 0), span=(3, 1),
flag=wx.ALIGN_LEFT | wx.ALL, border=10)
grid.Add(textAbout, pos=(0, 1), span=(1, 2),
flag=wx.ALIGN_CENTRE | wx.ALL, border=10)
grid.Add(textLink, pos=(1, 1), span=(1, 2),
flag=wx.ALIGN_CENTRE | wx.ALL, border=10)
grid.Add(textVersion, pos=(2, 1), span=(1, 2),
flag=wx.ALIGN_CENTRE | wx.ALL, border=10)
grid.Add(buttonOk, pos=(3, 2),
flag=wx.ALIGN_RIGHT | wx.ALL, border=10)
self.SetSizerAndFit(grid)
self.Centre()
if __name__ == '__main__':
print 'Please run rtlsdr_scan.py'
exit(1)
| gpl-3.0 |
pombredanne/bokeh | bokeh/core/json_encoder.py | 3 | 3319 | ''' Provide a custom JSON encoder for serializing Bokeh models.
'''
from __future__ import absolute_import
import logging
log = logging.getLogger(__name__)
import calendar
import datetime as dt
import decimal
import json
import numpy as np
from ..settings import settings
from ..util.dependencies import import_optional
from ..util.serialization import transform_series, transform_array
pd = import_optional('pandas')
rd = import_optional("dateutil.relativedelta")
class BokehJSONEncoder(json.JSONEncoder):
''' Encode values to be used in Bokeh documents or communicated to
a Bokeh server.
'''
def transform_python_types(self, obj):
''' Handle special scalars, use default json encoder otherwise
'''
# Pandas Timestamp
if pd and isinstance(obj, pd.tslib.Timestamp):
return obj.value / 10**6.0 #nanosecond to millisecond
elif np.issubdtype(type(obj), np.float):
return float(obj)
elif np.issubdtype(type(obj), np.int):
return int(obj)
elif np.issubdtype(type(obj), np.bool_):
return bool(obj)
# Datetime
# datetime is a subclass of date.
elif isinstance(obj, dt.datetime):
return calendar.timegm(obj.timetuple()) * 1000. + obj.microsecond / 1000.
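        # (e.g. dt.datetime(1970, 1, 2) maps to 86400 * 1000. = 86400000.0
        #  milliseconds since the Unix epoch)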
# Date
elif isinstance(obj, dt.date):
return calendar.timegm(obj.timetuple()) * 1000.
# Numpy datetime64
elif isinstance(obj, np.datetime64):
epoch_delta = obj - np.datetime64('1970-01-01T00:00:00Z')
return (epoch_delta / np.timedelta64(1, 'ms'))
# Time
elif isinstance(obj, dt.time):
return (obj.hour * 3600 + obj.minute * 60 + obj.second) * 1000 + obj.microsecond / 1000.
elif rd and isinstance(obj, rd.relativedelta):
return dict(years=obj.years, months=obj.months, days=obj.days, hours=obj.hours,
minutes=obj.minutes, seconds=obj.seconds, microseconds=obj.microseconds)
# Decimal
elif isinstance(obj, decimal.Decimal):
return float(obj)
else:
return super(BokehJSONEncoder, self).default(obj)
def default(self, obj):
#argh! local import!
from ..model import Model
from ..colors import Color
from .properties import HasProps
## array types
if pd and isinstance(obj, (pd.Series, pd.Index)):
return transform_series(obj)
elif isinstance(obj, np.ndarray):
return transform_array(obj)
elif isinstance(obj, Model):
return obj.ref
elif isinstance(obj, HasProps):
return obj.properties_with_values(include_defaults=False)
elif isinstance(obj, Color):
return obj.to_css()
else:
return self.transform_python_types(obj)
def serialize_json(obj, encoder=BokehJSONEncoder, indent=None, **kwargs):
''' Return a serialized JSON representation of a Bokeh model.
'''
pretty = settings.pretty(False)
if pretty:
separators=(",", ": ")
else:
separators=(",", ":")
if pretty and indent is None:
indent = 2
return json.dumps(obj, cls=encoder, allow_nan=False, indent=indent, separators=separators, sort_keys=True, **kwargs)
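# A minimal usage sketch (not part of the original module): dates and NumPy
# scalars are converted by BokehJSONEncoder before the standard json dump.
#
#     import datetime as dt
#     serialize_json({'x': 1.5, 'when': dt.date(1970, 1, 2)})
#     # -> roughly '{"when":86400000.0,"x":1.5}' (milliseconds since epoch)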
| bsd-3-clause |
cython-testbed/pandas | pandas/tests/test_sorting.py | 1 | 17592 | import pytest
from itertools import product
from collections import defaultdict
import warnings
from datetime import datetime
import numpy as np
from numpy import nan
from pandas.core import common as com
from pandas import (DataFrame, MultiIndex, merge, concat, Series, compat,
_np_version_under1p10)
from pandas.util import testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
from pandas.core.sorting import (is_int64_overflow_possible,
decons_group_index,
get_group_index,
nargsort,
lexsort_indexer,
safe_sort)
class TestSorting(object):
@pytest.mark.slow
def test_int64_overflow(self):
B = np.concatenate((np.arange(1000), np.arange(1000), np.arange(500)))
A = np.arange(2500)
df = DataFrame({'A': A,
'B': B,
'C': A,
'D': B,
'E': A,
'F': B,
'G': A,
'H': B,
'values': np.random.randn(2500)})
lg = df.groupby(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'])
rg = df.groupby(['H', 'G', 'F', 'E', 'D', 'C', 'B', 'A'])
left = lg.sum()['values']
right = rg.sum()['values']
exp_index, _ = left.index.sortlevel()
tm.assert_index_equal(left.index, exp_index)
exp_index, _ = right.index.sortlevel(0)
tm.assert_index_equal(right.index, exp_index)
tups = list(map(tuple, df[['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'
]].values))
tups = com.asarray_tuplesafe(tups)
expected = df.groupby(tups).sum()['values']
for k, v in compat.iteritems(expected):
assert left[k] == right[k[::-1]]
assert left[k] == v
assert len(left) == len(right)
def test_int64_overflow_moar(self):
# GH9096
values = range(55109)
data = DataFrame.from_dict(
{'a': values, 'b': values, 'c': values, 'd': values})
grouped = data.groupby(['a', 'b', 'c', 'd'])
assert len(grouped) == len(values)
arr = np.random.randint(-1 << 12, 1 << 12, (1 << 15, 5))
i = np.random.choice(len(arr), len(arr) * 4)
        arr = np.vstack((arr, arr[i]))  # add some duplicate rows
i = np.random.permutation(len(arr))
arr = arr[i] # shuffle rows
df = DataFrame(arr, columns=list('abcde'))
df['jim'], df['joe'] = np.random.randn(2, len(df)) * 10
gr = df.groupby(list('abcde'))
# verify this is testing what it is supposed to test!
assert is_int64_overflow_possible(gr.grouper.shape)
# manually compute groupings
jim, joe = defaultdict(list), defaultdict(list)
for key, a, b in zip(map(tuple, arr), df['jim'], df['joe']):
jim[key].append(a)
joe[key].append(b)
assert len(gr) == len(jim)
mi = MultiIndex.from_tuples(jim.keys(), names=list('abcde'))
def aggr(func):
f = lambda a: np.fromiter(map(func, a), dtype='f8')
arr = np.vstack((f(jim.values()), f(joe.values()))).T
res = DataFrame(arr, columns=['jim', 'joe'], index=mi)
return res.sort_index()
assert_frame_equal(gr.mean(), aggr(np.mean))
assert_frame_equal(gr.median(), aggr(np.median))
def test_lexsort_indexer(self):
keys = [[nan] * 5 + list(range(100)) + [nan] * 5]
# orders=True, na_position='last'
result = lexsort_indexer(keys, orders=True, na_position='last')
exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
# orders=True, na_position='first'
result = lexsort_indexer(keys, orders=True, na_position='first')
exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
# orders=False, na_position='last'
result = lexsort_indexer(keys, orders=False, na_position='last')
exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
# orders=False, na_position='first'
result = lexsort_indexer(keys, orders=False, na_position='first')
exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
def test_nargsort(self):
# np.argsort(items) places NaNs last
items = [nan] * 5 + list(range(100)) + [nan] * 5
# np.argsort(items2) may not place NaNs first
items2 = np.array(items, dtype='O')
try:
# GH 2785; due to a regression in NumPy1.6.2
np.argsort(np.array([[1, 2], [1, 3], [1, 2]], dtype='i'))
np.argsort(items2, kind='mergesort')
except TypeError:
pytest.skip('requested sort not available for type')
# mergesort is the most difficult to get right because we want it to be
# stable.
# According to numpy/core/tests/test_multiarray, """The number of
# sorted items must be greater than ~50 to check the actual algorithm
# because quick and merge sort fall over to insertion sort for small
# arrays."""
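        # (Illustration: a stable sort keeps equal keys in input order, e.g.
        #  sorting [(1, 'b'), (0, 'c'), (1, 'a')] by the first element must
        #  give [(0, 'c'), (1, 'b'), (1, 'a')] -- 'b' stays ahead of 'a'.)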
# mergesort, ascending=True, na_position='last'
result = nargsort(items, kind='mergesort', ascending=True,
na_position='last')
exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=True, na_position='first'
result = nargsort(items, kind='mergesort', ascending=True,
na_position='first')
exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='last'
result = nargsort(items, kind='mergesort', ascending=False,
na_position='last')
exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='first'
result = nargsort(items, kind='mergesort', ascending=False,
na_position='first')
exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=True, na_position='last'
result = nargsort(items2, kind='mergesort', ascending=True,
na_position='last')
exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=True, na_position='first'
result = nargsort(items2, kind='mergesort', ascending=True,
na_position='first')
exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='last'
result = nargsort(items2, kind='mergesort', ascending=False,
na_position='last')
exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='first'
result = nargsort(items2, kind='mergesort', ascending=False,
na_position='first')
exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
class TestMerge(object):
@pytest.mark.slow
def test_int64_overflow_issues(self):
# #2690, combinatorial explosion
df1 = DataFrame(np.random.randn(1000, 7),
columns=list('ABCDEF') + ['G1'])
df2 = DataFrame(np.random.randn(1000, 7),
columns=list('ABCDEF') + ['G2'])
# it works!
result = merge(df1, df2, how='outer')
assert len(result) == 2000
low, high, n = -1 << 10, 1 << 10, 1 << 20
left = DataFrame(np.random.randint(low, high, (n, 7)),
columns=list('ABCDEFG'))
left['left'] = left.sum(axis=1)
# one-2-one match
i = np.random.permutation(len(left))
right = left.iloc[i].copy()
right.columns = right.columns[:-1].tolist() + ['right']
right.index = np.arange(len(right))
right['right'] *= -1
out = merge(left, right, how='outer')
assert len(out) == len(left)
assert_series_equal(out['left'], - out['right'], check_names=False)
result = out.iloc[:, :-2].sum(axis=1)
assert_series_equal(out['left'], result, check_names=False)
assert result.name is None
out.sort_values(out.columns.tolist(), inplace=True)
out.index = np.arange(len(out))
for how in ['left', 'right', 'outer', 'inner']:
assert_frame_equal(out, merge(left, right, how=how, sort=True))
# check that left merge w/ sort=False maintains left frame order
out = merge(left, right, how='left', sort=False)
assert_frame_equal(left, out[left.columns.tolist()])
out = merge(right, left, how='left', sort=False)
assert_frame_equal(right, out[right.columns.tolist()])
# one-2-many/none match
n = 1 << 11
left = DataFrame(np.random.randint(low, high, (n, 7)).astype('int64'),
columns=list('ABCDEFG'))
# confirm that this is checking what it is supposed to check
shape = left.apply(Series.nunique).values
assert is_int64_overflow_possible(shape)
# add duplicates to left frame
left = concat([left, left], ignore_index=True)
right = DataFrame(np.random.randint(low, high, (n // 2, 7))
.astype('int64'),
columns=list('ABCDEFG'))
# add duplicates & overlap with left to the right frame
i = np.random.choice(len(left), n)
right = concat([right, right, left.iloc[i]], ignore_index=True)
left['left'] = np.random.randn(len(left))
right['right'] = np.random.randn(len(right))
# shuffle left & right frames
i = np.random.permutation(len(left))
left = left.iloc[i].copy()
left.index = np.arange(len(left))
i = np.random.permutation(len(right))
right = right.iloc[i].copy()
right.index = np.arange(len(right))
# manually compute outer merge
ldict, rdict = defaultdict(list), defaultdict(list)
for idx, row in left.set_index(list('ABCDEFG')).iterrows():
ldict[idx].append(row['left'])
for idx, row in right.set_index(list('ABCDEFG')).iterrows():
rdict[idx].append(row['right'])
vals = []
for k, lval in ldict.items():
rval = rdict.get(k, [np.nan])
for lv, rv in product(lval, rval):
vals.append(k + tuple([lv, rv]))
for k, rval in rdict.items():
if k not in ldict:
for rv in rval:
vals.append(k + tuple([np.nan, rv]))
def align(df):
df = df.sort_values(df.columns.tolist())
df.index = np.arange(len(df))
return df
def verify_order(df):
kcols = list('ABCDEFG')
assert_frame_equal(df[kcols].copy(),
df[kcols].sort_values(kcols, kind='mergesort'))
out = DataFrame(vals, columns=list('ABCDEFG') + ['left', 'right'])
out = align(out)
jmask = {'left': out['left'].notna(),
'right': out['right'].notna(),
'inner': out['left'].notna() & out['right'].notna(),
'outer': np.ones(len(out), dtype='bool')}
for how in 'left', 'right', 'outer', 'inner':
mask = jmask[how]
frame = align(out[mask].copy())
assert mask.all() ^ mask.any() or how == 'outer'
for sort in [False, True]:
res = merge(left, right, how=how, sort=sort)
if sort:
verify_order(res)
# as in GH9092 dtypes break with outer/right join
assert_frame_equal(frame, align(res),
check_dtype=how not in ('right', 'outer'))
def test_decons():
def testit(label_list, shape):
group_index = get_group_index(label_list, shape, sort=True, xnull=True)
label_list2 = decons_group_index(group_index, shape)
for a, b in zip(label_list, label_list2):
tm.assert_numpy_array_equal(a, b)
shape = (4, 5, 6)
label_list = [np.tile([0, 1, 2, 3, 0, 1, 2, 3], 100).astype(np.int64),
np.tile([0, 2, 4, 3, 0, 1, 2, 3], 100).astype(np.int64),
np.tile([5, 1, 0, 2, 3, 0, 5, 4], 100).astype(np.int64)]
testit(label_list, shape)
shape = (10000, 10000)
label_list = [np.tile(np.arange(10000, dtype=np.int64), 5),
np.tile(np.arange(10000, dtype=np.int64), 5)]
testit(label_list, shape)
class TestSafeSort(object):
def test_basic_sort(self):
values = [3, 1, 2, 0, 4]
result = safe_sort(values)
expected = np.array([0, 1, 2, 3, 4])
tm.assert_numpy_array_equal(result, expected)
values = list("baaacb")
result = safe_sort(values)
expected = np.array(list("aaabbc"), dtype='object')
tm.assert_numpy_array_equal(result, expected)
values = []
result = safe_sort(values)
expected = np.array([])
tm.assert_numpy_array_equal(result, expected)
def test_labels(self):
values = [3, 1, 2, 0, 4]
expected = np.array([0, 1, 2, 3, 4])
labels = [0, 1, 1, 2, 3, 0, -1, 4]
result, result_labels = safe_sort(values, labels)
expected_labels = np.array([3, 1, 1, 2, 0, 3, -1, 4], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
# na_sentinel
labels = [0, 1, 1, 2, 3, 0, 99, 4]
result, result_labels = safe_sort(values, labels,
na_sentinel=99)
expected_labels = np.array([3, 1, 1, 2, 0, 3, 99, 4], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
# out of bound indices
labels = [0, 101, 102, 2, 3, 0, 99, 4]
result, result_labels = safe_sort(values, labels)
expected_labels = np.array([3, -1, -1, 2, 0, 3, -1, 4], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
labels = []
result, result_labels = safe_sort(values, labels)
expected_labels = np.array([], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
def test_mixed_integer(self):
values = np.array(['b', 1, 0, 'a', 0, 'b'], dtype=object)
result = safe_sort(values)
expected = np.array([0, 0, 1, 'a', 'b', 'b'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
values = np.array(['b', 1, 0, 'a'], dtype=object)
labels = [0, 1, 2, 3, 0, -1, 1]
result, result_labels = safe_sort(values, labels)
expected = np.array([0, 1, 'a', 'b'], dtype=object)
expected_labels = np.array([3, 1, 0, 2, 3, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
def test_mixed_integer_from_list(self):
values = ['b', 1, 0, 'a', 0, 'b']
result = safe_sort(values)
expected = np.array([0, 0, 1, 'a', 'b', 'b'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_unsortable(self):
# GH 13714
arr = np.array([1, 2, datetime.now(), 0, 3], dtype=object)
if compat.PY2 and not _np_version_under1p10:
# RuntimeWarning: tp_compare didn't return -1 or -2 for exception
with warnings.catch_warnings():
pytest.raises(TypeError, safe_sort, arr)
else:
pytest.raises(TypeError, safe_sort, arr)
def test_exceptions(self):
with tm.assert_raises_regex(TypeError,
"Only list-like objects are allowed"):
safe_sort(values=1)
with tm.assert_raises_regex(TypeError,
"Only list-like objects or None"):
safe_sort(values=[0, 1, 2], labels=1)
with tm.assert_raises_regex(ValueError,
"values should be unique"):
safe_sort(values=[0, 1, 2, 1], labels=[0, 1])
| bsd-3-clause |
gautam1168/tardis | tardis/gui.py | 2 | 31607 | import numpy as np
import matplotlib
#matplotlib.use('KtAgg')
import matplotlib.pylab as plt
import matplotlib.gridspec as gridspec
from matplotlib import colors
from matplotlib.patches import Circle
from matplotlib.figure import *
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4 import NavigationToolbar2QT as NavigationToolbar
from PySide import QtGui, QtCore
from astropy import units as u
from tardis import analysis, util
# def current_ion_index(index, index_list):
# if not index in index_list:
# return None
# if not (index - 1) in index_list:
# return 0
# else:
# return current_ion_index(index - 1, index_list) + 1
#
# def current_ion_index(index, duplicate_list):
# if duplicate_list[index - 1] != duplicate_list[index]:
# return 0
# else:
# return current_ion_index(index - 1, duplicate_list) + 1
class ModelViewer(QtGui.QWidget):
def __init__(self, parent=None):
# assumes that qt has already been initialized by starting IPython with the flag "--pylab=qt"
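        # A hedged usage sketch (not from the original source): from such an
        # IPython session one would typically do something like
        #     mv = ModelViewer()
        #     mv.show_model(radial1d_model)
        # where `radial1d_model` is a hypothetical, already-run TARDIS model.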
app = QtCore.QCoreApplication.instance()
if app is None:
app = QtGui.QApplication([])
try:
from IPython.lib.guisupport import start_event_loop_qt4
start_event_loop_qt4(app)
except ImportError:
app.exec_()
super(ModelViewer, self).__init__(parent)
self.model = None
self.shell_info = {}
self.line_info = []
self.setGeometry(20, 35, 1250, 500)
self.setWindowTitle('Shells Viewer')
self.tablemodel = SimpleTableModel([['Shell: '], ["Rad. temp", "Ws"]], (1, 0))
self.tableview = QtGui.QTableView()
self.graph = MatplotlibWidget(self, 'model')
self.graph_label = QtGui.QLabel('Select Property:')
self.graph_button = QtGui.QToolButton()
self.spectrum = MatplotlibWidget(self)
self.spectrum_label = QtGui.QLabel('Select Spectrum:')
self.spectrum_button = QtGui.QToolButton()
self.spectrum_span_button = QtGui.QPushButton('Show Wavelength Range')
self.spectrum_line_info_button = QtGui.QPushButton('Show Line Info')
self.layout = QtGui.QHBoxLayout()
self.graph_sublayout = QtGui.QVBoxLayout()
self.graph_subsublayout = QtGui.QHBoxLayout()
self.spectrum_sublayout = QtGui.QVBoxLayout()
self.spectrum_subsublayout = QtGui.QHBoxLayout()
self.tableview.setMinimumWidth(200)
self.tableview.connect(self.tableview.verticalHeader(), QtCore.SIGNAL('sectionClicked(int)'), self.graph.highlight_shell)
self.tableview.connect(self.tableview.verticalHeader(), QtCore.SIGNAL('sectionDoubleClicked(int)'),
self.on_header_double_clicked)
self.graph_button.setText('Rad. temp')
self.spectrum_button.setText('spec_flux_angstrom')
self.graph_button.setPopupMode(QtGui.QToolButton.MenuButtonPopup)
self.spectrum_button.setPopupMode(QtGui.QToolButton.MenuButtonPopup)
self.graph_button.setMenu(QtGui.QMenu(self.graph_button))
self.spectrum_button.setMenu(QtGui.QMenu(self.spectrum_button))
self.graph_button.menu().addAction('Rad. temp').triggered.connect(self.change_graph_to_t_rads)
self.graph_button.menu().addAction('Ws').triggered.connect(self.change_graph_to_ws)
self.spectrum_button.menu().addAction('spec_flux_angstrom').triggered.connect(self.change_spectrum_to_spec_flux_angstrom)
self.spectrum_button.menu().addAction('spec_virtual_flux_angstrom').triggered.connect(self.change_spectrum_to_spec_virtual_flux_angstrom)
self.spectrum_span_button.clicked.connect(self.spectrum.show_span)
self.spectrum_line_info_button.clicked.connect(self.spectrum.show_line_info)
self.layout.addWidget(self.tableview)
self.graph_subsublayout.addWidget(self.graph_label)
self.graph_subsublayout.addWidget(self.graph_button)
self.graph_sublayout.addLayout(self.graph_subsublayout)
self.graph_sublayout.addWidget(self.graph)
self.layout.addLayout(self.graph_sublayout)
self.spectrum_subsublayout.addWidget(self.spectrum_span_button)
self.spectrum_subsublayout.addWidget(self.spectrum_label)
self.spectrum_subsublayout.addWidget(self.spectrum_button)
self.spectrum_sublayout.addLayout(self.spectrum_subsublayout)
self.spectrum_sublayout.addWidget(self.spectrum_line_info_button)
self.spectrum_sublayout.addWidget(self.spectrum)
self.spectrum_sublayout.addWidget(self.spectrum.toolbar)
self.layout.addLayout(self.spectrum_sublayout)
self.spectrum_line_info_button.hide()
self.setLayout(self.layout)
def show_model(self, model=None):
if model:
self.change_model(model)
self.tableview.setModel(self.tablemodel)
self.plot_model()
self.plot_spectrum()
self.show()
def update_data(self, model=None):
if model:
self.change_model(model)
self.tablemodel.updateTable()
for index in self.shell_info.keys():
self.shell_info[index].update_tables()
self.plot_model()
        if self.graph_button.text() == 'Ws':
self.change_graph_to_ws()
self.plot_spectrum()
        if self.spectrum_button.text() == 'spec_virtual_flux_angstrom':
self.change_spectrum_to_spec_virtual_flux_angstrom()
self.show()
def change_model(self, model):
self.model = model
self.tablemodel.arraydata = []
self.tablemodel.addData(model.t_rads.value.tolist())
self.tablemodel.addData(model.ws.tolist())
def change_spectrum_to_spec_virtual_flux_angstrom(self):
if self.model.spectrum_virtual.luminosity_density_lambda is None:
luminosity_density_lambda = np.zeros_like(self.model.spectrum_virtual.wavelength)
else:
luminosity_density_lambda = self.model.spectrum_virtual.luminosity_density_lambda.value
        self.change_spectrum(luminosity_density_lambda, 'spec_virtual_flux_angstrom')
def change_spectrum_to_spec_flux_angstrom(self):
if self.model.spectrum.luminosity_density_lambda is None:
luminosity_density_lambda = np.zeros_like(self.model.spectrum.wavelength)
else:
luminosity_density_lambda = self.model.spectrum.luminosity_density_lambda.value
self.change_spectrum(luminosity_density_lambda, 'spec_flux_angstrom')
def change_spectrum(self, data, name):
self.spectrum_button.setText(name)
self.spectrum.dataplot[0].set_ydata(data)
self.spectrum.ax.relim()
self.spectrum.ax.autoscale()
self.spectrum.draw()
def plot_spectrum(self):
self.spectrum.ax.clear()
self.spectrum.ax.set_title('Spectrum')
self.spectrum.ax.set_xlabel('Wavelength (A)')
self.spectrum.ax.set_ylabel('Intensity')
wavelength = self.model.spectrum.wavelength.value
if self.model.spectrum.luminosity_density_lambda is None:
luminosity_density_lambda = np.zeros_like(wavelength)
else:
luminosity_density_lambda = self.model.spectrum.luminosity_density_lambda.value
self.spectrum.dataplot = self.spectrum.ax.plot(wavelength, luminosity_density_lambda, label='b')
self.spectrum.draw()
def change_graph_to_ws(self):
self.change_graph(self.model.ws, 'Ws', '')
def change_graph_to_t_rads(self):
self.change_graph(self.model.t_rads.value, 't_rads', '(K)')
def change_graph(self, data, name, unit):
self.graph_button.setText(name)
self.graph.dataplot[0].set_ydata(data)
self.graph.ax1.relim()
self.graph.ax1.autoscale()
self.graph.ax1.set_title(name + ' vs Shell')
self.graph.ax1.set_ylabel(name + ' ' + unit)
normalizer = colors.Normalize(vmin=data.min(), vmax=data.max())
color_map = plt.cm.ScalarMappable(norm=normalizer, cmap=plt.cm.jet)
color_map.set_array(data)
self.graph.cb.set_clim(vmin=data.min(), vmax=data.max())
self.graph.cb.update_normal(color_map)
if unit == '(K)':
unit = 'T (K)'
self.graph.cb.set_label(unit)
for i, item in enumerate(data):
self.shells[i].set_facecolor(color_map.to_rgba(item))
self.graph.draw()
def plot_model(self):
self.graph.ax1.clear()
self.graph.ax1.set_title('Rad. Temp vs Shell')
self.graph.ax1.set_xlabel('Shell Number')
self.graph.ax1.set_ylabel('Rad. Temp (K)')
self.graph.ax1.yaxis.get_major_formatter().set_powerlimits((0, 1))
self.graph.dataplot = self.graph.ax1.plot(range(len(self.model.t_rads.value)), self.model.t_rads.value)
self.graph.ax2.clear()
self.graph.ax2.set_title('Shell View')
self.graph.ax2.set_xlabel('Arbitrary')
self.graph.ax2.set_ylabel('Arbitrary')
self.shells = []
t_rad_normalizer = colors.Normalize(vmin=self.model.t_rads.value.min(), vmax=self.model.t_rads.value.max())
t_rad_color_map = plt.cm.ScalarMappable(norm=t_rad_normalizer, cmap=plt.cm.jet)
t_rad_color_map.set_array(self.model.t_rads.value)
if self.graph.cb:
self.graph.cb.set_clim(vmin=self.model.t_rads.value.min(), vmax=self.model.t_rads.value.max())
self.graph.cb.update_normal(t_rad_color_map)
else:
self.graph.cb = self.graph.figure.colorbar(t_rad_color_map)
self.graph.cb.set_label('T (K)')
self.graph.normalizing_factor = 0.2 * (self.model.tardis_config.structure.r_outer.value[-1] - self.model.tardis_config.structure.r_inner.value[0]) / self.model.tardis_config.structure.r_inner.value[0]
#self.graph.normalizing_factor = 8e-16
for i, t_rad in enumerate(self.model.t_rads.value):
r_inner = self.model.tardis_config.structure.r_inner.value[i] * self.graph.normalizing_factor
r_outer = self.model.tardis_config.structure.r_outer.value[i] * self.graph.normalizing_factor
self.shells.append(Shell(i, (0,0), r_inner, r_outer, facecolor=t_rad_color_map.to_rgba(t_rad),
picker=self.graph.shell_picker))
self.graph.ax2.add_patch(self.shells[i])
self.graph.ax2.set_xlim(0, self.model.tardis_config.structure.r_outer.value[-1] * self.graph.normalizing_factor)
self.graph.ax2.set_ylim(0, self.model.tardis_config.structure.r_outer.value[-1] * self.graph.normalizing_factor)
self.graph.figure.tight_layout()
self.graph.draw()
def on_header_double_clicked(self, index):
self.shell_info[index] = ShellInfo(index, self)
class ShellInfo(QtGui.QDialog):
def __init__(self, index, parent=None):
super(ShellInfo, self).__init__(parent)
self.parent = parent
self.shell_index = index
self.setGeometry(400, 150, 200, 400)
self.setWindowTitle('Shell %d Abundances' % (self.shell_index + 1))
self.atomstable = QtGui.QTableView()
self.ionstable = QtGui.QTableView()
self.levelstable = QtGui.QTableView()
self.atomstable.connect(self.atomstable.verticalHeader(), QtCore.SIGNAL('sectionClicked(int)'),
self.on_atom_header_double_clicked)
self.table1_data = self.parent.model.tardis_config.abundances[self.shell_index]
self.atomsdata = SimpleTableModel([['Z = '], ['Count (Shell %d)' % (self.shell_index + 1)]], iterate_header=(2, 0), index_info=self.table1_data.index.values.tolist())
self.ionsdata = None
self.levelsdata = None
self.atomsdata.addData(self.table1_data.values.tolist())
self.atomstable.setModel(self.atomsdata)
self.layout = QtGui.QHBoxLayout()
self.layout.addWidget(self.atomstable)
self.layout.addWidget(self.ionstable)
self.layout.addWidget(self.levelstable)
self.setLayout(self.layout)
self.ionstable.hide()
self.levelstable.hide()
self.show()
def on_atom_header_double_clicked(self, index):
self.current_atom_index = self.table1_data.index.values.tolist()[index]
self.table2_data = self.parent.model.plasma_array.ion_populations[self.shell_index].ix[self.current_atom_index]
self.ionsdata = SimpleTableModel([['Ion: '], ['Count (Z = %d)' % self.current_atom_index]], iterate_header=(2, 0), index_info=self.table2_data.index.values.tolist())
normalized_data = []
for item in self.table2_data.values:
normalized_data.append(float(item /
self.parent.model.tardis_config.number_densities[self.shell_index]
.ix[self.current_atom_index]))
self.ionsdata.addData(normalized_data)
self.ionstable.setModel(self.ionsdata)
self.ionstable.connect(self.ionstable.verticalHeader(), QtCore.SIGNAL('sectionClicked(int)'),
self.on_ion_header_double_clicked)
self.levelstable.hide()
self.ionstable.setColumnWidth(0, 120)
self.ionstable.show()
self.setGeometry(400, 150, 380, 400)
self.show()
def on_ion_header_double_clicked(self, index):
self.current_ion_index = self.table2_data.index.values.tolist()[index]
self.table3_data = self.parent.model.plasma_array.level_populations[self.shell_index].ix[self.current_atom_index,
self.current_ion_index]
self.levelsdata = SimpleTableModel([['Level: '], ['Count (Ion %d)' % self.current_ion_index]], iterate_header=(2, 0), index_info=self.table3_data.index.values.tolist())
normalized_data = []
for item in self.table3_data.values.tolist():
normalized_data.append(float(item / self.table2_data.ix[self.current_ion_index]))
self.levelsdata.addData(normalized_data)
self.levelstable.setModel(self.levelsdata)
self.levelstable.setColumnWidth(0, 120)
self.levelstable.show()
self.setGeometry(400, 150, 580, 400)
self.show()
def update_tables(self):
self.table1_data = self.parent.model.plasma_array[self.shell_index].number_densities
self.atomsdata.index_info=self.table1_data.index.values.tolist()
self.atomsdata.arraydata = []
self.atomsdata.addData(self.table1_data.values.tolist())
self.atomsdata.updateTable()
self.ionstable.hide()
self.levelstable.hide()
self.setGeometry(400, 150, 200, 400)
self.show()
class LineInteractionTables(QtGui.QWidget):
def __init__(self, line_interaction_analysis, atom_data, description):
super(LineInteractionTables, self).__init__()
self.text_description = QtGui.QLabel(str(description))
self.species_table = QtGui.QTableView()
self.transitions_table = QtGui.QTableView()
self.layout = QtGui.QHBoxLayout()
self.line_interaction_analysis = line_interaction_analysis
self.atom_data = atom_data
line_interaction_species_group = line_interaction_analysis.last_line_in.groupby(['atomic_number', 'ion_number'])
self.species_selected = sorted(line_interaction_species_group.groups.keys())
species_symbols = [util.species_tuple_to_string(item, atom_data) for item in self.species_selected]
species_table_model = SimpleTableModel([species_symbols, ['Species']])
species_abundances = (line_interaction_species_group.wavelength.count().astype(float) /
line_interaction_analysis.last_line_in.wavelength.count()).astype(float).tolist()
species_abundances = map(float, species_abundances)
species_table_model.addData(species_abundances)
self.species_table.setModel(species_table_model)
line_interaction_species_group.wavelength.count()
self.layout.addWidget(self.text_description)
self.layout.addWidget(self.species_table)
self.species_table.connect(self.species_table.verticalHeader(), QtCore.SIGNAL('sectionClicked(int)'),
self.on_species_clicked)
self.layout.addWidget(self.transitions_table)
self.setLayout(self.layout)
self.show()
def on_species_clicked(self, index):
current_species = self.species_selected[index]
last_line_in = self.line_interaction_analysis.last_line_in
last_line_out = self.line_interaction_analysis.last_line_out
last_line_in_filter = (last_line_in.atomic_number == current_species[0]).values & \
(last_line_in.ion_number == current_species[1]).values
current_last_line_in = last_line_in[last_line_in_filter].reset_index()
current_last_line_out = last_line_out[last_line_in_filter].reset_index()
current_last_line_in['line_id_out'] = current_last_line_out['line_id']
last_line_in_string = []
last_line_count = []
grouped_line_interactions = current_last_line_in.groupby(['line_id', 'line_id_out'])
exc_deexc_string = 'exc. %d-%d (%.2f A) de-exc. %d-%d (%.2f A)'
for line_id, row in grouped_line_interactions.wavelength.count().iteritems():
current_line_in = self.atom_data.lines.ix[line_id[0]]
current_line_out = self.atom_data.lines.ix[line_id[1]]
last_line_in_string.append(exc_deexc_string % (current_line_in['level_number_lower'],
current_line_in['level_number_upper'],
current_line_in['wavelength'],
current_line_out['level_number_upper'],
current_line_out['level_number_lower'],
current_line_out['wavelength']))
last_line_count.append(int(row))
last_line_in_model = SimpleTableModel([last_line_in_string, ['Num. pkts %d' %
current_last_line_in.wavelength.count()]])
last_line_in_model.addData(last_line_count)
self.transitions_table.setModel(last_line_in_model)
class LineInfo(QtGui.QDialog):
def __init__(self, parent, wavelength_start, wavelength_end):
super(LineInfo, self).__init__(parent)
self.parent = parent
self.setGeometry(180 + len(self.parent.line_info) * 20, 150, 250, 400)
self.setWindowTitle('Line Interaction: %.2f - %.2f (A) ' % (wavelength_start, wavelength_end,
))
self.layout = QtGui.QVBoxLayout()
packet_nu_line_interaction = analysis.LastLineInteraction.from_model(self.parent.model)
packet_nu_line_interaction.packet_filter_mode = 'packet_nu'
packet_nu_line_interaction.wavelength_start = wavelength_start * u.angstrom
packet_nu_line_interaction.wavelength_end = wavelength_end * u.angstrom
line_in_nu_line_interaction = analysis.LastLineInteraction.from_model(self.parent.model)
line_in_nu_line_interaction.packet_filter_mode = 'line_in_nu'
line_in_nu_line_interaction.wavelength_start = wavelength_start * u.angstrom
line_in_nu_line_interaction.wavelength_end = wavelength_end * u.angstrom
self.layout.addWidget(LineInteractionTables(packet_nu_line_interaction, self.parent.model.atom_data, 'filtered by frequency of packet'))
self.layout.addWidget(LineInteractionTables(line_in_nu_line_interaction, self.parent.model.atom_data, 'filtered by frequency of line interaction'))
self.setLayout(self.layout)
self.show()
def get_data(self, wavelength_start, wavelength_end):
self.wavelength_start = wavelength_start * u.angstrom
self.wavelength_end = wavelength_end * u.angstrom
last_line_in_ids, last_line_out_ids = analysis.get_last_line_interaction(self.wavelength_start, self.wavelength_end, self.parent.model)
self.last_line_in, self.last_line_out = self.parent.model.atom_data.lines.ix[last_line_in_ids], self.parent.model.atom_data.lines.ix[last_line_out_ids]
self.grouped_lines_in, self.grouped_lines_out = self.last_line_in.groupby(['atomic_number', 'ion_number']), self.last_line_out.groupby(['atomic_number', 'ion_number'])
self.ions_in, self.ions_out = self.grouped_lines_in.groups.keys(), self.grouped_lines_out.groups.keys()
self.ions_in.sort()
self.ions_out.sort()
self.header_list = []
self.ion_table = (self.grouped_lines_in.wavelength.count().astype(float) / self.grouped_lines_in.wavelength.count().sum()).values.tolist()
for z, ion in self.ions_in:
self.header_list.append('Z = %d: Ion %d' % (z, ion))
def get_transition_table(self, lines, atom, ion):
grouped = lines.groupby(['atomic_number', 'ion_number'])
transitions_with_duplicates = lines.ix[grouped.groups[(atom, ion)]].groupby(['level_number_lower', 'level_number_upper']).groups
transitions = lines.ix[grouped.groups[(atom, ion)]].drop_duplicates().groupby(['level_number_lower', 'level_number_upper']).groups
transitions_count = []
transitions_parsed = []
for item in transitions.values():
c = 0
for ditem in transitions_with_duplicates.values():
c += ditem.count(item[0])
transitions_count.append(c)
s = 0
for item in transitions_count:
s += item
for index in range(len(transitions_count)):
transitions_count[index] /= float(s)
for key, value in transitions.items():
transitions_parsed.append("%d-%d (%.2f A)" % (key[0], key[1], self.parent.model.atom_data.lines.ix[value[0]]['wavelength']))
return transitions_parsed, transitions_count
def on_atom_clicked(self, index):
self.transitionsin_parsed, self.transitionsin_count = self.get_transition_table(self.last_line_in, self.ions_in[index][0], self.ions_in[index][1])
self.transitionsout_parsed, self.transitionsout_count = self.get_transition_table(self.last_line_out, self.ions_out[index][0], self.ions_out[index][1])
self.transitionsindata = SimpleTableModel([self.transitionsin_parsed, ['Lines In']])
self.transitionsoutdata = SimpleTableModel([self.transitionsout_parsed, ['Lines Out']])
self.transitionsindata.addData(self.transitionsin_count)
self.transitionsoutdata.addData(self.transitionsout_count)
self.transitionsintable.setModel(self.transitionsindata)
self.transitionsouttable.setModel(self.transitionsoutdata)
self.transitionsintable.show()
self.transitionsouttable.show()
self.setGeometry(180 + len(self.parent.line_info) * 20, 150, 750, 400)
self.show()
def on_atom_clicked2(self, index):
self.transitionsin_parsed, self.transitionsin_count = self.get_transition_table(self.last_line_in, self.ions_in[index][0], self.ions_in[index][1])
self.transitionsout_parsed, self.transitionsout_count = self.get_transition_table(self.last_line_out, self.ions_out[index][0], self.ions_out[index][1])
self.transitionsindata = SimpleTableModel([self.transitionsin_parsed, ['Lines In']])
self.transitionsoutdata = SimpleTableModel([self.transitionsout_parsed, ['Lines Out']])
self.transitionsindata.addData(self.transitionsin_count)
self.transitionsoutdata.addData(self.transitionsout_count)
self.transitionsintable2.setModel(self.transitionsindata)
self.transitionsouttable2.setModel(self.transitionsoutdata)
self.transitionsintable2.show()
self.transitionsouttable2.show()
self.setGeometry(180 + len(self.parent.line_info) * 20, 150, 750, 400)
self.show()
class SimpleTableModel(QtCore.QAbstractTableModel):
def __init__(self, headerdata=None, iterate_header=(0, 0), index_info=None, parent=None, *args):
super(SimpleTableModel, self).__init__(parent, *args)
self.headerdata = headerdata
self.arraydata = []
self.iterate_header = iterate_header
self.index_info = index_info
def addData(self, datain):
self.arraydata.append(datain)
def rowCount(self, parent=QtCore.QModelIndex()):
return len(self.arraydata[0])
def columnCount(self, parent=QtCore.QModelIndex()):
return len(self.arraydata)
def headerData(self, section, orientation, role=QtCore.Qt.DisplayRole):
if orientation == QtCore.Qt.Vertical and role == QtCore.Qt.DisplayRole:
if self.iterate_header[0] == 1:
return self.headerdata[0][0] + str(section + 1)
elif self.iterate_header[0] == 2:
if self.index_info:
return self.headerdata[0][0] + str(self.index_info[section])
else:
return self.headerdata[0][0] + str(section + 1)
else:
return self.headerdata[0][section]
elif orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:
if self.iterate_header[1] == 1:
return self.headerdata[1][0] + str(section + 1)
elif self.iterate_header[1] == 2:
if self.index_info:
return self.headerdata[1][0] + str(self.index_info[section])
else:
return self.headerdata[1][section]
return ""
def data(self, index, role=QtCore.Qt.DisplayRole):
if not index.isValid():
return None
elif role != QtCore.Qt.DisplayRole:
return None
return (self.arraydata[index.column()][index.row()])
def setData(self, index, value, role=QtCore.Qt.EditRole):
if not index.isValid():
return False
elif role != QtCore.Qt.EditRole:
return False
self.arraydata[index.column()][index.row()] = value
self.emit(QtCore.SIGNAL('dataChanged(const QModelIndex &, const QModelIndex &)'), index, index)
return True
def updateTable(self):
for r in range(self.rowCount()):
for c in range(self.columnCount()):
index = self.createIndex(r, c)
self.setData(index, self.arraydata[c][r])
class MatplotlibWidget(FigureCanvas):
def __init__(self, parent, fig=None):
self.parent = parent
self.figure = Figure()
self.cid = {}
if fig != 'model':
self.ax = self.figure.add_subplot(111)
else:
self.gs = gridspec.GridSpec(2, 1, height_ratios=[1, 3])
self.ax1 = self.figure.add_subplot(self.gs[0])
self.ax2 = self.figure.add_subplot(self.gs[1])
self.cb = None
self.span = None
super(MatplotlibWidget, self).__init__(self.figure)
super(MatplotlibWidget, self).setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
super(MatplotlibWidget, self).updateGeometry()
if fig != 'model':
self.toolbar = NavigationToolbar(self, parent)
self.cid[0] = self.figure.canvas.mpl_connect('pick_event', self.on_span_pick)
else:
self.cid[0] = self.figure.canvas.mpl_connect('pick_event', self.on_shell_pick)
def show_line_info(self):
self.parent.line_info.append(LineInfo(self.parent, self.span.xy[0][0], self.span.xy[2][0]))
def show_span(self, garbage=0, left=5000, right=10000):
if self.parent.spectrum_span_button.text() == 'Show Wavelength Range':
if not self.span:
self.span = self.ax.axvspan(left, right, color='r', alpha=0.3, picker=self.span_picker)
else:
self.span.set_visible(True)
self.parent.spectrum_line_info_button.show()
self.parent.spectrum_span_button.setText('Hide Wavelength Range')
else:
self.span.set_visible(False)
self.parent.spectrum_line_info_button.hide()
self.parent.spectrum_span_button.setText('Show Wavelength Range')
self.draw()
def on_span_pick(self, event):
self.figure.canvas.mpl_disconnect(self.cid[0])
self.span.set_edgecolor('m')
self.span.set_linewidth(5)
self.draw()
if event.edge == 'left':
self.cid[1] = self.figure.canvas.mpl_connect('motion_notify_event', self.on_span_left_motion)
elif event.edge == 'right':
self.cid[1] = self.figure.canvas.mpl_connect('motion_notify_event', self.on_span_right_motion)
self.cid[2] = self.figure.canvas.mpl_connect('button_press_event', self.on_span_resized)
def on_span_left_motion(self, mouseevent):
if mouseevent.xdata < self.span.xy[2][0]:
self.span.xy[0][0] = mouseevent.xdata
self.span.xy[1][0] = mouseevent.xdata
self.span.xy[4][0] = mouseevent.xdata
self.draw()
def on_span_right_motion(self, mouseevent):
if mouseevent.xdata > self.span.xy[0][0]:
self.span.xy[2][0] = mouseevent.xdata
self.span.xy[3][0] = mouseevent.xdata
self.draw()
def on_span_resized(self, mouseevent):
self.figure.canvas.mpl_disconnect(self.cid[1])
self.figure.canvas.mpl_disconnect(self.cid[2])
self.cid[0] = self.figure.canvas.mpl_connect('pick_event', self.on_span_pick)
self.span.set_edgecolor('r')
self.span.set_linewidth(1)
self.draw()
def on_shell_pick(self, event):
self.highlight_shell(event.artist.index)
def highlight_shell(self, index):
self.parent.tableview.selectRow(index)
for i in range(len(self.parent.shells)):
if i != index and i != index + 1:
self.parent.shells[i].set_edgecolor('k')
else:
self.parent.shells[i].set_edgecolor('w')
self.draw()
def shell_picker(self, shell, mouseevent):
if mouseevent.xdata is None:
return False, dict()
mouse_r2 = mouseevent.xdata ** 2 + mouseevent.ydata ** 2
if shell.r_inner ** 2 < mouse_r2 < shell.r_outer ** 2:
return True, dict()
return False, dict()
def span_picker(self, span, mouseevent, tolerance=5):
left = float(span.xy[0][0])
right = float(span.xy[2][0])
tolerance = span.axes.transData.inverted().transform((tolerance, 0))[0] - span.axes.transData.inverted().transform((0, 0))[0]
event_attributes = {'edge': None}
if mouseevent.xdata is None:
return False, event_attributes
if left - tolerance <= mouseevent.xdata <= left + tolerance:
event_attributes['edge'] = 'left'
return True, event_attributes
elif right - tolerance <= mouseevent.xdata <= right + tolerance:
event_attributes['edge'] = 'right'
return True, event_attributes
return False, event_attributes
class Shell(matplotlib.patches.Wedge):
def __init__(self, index, center, r_inner, r_outer, **kwargs):
super(Shell, self).__init__(center, r_outer, 0, 90, width=r_outer - r_inner, **kwargs)
self.index = index
self.center = center
self.r_outer = r_outer
self.r_inner = r_inner
self.width = r_outer - r_inner
| bsd-3-clause |
hdmetor/scikit-learn | sklearn/decomposition/tests/test_sparse_pca.py | 142 | 5990 | # Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_not_mac_os
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
@if_not_mac_os()
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
# Test that SparsePCA won't return NaN when a feature is zero in all
# samples.
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
| bsd-3-clause |
fzr72725/ThinkStats2 | code/populations.py | 68 | 2609 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import csv
import logging
import sys
import numpy as np
import pandas
import thinkplot
import thinkstats2
def ReadData(filename='PEP_2012_PEPANNRES_with_ann.csv'):
"""Reads filename and returns populations in thousands
filename: string
returns: pandas Series of populations in thousands
"""
df = pandas.read_csv(filename, header=None, skiprows=2,
encoding='iso-8859-1')
populations = df[7]
populations.replace(0, np.nan, inplace=True)
return populations.dropna()
def MakeFigures():
"""Plots the CDF of populations in several forms.
On a log-log scale the tail of the CCDF looks like a straight line,
which suggests a Pareto distribution, but that turns out to be misleading.
On a log-x scale the distribution has the characteristic sigmoid of
a lognormal distribution.
The normal probability plot of log(sizes) confirms that the data fit the
lognormal model very well.
Many phenomena that have been described with Pareto models can be described
as well, or better, with lognormal models.
"""
pops = ReadData()
print('Number of cities/towns', len(pops))
log_pops = np.log10(pops)
cdf = thinkstats2.Cdf(pops, label='data')
cdf_log = thinkstats2.Cdf(log_pops, label='data')
# pareto plot
xs, ys = thinkstats2.RenderParetoCdf(xmin=5000, alpha=1.4, low=0, high=1e7)
thinkplot.Plot(np.log10(xs), 1-ys, label='model', color='0.8')
thinkplot.Cdf(cdf_log, complement=True)
thinkplot.Config(xlabel='log10 population',
ylabel='CCDF',
yscale='log')
thinkplot.Save(root='populations_pareto')
# lognormal plot
thinkplot.PrePlot(cols=2)
mu, sigma = log_pops.mean(), log_pops.std()
xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=8)
thinkplot.Plot(xs, ps, label='model', color='0.8')
thinkplot.Cdf(cdf_log)
thinkplot.Config(xlabel='log10 population',
ylabel='CDF')
thinkplot.SubPlot(2)
thinkstats2.NormalProbabilityPlot(log_pops, label='data')
thinkplot.Config(xlabel='z',
ylabel='log10 population',
xlim=[-5, 5])
thinkplot.Save(root='populations_normal')
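# Editor's note: a hedged illustration, not part of the original script. It sketches one
# way to quantify the lognormal claim made in MakeFigures; using scipy.stats.normaltest
# on log10(populations) is an assumption here, not something the book prescribes.
def CheckLognormalFit():
    """Runs a normality test on log10 of the population sizes.

    If the populations were lognormal, log10(pops) should look Gaussian.
    """
    from scipy import stats
    log_pops = np.log10(ReadData())
    stat, pvalue = stats.normaltest(log_pops)
    print('normaltest on log10(populations): stat=%.2f, p=%.3g' % (stat, pvalue))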
def main():
thinkstats2.RandomSeed(17)
MakeFigures()
if __name__ == "__main__":
main()
| gpl-3.0 |
cbecker/LightGBM | tests/python_package_test/test_sklearn.py | 3 | 6123 | # coding: utf-8
# pylint: skip-file
import unittest
import lightgbm as lgb
import numpy as np
from sklearn.base import clone
from sklearn.datasets import (load_boston, load_breast_cancer, load_digits,
load_svmlight_file)
from sklearn.externals import joblib
from sklearn.metrics import log_loss, mean_squared_error
from sklearn.model_selection import GridSearchCV, train_test_split
class template(object):
@staticmethod
def test_template(X_y=load_boston(True), model=lgb.LGBMRegressor,
feval=mean_squared_error, num_round=100,
custom_obj=None, predict_proba=False,
return_data=False, return_model=False):
X_train, X_test, y_train, y_test = train_test_split(*X_y, test_size=0.1, random_state=42)
if return_data:
return X_train, X_test, y_train, y_test
arguments = {'n_estimators': num_round, 'silent': True}
if custom_obj:
arguments['objective'] = custom_obj
gbm = model(**arguments)
gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=10, verbose=False)
if return_model:
return gbm
elif predict_proba:
return feval(y_test, gbm.predict_proba(X_test))
else:
return feval(y_test, gbm.predict(X_test))
class TestSklearn(unittest.TestCase):
def test_binary(self):
X_y = load_breast_cancer(True)
ret = template.test_template(X_y, lgb.LGBMClassifier, log_loss, predict_proba=True)
self.assertLess(ret, 0.15)
def test_regression(self):
self.assertLess(template.test_template() ** 0.5, 4)
def test_multiclass(self):
X_y = load_digits(10, True)
def multi_error(y_true, y_pred):
return np.mean(y_true != y_pred)
ret = template.test_template(X_y, lgb.LGBMClassifier, multi_error)
self.assertLess(ret, 0.2)
def test_lambdarank(self):
X_train, y_train = load_svmlight_file('../../examples/lambdarank/rank.train')
X_test, y_test = load_svmlight_file('../../examples/lambdarank/rank.test')
q_train = np.loadtxt('../../examples/lambdarank/rank.train.query')
q_test = np.loadtxt('../../examples/lambdarank/rank.test.query')
lgb_model = lgb.LGBMRanker().fit(X_train, y_train,
group=q_train,
eval_set=[(X_test, y_test)],
eval_group=[q_test],
eval_at=[1],
verbose=False,
callbacks=[lgb.reset_parameter(learning_rate=lambda x: 0.95 ** x * 0.1)])
def test_regression_with_custom_objective(self):
def objective_ls(y_true, y_pred):
grad = (y_pred - y_true)
hess = np.ones(len(y_true))
return grad, hess
ret = template.test_template(custom_obj=objective_ls)
self.assertLess(ret, 100)
def test_binary_classification_with_custom_objective(self):
def logregobj(y_true, y_pred):
y_pred = 1.0 / (1.0 + np.exp(-y_pred))
grad = y_pred - y_true
hess = y_pred * (1.0 - y_pred)
return grad, hess
X_y = load_digits(2, True)
def binary_error(y_test, y_pred):
return np.mean([int(p > 0.5) != y for y, p in zip(y_test, y_pred)])
ret = template.test_template(X_y, lgb.LGBMClassifier, feval=binary_error, custom_obj=logregobj)
self.assertLess(ret, 0.1)
def test_dart(self):
X_train, X_test, y_train, y_test = template.test_template(return_data=True)
gbm = lgb.LGBMRegressor(boosting_type='dart')
gbm.fit(X_train, y_train)
self.assertLessEqual(gbm.score(X_train, y_train), 1.)
def test_grid_search(self):
X_train, X_test, y_train, y_test = template.test_template(return_data=True)
params = {'boosting_type': ['dart', 'gbdt'],
'n_estimators': [15, 20],
'drop_rate': [0.1, 0.2]}
gbm = GridSearchCV(lgb.LGBMRegressor(), params, cv=3)
gbm.fit(X_train, y_train)
self.assertIn(gbm.best_params_['n_estimators'], [15, 20])
def test_clone_and_property(self):
gbm = template.test_template(return_model=True)
gbm_clone = clone(gbm)
self.assertIsInstance(gbm.booster_, lgb.Booster)
self.assertIsInstance(gbm.feature_importances_, np.ndarray)
clf = template.test_template(load_digits(2, True), model=lgb.LGBMClassifier, return_model=True)
self.assertListEqual(sorted(clf.classes_), [0, 1])
self.assertEqual(clf.n_classes_, 2)
self.assertIsInstance(clf.booster_, lgb.Booster)
self.assertIsInstance(clf.feature_importances_, np.ndarray)
def test_joblib(self):
gbm = template.test_template(num_round=10, return_model=True)
joblib.dump(gbm, 'lgb.pkl')
gbm_pickle = joblib.load('lgb.pkl')
self.assertIsInstance(gbm_pickle.booster_, lgb.Booster)
self.assertDictEqual(gbm.get_params(), gbm_pickle.get_params())
self.assertListEqual(list(gbm.feature_importances_), list(gbm_pickle.feature_importances_))
X_train, X_test, y_train, y_test = template.test_template(return_data=True)
gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], verbose=False)
gbm_pickle.fit(X_train, y_train, eval_set=[(X_test, y_test)], verbose=False)
for key in gbm.evals_result_:
for evals in zip(gbm.evals_result_[key], gbm_pickle.evals_result_[key]):
self.assertAlmostEqual(*evals, places=5)
pred_origin = gbm.predict(X_test)
pred_pickle = gbm_pickle.predict(X_test)
self.assertEqual(len(pred_origin), len(pred_pickle))
for preds in zip(pred_origin, pred_pickle):
self.assertAlmostEqual(*preds, places=5)
print("----------------------------------------------------------------------")
print("running test_sklearn.py")
unittest.main()
| mit |
gakarak/BTBDB_ImageAnalysisSubPortal | app/core/segmct/imtransform.py | 1 | 28712 | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'ar'
from abc import abstractmethod
from scipy.interpolate import RegularGridInterpolator
import numpy as np
import random
import matplotlib.pyplot as plt
import os
import skimage as sk
import skimage.io as skio
import skimage.transform
import scipy as sc
import scipy.ndimage
import scipy.interpolate
import numpy.matlib
import nibabel as nib
from scipy.ndimage.interpolation import map_coordinates
###############################################
def makeTransform(lstMat):
ret = lstMat[0].copy()
for xx in lstMat[1:]:
ret = ret.dot(xx)
return ret
###############################################
# Handmade scipy-based affine transformation functions for 2d/3d images: fuck you ITK/VTK!
# 2d-images
def affine_transformation_2d(image, pshift, protCnt, protAngle, pscale, pcropSize,
pshear=(0., 0.),
isDebug=False,
pmode='constant',
pval=0,
porder=3):
"""
scipy-based 2d image transformation: for data augmentation
:param image: input 2d-image with 1 or more channels
:param pshift: shift of coordinates in row/column notation: (dr, dc)
:param protCnt: rotation center in row/column notation: (r0, c0)
:param protAngle: rotation angle (anti-clock-wise)
:param pscale: scale transformation
:param pcropSize: output size of cropped image (in row/col notation),
if None, then output image shape is equal to input image shape
:param pshear: shear-transform coefficients: (s_row, s_col)
:param isDebug: if True - show the debug visualization
:param pmode: passed through as the 'mode' parameter of scipy.ndimage.interpolation.affine_transform
:param pval: passed through as the 'cval' parameter of scipy.ndimage.interpolation.affine_transform
:param porder: passed through as the 'order' parameter of scipy.ndimage.interpolation.affine_transform
:return: transformed 2d image
"""
# (1) precalc parameters
angRad = (np.pi / 180.) * protAngle
cosa = np.cos(angRad)
sina = np.sin(angRad)
# (2) prepare separate affine transformation matrices
# (2.1) shift matrix: all matrices in row/column notation (this notation
# is default for numpy 2d-arrays. Do not confuse with XY-notation!)
matShift = np.array([
[1., 0., +pshift[0]],
[0., 1., +pshift[1]],
[0., 0., 1.]
])
# (2.2) shift matrices for rotation: backward and forward
matShiftB = np.array([
[1., 0., -protCnt[0]],
[0., 1., -protCnt[1]],
[0., 0., 1.]
])
matShiftF = np.array([
[1., 0., +protCnt[0]],
[0., 1., +protCnt[1]],
[0., 0., 1.]
])
# (2.3) rotation matrix
matRot = np.array([
[+cosa, -sina, 0.],
[+sina, +cosa, 0.],
[0., 0., 1.]
])
# (2.4) scale matrix
matScale = np.array([
[pscale, 0., 0.],
[0., pscale, 0.],
[0., 0., 1.]
])
# (2.5) shear matrix
matShear = np.array([
[1., pshear[0], 0.],
[pshear[1], 1., 0.],
[0., 0., 1.],
])
# (3) build total-matrix
if pcropSize is None:
# matTotal = matShiftF.dot(matRot.dot(matScale.dot(matShiftB)))
matTotal = makeTransform([matShiftF, matRot, matShear, matScale, matShiftB])
pcropSize = image.shape[:2]
else:
matShiftCrop = np.array([
[1., 0., pcropSize[0] / 2.],
[0., 1., pcropSize[1] / 2.],
[0., 0., 1.]
])
# matTotal = matShiftCrop.dot(matRot.dot(matScale.dot(matShiftB)))
matTotal = makeTransform([matShiftCrop, matRot, matShear, matScale, matShiftB])
# (3.1) shift after rotation and scale transformation
matTotal = matShift.dot(matTotal)
# (3.2) create inverted matrix for back-projected mapping
matTotalInv = np.linalg.inv(matTotal)
# (4) warp image with total affine-transform
idxRC = np.indices(pcropSize).reshape(2, -1)
idxRCH = np.insert(idxRC, 2, values=[1], axis=0)
idxRCHT = matTotalInv.dot(idxRCH)[:2, :]
if image.ndim>2:
tret = []
for ii in range(image.shape[-1]):
tret.append(map_coordinates(image[:,:,ii], idxRCHT, order=porder, cval=pval, mode=pmode).reshape(pcropSize))
tret = np.dstack(tret)
else:
tret = map_coordinates(image, idxRCHT, order=porder, cval=pval, mode=pmode).reshape(pcropSize)
# (5)
if isDebug:
pcntPrj = matTotal.dot(list(protCnt) + [1])[:2]
print (':: Total matrix:\n{0}'.format(matTotal))
print ('---')
print (':: Total matrix inverted:\n{0}'.format(matTotalInv))
plt.subplot(1, 2, 1)
plt.imshow(image)
plt.gcf().gca().add_artist(plt.Circle(protCnt[::-1], 5, edgecolor='r', fill=False))
plt.title('Shift={0}, RotCenter={3}, Rot={1}, Scale={2}'.format(pshift, pscale, protAngle, protCnt))
plt.subplot(1, 2, 2)
plt.imshow(tret)
plt.gcf().gca().add_artist(plt.Circle(pcntPrj[::-1], 5, edgecolor='r', fill=False))
plt.title('Shape = {0}'.format(tret.shape))
plt.show()
return tret
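# Editor's note: a minimal, hedged usage sketch (not part of the original module). The toy
# image and every parameter value below are assumptions chosen only to show the expected
# row/column calling convention of affine_transformation_2d.
def _example_affine_2d_usage():
    toy_img = np.random.uniform(0., 1., (256, 256, 3))    # assumed toy RGB image
    return affine_transformation_2d(toy_img,
                                    pshift=(5., -3.),      # (dr, dc) shift in pixels
                                    protCnt=(128., 128.),  # rotation center (r0, c0)
                                    protAngle=15.,         # degrees, anti-clockwise
                                    pscale=1.1,
                                    pcropSize=(200, 200),  # output size (rows, cols)
                                    pshear=(0.05, 0.))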
# 3d-images
def affine_transformation_3d(image3d, pshiftXYZ, protCntXYZ, protAngleXYZ, pscaleXYZ, pcropSizeXYZ,
pshear=(0., 0., 0., 0., 0., 0.),
isRandomizeRot=False,
isDebug=False,
pmode='constant',
pval=0,
porder=0):
"""
scipy-based 3d image transformation: for data augmentation
:param image3d: input 3d-image with 1 or more channels (with shape like [sizX, sizY, sizZ]
or [sizX, sizY, sizZ, num_channels])
:param pshiftXYZ: shift of coordinates: (dx, dy, dz)
:param protCntXYZ: rotation center: (x0, y0, z0)
:param protAngleXYZ: rotation angles (anti-clockwise), like: (angle_x, angle_y, angle_z)
:param pscaleXYZ: scale transformation: (sx, sy, sz)
:param pcropSizeXYZ: output size of cropped image, like: [outSizeX, outSizeY, outSizeZ]. If None,
then output image shape is equal to input image shape
:param pshear: shear-transform 3D coefficients. Two possible formats:
- 6-dimensional vector, like: (Syx, Szx, Sxy, Szy, Sxz, Syz)
- 3x3 matrix, like:
[ 1, Syx, Szx]
[Sxy, 1, Szy]
[Sxz, Syz, 1]
:param isRandomizeRot: if True, randomly shuffle the order of the X/Y/Z rotations
:param isDebug: if True - show the debug visualization
:param pmode: passed through as the 'mode' parameter of scipy.ndimage.interpolation.affine_transform
:param pval: passed through as the 'cval' parameter of scipy.ndimage.interpolation.affine_transform
:param porder: passed through as the 'order' parameter of scipy.ndimage.interpolation.affine_transform
:return: transformed 3d image
"""
nshp=3
# (1) precalc parameters
angRadXYZ = (np.pi / 180.) * np.array(protAngleXYZ)
cosaXYZ = np.cos(angRadXYZ)
sinaXYZ = np.sin(angRadXYZ)
# (2) prepare separate affine transformation matrices
# (2.0) shift matrices
matShiftXYZ = np.array([
[1., 0., 0., +pshiftXYZ[0]],
[0., 1., 0., +pshiftXYZ[1]],
[0., 0., 1., +pshiftXYZ[2]],
[0., 0., 0., 1.]
])
# (2.1) shift-matrices for rotation: backward and forward
matShiftB = np.array([
[1., 0., 0., -protCntXYZ[0]],
[0., 1., 0., -protCntXYZ[1]],
[0., 0., 1., -protCntXYZ[2]],
[0., 0., 0., 1.]
])
matShiftF = np.array([
[1., 0., 0., +protCntXYZ[0]],
[0., 1., 0., +protCntXYZ[1]],
[0., 0., 1., +protCntXYZ[2]],
[0., 0., 0., 1.]
])
# (2.2) partial and full-rotation matrix
lstMatRotXYZ = []
for ii in range(len(angRadXYZ)):
cosa = cosaXYZ[ii]
sina = sinaXYZ[ii]
if ii==0:
# Rx
tmat = np.array([
[1., 0., 0., 0.],
[0., +cosa, -sina, 0.],
[0., +sina, +cosa, 0.],
[0., 0., 0., 1.]
])
elif ii==1:
# Ry
tmat = np.array([
[+cosa, 0., +sina, 0.],
[ 0., 1., 0., 0.],
[-sina, 0., +cosa, 0.],
[ 0., 0., 0., 1.]
])
else:
# Rz
tmat = np.array([
[+cosa, -sina, 0., 0.],
[+sina, +cosa, 0., 0.],
[ 0., 0., 1., 0.],
[ 0., 0., 0., 1.]
])
lstMatRotXYZ.append(tmat)
if isRandomizeRot:
random.shuffle(lstMatRotXYZ)
matRotXYZ = lstMatRotXYZ[0].copy()
for mm in lstMatRotXYZ[1:]:
matRotXYZ = matRotXYZ.dot(mm)
# (2.3) scale matrix
sx,sy,sz = pscaleXYZ
matScale = np.array([
[sx, 0., 0., 0.],
[0., sy, 0., 0.],
[0., 0., sz, 0.],
[0., 0., 0., 1.],
])
# (2.4) shear matrix
if pshear is not None:
if len(pshear) == 6:
matShear = np.array([
[1., pshear[0], pshear[1], 0.],
[pshear[2], 1., pshear[3], 0.],
[pshear[4], pshear[5], 1., 0.],
[0., 0., 0., 1.]
])
else:
matShear = np.eye(4, 4)
pshear = np.array(pshear)
if pshear.shape == (3, 3):
matShear[:3, :3] = pshear
elif pshear.shape == (4, 4):
matShear = pshear
else:
raise Exception('Invalid shear-matrix format: [{0}]'.format(pshear))
else:
matShear = np.eye(4, 4)
# (3) build total-matrix
if pcropSizeXYZ is None:
# matTotal = matShiftF.dot(matRotXYZ.dot(matScale.dot(matShiftB)))
matTotal = makeTransform([matShiftF, matRotXYZ, matShear, matScale, matShiftB])
pcropSizeXYZ = image3d.shape[:nshp]
else:
matShiftCropXYZ = np.array([
[1., 0., 0., pcropSizeXYZ[0] / 2.],
[0., 1., 0., pcropSizeXYZ[1] / 2.],
[0., 0., 1., pcropSizeXYZ[2] / 2.],
[0., 0., 0., 1.]
])
# matTotal = matShiftCropXYZ.dot(matRotXYZ.dot(matScale.dot(matShiftB)))
matTotal = makeTransform([matShiftCropXYZ, matRotXYZ, matShear, matScale, matShiftB])
# (3.1) shift after rot-scale transformation
matTotal = matShiftXYZ.dot(matTotal)
# (3.2) invert matrix for back-projected mapping
matTotalInv = np.linalg.inv(matTotal)
# (4) warp image with total affine-transform
idxXYZ = np.indices(pcropSizeXYZ).reshape(nshp, -1)
idxXYZH = np.insert(idxXYZ, nshp, values=[1], axis=0)
idxXYZT = matTotalInv.dot(idxXYZH)[:nshp, :]
# (5) processing 3D layer-by-layer
if image3d.ndim>nshp:
tret = []
for ii in range(image3d.shape[-1]):
tret.append(map_coordinates(image3d[:, :, :, ii], idxXYZT, order=porder, cval=pval, mode=pmode).reshape(pcropSizeXYZ))
tret = np.dstack(tret)
else:
tret = map_coordinates(image3d, idxXYZT, order=porder, cval=pval, mode=pmode).reshape(pcropSizeXYZ)
# (6) Debug
if isDebug:
protCntXYZ = np.array(protCntXYZ)
protCntXYZPrj = matTotal.dot(list(protCntXYZ) + [1])[:nshp]
print (':: Total matrix:\n{0}'.format(matTotal))
print ('---')
print (':: Total matrix inverted:\n{0}'.format(matTotalInv))
s0, s1, s2 = image3d.shape
s0n, s1n, s2n = tret.shape
#
plt.subplot(3, 3, 1 + 0*nshp)
plt.imshow(image3d[s0 // 2, :, :])
plt.gcf().gca().add_artist(plt.Circle(protCntXYZ[[1, 2]], 5, edgecolor='r', fill=False))
plt.subplot(3, 3, 2 + 0*nshp)
plt.imshow(image3d[:, s1 // 2, :])
plt.gcf().gca().add_artist(plt.Circle(protCntXYZ[[0, 2]], 5, edgecolor='r', fill=False))
plt.subplot(3, 3, 3 + 0*nshp)
plt.imshow(image3d[:, :, s2 // 2])
plt.gcf().gca().add_artist(plt.Circle(protCntXYZ[[0, 1]], 5, edgecolor='r', fill=False))
#
plt.subplot(3, 3, 1 + 1*nshp)
plt.imshow(tret[s0n // 2, :, :])
plt.gcf().gca().add_artist(plt.Circle(protCntXYZPrj[[1, 2]], 5, edgecolor='r', fill=False))
plt.subplot(3, 3, 2 + 1*nshp)
plt.imshow(tret[:, s1n // 2, :])
plt.gcf().gca().add_artist(plt.Circle(protCntXYZPrj[[0, 2]], 5, edgecolor='r', fill=False))
plt.subplot(3, 3, 3 + 1*nshp)
plt.imshow(tret[:, :, s2n // 2])
plt.gcf().gca().add_artist(plt.Circle(protCntXYZPrj[[0, 1]], 5, edgecolor='r', fill=False))
#
plt.subplot(3, 3, 1 + 2 * nshp)
plt.imshow(np.sum(tret, axis=0))
plt.gcf().gca().add_artist(plt.Circle(protCntXYZPrj[[1, 2]], 5, edgecolor='r', fill=False))
plt.subplot(3, 3, 2 + 2 * nshp)
plt.imshow(np.sum(tret, axis=1))
plt.gcf().gca().add_artist(plt.Circle(protCntXYZPrj[[0, 2]], 5, edgecolor='r', fill=False))
plt.subplot(3, 3, 3 + 2 * nshp)
plt.imshow(np.sum(tret, axis=2))
plt.gcf().gca().add_artist(plt.Circle(protCntXYZPrj[[0, 1]], 5, edgecolor='r', fill=False))
plt.show()
return tret
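# Editor's note: a minimal, hedged usage sketch (not part of the original module). The
# synthetic volume and all parameter values are assumptions picked to make the expected
# argument shapes of affine_transformation_3d explicit.
def _example_affine_3d_usage():
    toy_vol = np.random.uniform(0., 1., (64, 64, 32))           # assumed toy 3D volume
    cnt_xyz = tuple(int(x) // 2 for x in toy_vol.shape)         # rotate around the volume center
    return affine_transformation_3d(toy_vol,
                                    pshiftXYZ=(2., -2., 0.),
                                    protCntXYZ=cnt_xyz,
                                    protAngleXYZ=(5., 0., 10.),  # degrees per axis
                                    pscaleXYZ=(1.05, 1.05, 1.),
                                    pcropSizeXYZ=(48, 48, 32),
                                    porder=1)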
def generateDistortionMat2d(pshape, pgrid=(5, 5), prnd=0.2, isProportionalGrid=True):
if isProportionalGrid:
pgridVal = max(pgrid)
if pshape[0] < pshape[1]:
pgrid = (pgridVal, int(float(pshape[1]) * pgridVal / pshape[0]))
else:
pgrid = (int(float(pshape[0]) * pgridVal / pshape[1]), pgridVal)
sizImg = pshape[:2]
gxy = pgrid
dxy = np.array(sizImg[::-1]) // np.array(gxy)
rx = np.linspace(0, sizImg[1], gxy[0])
ry = np.linspace(0, sizImg[0], gxy[1])
XX, YY = np.meshgrid(rx, ry)
rndXX = np.random.uniform(0, dxy[0] * prnd, XX.shape)
rndYY = np.random.uniform(0, dxy[1] * prnd, YY.shape)
XXrnd = XX.copy()
YYrnd = YY.copy()
XXrnd[1:-1, 1:-1] = XX[1:-1, 1:-1] + rndXX[1:-1, 1:-1]
YYrnd[1:-1, 1:-1] = YY[1:-1, 1:-1] + rndYY[1:-1, 1:-1]
fx = sc.interpolate.interp2d(XX, YY, XXrnd, kind='cubic')
fy = sc.interpolate.interp2d(XX, YY, YYrnd, kind='cubic')
rx_new = np.linspace(0, sizImg[1] - 1, sizImg[1])
ry_new = np.linspace(0, sizImg[0] - 1, sizImg[0])
XX_pert = fx(rx_new, ry_new)
YY_pert = fy(rx_new, ry_new)
shiftXYZ = np.matlib.repeat(np.expand_dims(np.stack((YY_pert, XX_pert, np.zeros(XX_pert.shape))), axis=-1), 3, axis=-1)
return shiftXYZ
def generateDistortionMat3d(pshape, pgrid=(5, 5, 5), prnd=0.8, isProportionalGrid=True, isNormZScale = True):
if isProportionalGrid:
pgridVal = max(pgrid)
pminVal = np.min(pshape)
pminIdx = np.argmin(pshape)
pgrid = np.array([int(float(xx) * pgridVal / pminVal) for xx in pshape])
pgrid[pminIdx] = pgridVal
sizImg3D = pshape
gxyz = pgrid
dxyz = np.array(sizImg3D) / np.array(gxyz)
rxyz = [np.linspace(0, sizImg3D[ii], gxyz[ii]) for ii in range(3)]
XYZ = np.array(np.meshgrid(rxyz[0], rxyz[1], rxyz[2]))
rndXYZ = np.random.uniform(0, dxyz[0] * prnd, XYZ.shape)
if isNormZScale:
rndXYZ[-1] *= float(pshape[-1])/pshape[0] #FIXME: potential bug
XYZrnd = XYZ.copy()
for ii in range(3):
XYZrnd[ii][1:-1, 1:-1, 1:-1] += rndXYZ[ii][1:-1, 1:-1, 1:-1]
#
rxyz_new = [np.linspace(0, sizImg3D[ii] - 1, sizImg3D[ii]) for ii in range(3)]
XYZ_new = np.array(np.meshgrid(rxyz_new[0], rxyz_new[1], rxyz_new[2]))
# rxyz_new =
# q = sc.interpolate.interpn(XYZ.reshape(3, -1), XYZrnd, XYZ_new.reshape(3, -1))
q0 = sc.interpolate.interpn(rxyz, XYZrnd[0], XYZ_new.reshape(3, -1).T).reshape(XYZ_new.shape[1:])
q1 = sc.interpolate.interpn(rxyz, XYZrnd[1], XYZ_new.reshape(3, -1).T).reshape(XYZ_new.shape[1:])
q2 = sc.interpolate.interpn(rxyz, XYZrnd[2], XYZ_new.reshape(3, -1).T).reshape(XYZ_new.shape[1:])
return np.stack((q0,q1,q2))
###############################################
def _draw_debug_3d_image(pimg3d, pmsk3d, ext_img3d = None, isShow = True, isNewFigure = False):
tsiz = pimg3d.shape[-1] // 2
timg_2d = pimg3d[:, :, tsiz]
tmsk_2d = pmsk3d[:, :, tsiz]
tmsk_2d_n = (tmsk_2d - tmsk_2d.min()) / float(tmsk_2d.max() - tmsk_2d.min() + 0.001)
timg_2d_n = (timg_2d - timg_2d.min()) / float(timg_2d.max() - timg_2d.min() + 0.001)
if isNewFigure:
plt.figure()
nxy = 3 if ext_img3d is None else 4
plt.subplot(1, nxy, 1)
plt.imshow(timg_2d), plt.title('image')
plt.subplot(1, nxy, 2)
plt.imshow(tmsk_2d), plt.title('mask, unique = {}'.format(np.unique(pmsk3d)))
plt.subplot(1, nxy, 3)
plt.imshow(np.dstack([tmsk_2d_n, timg_2d_n, timg_2d_n]))
plt.title('image + mask composite')
if ext_img3d is not None:
plt.subplot(1, nxy, 4)
plt.imshow(ext_img3d[:, :, tsiz])
if isShow:
plt.show()
class Augumentor3DBasic:
def __init__(self, prob = 0.5):
self.prob = prob
@abstractmethod
def process(self, pimg3d, pmsk3d):
raise NotImplementedError
class MultiAugumentor3D:
def __init__(self, lst_aug = None):
if lst_aug is None:
self.lst_aug = []
else:
self.lst_aug = lst_aug
def add_aug(self, paug):
self.lst_aug.append(paug)
def process(self, pimg3d, pmsk3d):
ret_img3d, ret_msk3d = pimg3d, pmsk3d
for aug_proc in self.lst_aug:
ret_img3d, ret_msk3d = aug_proc.process(ret_img3d, ret_msk3d)
return (ret_img3d, ret_msk3d)
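# Editor's note: a hedged composition sketch (not part of the original module). It only
# shows how MultiAugumentor3D is meant to chain augmentors; the specific augmentors and
# probabilities are assumptions that mirror the commented-out examples in __main__ below.
def _example_multi_augumentor(pimg3d, pmsk3d):
    aug = MultiAugumentor3D([
        Augumentor3DGeom_Affine(prob=0.7),        # random affine warp of image + mask
        Augumentor3DValues_GaussNoise(prob=0.5),  # additive low-frequency Gaussian noise
    ])
    return aug.process(pimg3d, pmsk3d)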
class Augumentor3D_Identity(Augumentor3DBasic):
def __init__(self, prob=0.5, isDebug = False):
super().__init__(prob)
self.isDebug = isDebug
def process(self, pimg3d, pmsk3d):
if isinstance(pimg3d, str):
pimg3d = nib.load(pimg3d).get_data()
pmsk3d = nib.load(pmsk3d).get_data()
if self.isDebug:
_draw_debug_3d_image(pimg3d, pmsk3d, isShow=True, isNewFigure=False)
return pimg3d, pmsk3d
class Augumentor3DGeom_Affine(Augumentor3DBasic):
def __init__(self, prob=0.5, dxyz = (10., 10., 10.), dangle=(-10., +10.), dscale=(0.9, 1.1),
dshear = 0., mode = 'nearest', order = 1, order_msk = 0, isDebug = False, isNoZAngleDisturb=True):
super().__init__(prob)
self.dxyz = dxyz
self.dangle = dangle
self.dscale = dscale
self.dshear = dshear
self.mode = mode
self.order = order
self.order_msk = order_msk
self.isNoZAngleDisturb = isNoZAngleDisturb
self.isDebug = isDebug
def process(self, pimg3d, pmsk3d):
if isinstance(pimg3d, str):
pimg3d = nib.load(pimg3d).get_data()
pmsk3d = nib.load(pmsk3d).get_data()
assert (pimg3d.shape == pmsk3d.shape)
assert (pmsk3d.ndim == 3)
tprob = np.random.uniform(0.0, 1.0)
if tprob > self.prob:
return (pimg3d, pmsk3d)
siz_crop = pimg3d.shape
xyz_cnt = tuple((np.array(pimg3d.shape[:3])//2).tolist())
xyz_shift = [np.random.uniform(-xx, +xx) for xx in self.dxyz]
xyz_angle = np.random.uniform(self.dangle[0], self.dangle[1], 3)
if self.isNoZAngleDisturb:
xyz_angle[-1] = 0.
xyz_angle = xyz_angle.tolist()
xyz_scale = np.random.uniform(self.dscale[0], self.dscale[1], 3).tolist()
xyz_shear = np.random.uniform(-self.dshear, +self.dshear, 6).tolist()
#
ret_img3d, ret_msk3d = [affine_transformation_3d(xx,
pshiftXYZ=xyz_shift,
protCntXYZ=xyz_cnt,
protAngleXYZ=xyz_angle,
pscaleXYZ=xyz_scale,
pcropSizeXYZ=siz_crop,
pshear = xyz_shear,
pmode=self.mode, porder=xx_order) for xx, xx_order in
zip( [pimg3d, pmsk3d], [self.order, self.order_msk])]
if self.isDebug:
_draw_debug_3d_image(ret_img3d, ret_msk3d, isShow=True, isNewFigure=False)
return ret_img3d, ret_msk3d
class Augumentor3DGeom_Distortion(Augumentor3DBasic):
def __init__(self, prob=0.5, grid = (5, 5, 5), prnd=0.3, isProportionalGrid = False, mode = 'nearest',
order = 1, order_msk = 0, isDebug = False, isNormZScale = True):
super().__init__(prob)
self.grid = grid
self.prnd = prnd
self.mode = mode
self.order = order
self.order_msk = order_msk
self.isProportionalGrid = isProportionalGrid
self.isNormZScale = isNormZScale
self.isDebug = isDebug
def process(self, pimg3d, pmsk3d):
if isinstance(pimg3d, str):
pimg3d = nib.load(pimg3d).get_data()
pmsk3d = nib.load(pmsk3d).get_data()
assert (pimg3d.shape == pmsk3d.shape)
assert (pmsk3d.ndim == 3)
distMat3D = generateDistortionMat3d(pimg3d.shape, prnd=self.prnd, pgrid=self.grid,
isProportionalGrid=self.isProportionalGrid,
isNormZScale=self.isNormZScale)
ret_img3d = sc.ndimage.map_coordinates(pimg3d, distMat3D, mode=self.mode, order=self.order)
ret_msk3d = sc.ndimage.map_coordinates(pmsk3d, distMat3D, mode=self.mode, order=self.order_msk)
if self.isDebug:
_draw_debug_3d_image(ret_img3d, ret_msk3d, isShow=True, isNewFigure=False)
return ret_img3d, ret_msk3d
class Augumentor3DValues_GaussBlobs(Augumentor3DBasic):
def __init__(self, prob=0.5, diap_num_blobs = (2, 5), diap_rad = (0.1, 0.4), diap_val = (200, 800), isDebug = False):
super().__init__(prob)
self.diap_num_blobs = diap_num_blobs
self.diap_rad = diap_rad
self.diap_val = diap_val
self.isDebug = isDebug
def process(self, pimg3d, pmsk3d):
if isinstance(pimg3d, str):
pimg3d = nib.load(pimg3d).get_data()
pmsk3d = nib.load(pmsk3d).get_data()
assert (pimg3d.shape == pmsk3d.shape)
assert (pmsk3d.ndim == 3)
tsiz = pimg3d.shape[:3]
lin_xyz = [np.linspace(0, xx - 1, xx) for xx in tsiz]
XYZ = np.array(np.meshgrid(lin_xyz[0], lin_xyz[1], lin_xyz[2]))
idx_xyz = np.where(pmsk3d > 0)
tnum_blobs = np.random.randint(self.diap_num_blobs[0], self.diap_num_blobs[1])
rnd_idx = np.random.randint(0, len(idx_xyz[0]), tnum_blobs)
rnd_rad = np.random.uniform(self.diap_rad[0], self.diap_rad[1], tnum_blobs)
rnd_val = np.random.uniform(self.diap_val[0], self.diap_val[1], tnum_blobs)
ret_noise_gauss = None
for ii, (iidx, iival) in enumerate(zip(rnd_idx, rnd_val)):
tsigm_xyz = np.array(tsiz) * rnd_rad[ii]
tpos_xyz = [xx[iidx] for xx in idx_xyz]
tgauss = np.exp( -np.sum([0.5 * ((xyz - xyz0)/xyzs)**2 for xyz, xyz0, xyzs in zip(XYZ, tpos_xyz, tsigm_xyz)], axis=0) )
tgauss *= (iival/tgauss.max())
if ret_noise_gauss is None:
ret_noise_gauss = tgauss
else:
ret_noise_gauss += tgauss
ret_noise_gauss[pmsk3d < 0.1] = 0
ret_img3d = pimg3d + ret_noise_gauss
ret_msk3d = pmsk3d
if self.isDebug:
_draw_debug_3d_image(ret_img3d, ret_msk3d, ext_img3d=ret_noise_gauss, isShow=True, isNewFigure=False)
return ret_img3d, ret_msk3d
class Augumentor3DValues_GaussNoise(Augumentor3DBasic):
def __init__(self, prob=0.5, diap_scales = (0.1, 0.3), siz_edge_flt=3,
diap_mean = (300, 600), diap_sigm=(100, 200), isOnLungsMaskOnly=True, isDebug = False, bg_threshold = -3000.):
super().__init__(prob)
self.diap_scales = diap_scales
self.siz_edge_flt = siz_edge_flt
self.diap_mean = diap_mean
self.diap_sigm = diap_sigm
self.isOnLungsMaskOnly = isOnLungsMaskOnly
self.bg_threshold = bg_threshold
self.isDebug = isDebug
def process(self, pimg3d, pmsk3d):
if isinstance(pimg3d, str):
pimg3d = nib.load(pimg3d).get_data()
pmsk3d = nib.load(pmsk3d).get_data()
assert (pimg3d.shape == pmsk3d.shape)
assert (pmsk3d.ndim == 3)
tsiz = pimg3d.shape[:3]
rnd_scale = np.random.uniform(self.diap_scales[0], self.diap_scales[1])
tsiz_small = (np.array(tsiz) * rnd_scale).astype(int)
rnd_mean = np.random.uniform(self.diap_mean[0], self.diap_mean[1])
rnd_sigm = np.random.uniform(self.diap_sigm[0], self.diap_sigm[1])
gauss_noise3d = np.random.normal(rnd_mean, rnd_sigm, tsiz_small.tolist())
gauss_noise3d = sk.transform.resize(gauss_noise3d, tsiz, order=2)
if self.isOnLungsMaskOnly:
gauss_noise3d[pmsk3d<0.1] = 0
if self.siz_edge_flt > 0:
gauss_noise3d = scipy.ndimage.gaussian_filter(gauss_noise3d, self.siz_edge_flt)
ret_img3d = pimg3d + gauss_noise3d
ret_img3d[pimg3d < self.bg_threshold] = pimg3d[pimg3d < self.bg_threshold]
ret_msk3d = pmsk3d
if self.isDebug:
_draw_debug_3d_image(ret_img3d, ret_msk3d, ext_img3d=gauss_noise3d, isShow=True, isNewFigure=False)
return ret_img3d, ret_msk3d
class Augumentor3DValues_LinearNoise(Augumentor3DBasic):
def __init__(self, prob=0.5, diap_mean = (-50, 50), diap_scale = (0.95, 1.1), isOnLungsMaskOnly=True, isDebug = False, bg_threshold = -3000.):
super().__init__(prob)
self.diap_mean = diap_mean
self.diap_scale = diap_scale
self.isOnLungsMaskOnly = isOnLungsMaskOnly
self.bg_threshold = bg_threshold
self.isDebug = isDebug
def process(self, pimg3d, pmsk3d):
if isinstance(pimg3d, str):
pimg3d = nib.load(pimg3d).get_data()
pmsk3d = nib.load(pmsk3d).get_data()
assert (pimg3d.shape == pmsk3d.shape)
assert (pmsk3d.ndim == 3)
rnd_mean = np.random.uniform(self.diap_mean[0], self.diap_mean[1])
rnd_scale = np.random.uniform(self.diap_scale[0], self.diap_scale[1])
ret_img3d = pimg3d.copy()
ret_msk3d = pmsk3d
if self.isOnLungsMaskOnly:
ret_img3d[pmsk3d > 0] = rnd_mean + pimg3d[pmsk3d > 0] * rnd_scale
ret_img3d[pimg3d < self.bg_threshold] = pimg3d[pimg3d < self.bg_threshold]
else:
ret_img3d[pimg3d > self.bg_threshold] = rnd_mean + rnd_scale * pimg3d[pimg3d > self.bg_threshold]
if self.isDebug:
_draw_debug_3d_image(ret_img3d, ret_msk3d, ext_img3d=pimg3d, isShow=True, isNewFigure=False)
return ret_img3d, ret_msk3d
###############################################
if __name__ == '__main__':
fnii_img = '/home/ar/data/crdf/data02_ct_lung_segm/data02_ct_lung_segm/data-luna16/lungs-img/id100225287222365663678666836860-256x256x64.nii.gz'
fnii_msk = '/home/ar/data/crdf/data02_ct_lung_segm/data02_ct_lung_segm/data-luna16/lungs-msk/id100225287222365663678666836860-256x256x64.nii.gz'
#
inp_img = nib.load(fnii_img).get_data().astype(np.float32)
inp_msk = nib.load(fnii_msk).get_data()
inp_msk = ((inp_msk == 3) | (inp_msk == 4)).astype(np.float32)
# aug_affine = Augumentor3DGeom_Affine(prob=1.0, isDebug=True)
# img, msk = aug_affine.process(fnii_img, fnii_msk)
# aug_distortion = Augumentor3DGeom_Distortion(prob=1.0, isDebug=True, order=1, order_msk=1)
# img, msk = aug_distortion.process(inp_img, inp_msk)
aug_gaussblobs = Augumentor3DValues_GaussBlobs(prob=1.0, isDebug=True)
img, msk = aug_gaussblobs.process(inp_img, inp_msk)
# aug_gaussnoise = Augumentor3DValues_GaussNoise(prob=1.0, isDebug=True, siz_edge_flt=1, isOnLungsMaskOnly=False)
# img, msk = aug_gaussnoise.process(inp_img, inp_msk)
# aug_linearnoise = Augumentor3DValues_LinearNoise(prob=1.0, isDebug=True, isOnLungsMaskOnly=False)
# img, msk = aug_linearnoise.process(inp_img, inp_msk)
# aug_affine = Augumentor3DGeom_Affine(prob=1.0, isDebug=True)
# aug_gaussnoise = Augumentor3DValues_GaussNoise(prob=1.0, isDebug=True, siz_edge_flt=1)
# aug_affine_and_gaussnoise = MultiAugumentor3D([aug_affine, aug_gaussnoise])
# img, msk = aug_affine_and_gaussnoise.process(inp_img, inp_msk)
print('-')
| apache-2.0 |
RayMick/scikit-learn | sklearn/svm/tests/test_sparse.py | 70 | 12992 | from nose.tools import assert_raises, assert_true, assert_false
import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.utils import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import assert_warns, assert_raise_message
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
dense_svm.fit(X_train.toarray(), y_train)
if sparse.isspmatrix(X_test):
X_test_dense = X_test.toarray()
else:
X_test_dense = X_test
sparse_svm.fit(X_train, y_train)
assert_true(sparse.issparse(sparse_svm.support_vectors_))
assert_true(sparse.issparse(sparse_svm.dual_coef_))
assert_array_almost_equal(dense_svm.support_vectors_,
sparse_svm.support_vectors_.toarray())
assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
if dense_svm.kernel == "linear":
assert_true(sparse.issparse(sparse_svm.coef_))
assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test_dense))
if isinstance(dense_svm, svm.OneClassSVM):
msg = "cannot use sparse input in 'OneClassSVM' trained on dense data"
else:
assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
sparse_svm.predict_proba(X_test), 4)
msg = "cannot use sparse input in 'SVC' trained on dense data"
if sparse.isspmatrix(X_test):
assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
# many class dataset:
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
# we use a subset of digits as iris, blobs or make_classification didn't
# show the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
# Test the sparse SVC with the iris dataset
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1).fit(iris.data, iris.target)
dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(int).ravel()])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
# Similar to test_SVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
# Test the sparse LinearSVC with the iris dataset
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
# Test class weights
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
# Test weights on individual samples
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_oneclasssvm():
"""Check that sparse OneClassSVM gives the same result as dense OneClassSVM"""
# many class dataset:
X_blobs, _ = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, None, T], [X2_sp, None, T2],
[X_blobs[:80], None, X_blobs[80:]],
[iris.data, None, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.OneClassSVM(kernel=kernel, random_state=0)
sp_clf = svm.OneClassSVM(kernel=kernel, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_sparse_realdata():
# Test on a subset from the 20newsgroups dataset.
# This catches some bugs if the input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
| bsd-3-clause |
ozak/geopandas | benchmarks/geom_methods.py | 2 | 3834 | import random
import numpy as np
from geopandas import GeoSeries
from shapely.geometry import Point, LineString, Polygon
def with_attributes(**attrs):
def decorator(func):
for key, value in attrs.items():
setattr(func, key, value)
return func
return decorator
class Bench:
def setup(self, *args):
self.points = GeoSeries([Point(i, i) for i in range(100000)])
triangles = GeoSeries([Polygon([(random.random(), random.random())
for _ in range(3)])
for _ in range(1000)])
triangles2 = triangles.copy().iloc[np.random.choice(1000, 1000)]
triangles3 = GeoSeries([Polygon([(random.random(), random.random())
for _ in range(3)])
for _ in range(10000)])
triangle = Polygon([(random.random(), random.random())
for _ in range(3)])
self.triangles, self.triangles2 = triangles, triangles2
self.triangles_big = triangles3
self.triangle = triangle
@with_attributes(param_names=['op'],
params=[('contains', 'crosses', 'disjoint', 'intersects',
'overlaps', 'touches', 'within', 'geom_equals',
'geom_almost_equals', 'geom_equals_exact')])
def time_binary_predicate(self, op):
getattr(self.triangles, op)(self.triangle)
@with_attributes(param_names=['op'],
params=[('contains', 'crosses', 'disjoint', 'intersects',
'overlaps', 'touches', 'within', 'geom_equals',
'geom_almost_equals')]) # 'geom_equals_exact')])
def time_binary_predicate_vector(self, op):
getattr(self.triangles, op)(self.triangles2)
@with_attributes(param_names=['op'],
params=[('distance')])
def time_binary_float(self, op):
getattr(self.triangles, op)(self.triangle)
@with_attributes(param_names=['op'],
params=[('distance')])
def time_binary_float_vector(self, op):
getattr(self.triangles, op)(self.triangles2)
@with_attributes(param_names=['op'],
params=[('difference', 'symmetric_difference', 'union',
'intersection')])
def time_binary_geo(self, op):
getattr(self.triangles, op)(self.triangle)
@with_attributes(param_names=['op'],
params=[('difference', 'symmetric_difference', 'union',
'intersection')])
def time_binary_geo_vector(self, op):
getattr(self.triangles, op)(self.triangles2)
@with_attributes(param_names=['op'],
params=[('is_valid', 'is_empty', 'is_simple', 'is_ring')])
def time_unary_predicate(self, op):
getattr(self.triangles, op)
@with_attributes(param_names=['op'],
params=[('area', 'length')])
def time_unary_float(self, op):
getattr(self.triangles_big, op)
@with_attributes(param_names=['op'],
params=[('boundary', 'centroid', 'convex_hull',
'envelope', 'exterior', 'interiors')])
def time_unary_geo(self, op):
getattr(self.triangles, op)
def time_unary_geo_representative_point(self, *args):
getattr(self.triangles, 'representative_point')()
def time_geom_type(self, *args):
self.triangles_big.geom_type
def time_bounds(self, *args):
self.triangles.bounds
def time_unary_union(self, *args):
self.triangles.unary_union
def time_buffer(self, *args):
self.points.buffer(2)
# TODO
# project, interpolate, translate, rotate, scale, skew, explode
# cx indexer
| bsd-3-clause |
Clyde-fare/scikit-learn | sklearn/manifold/tests/test_isomap.py | 226 | 3941 | from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
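# The kernel computed above is classical-MDS double centering: for a Euclidean
# squared-distance matrix D2, KernelCenterer applied to -0.5 * D2 recovers the
# centred Gram matrix Xc.dot(Xc.T). A small illustrative check with made-up
# points (a sketch for illustration, not part of the original test suite):
def _demo_double_centering():
    rng = np.random.RandomState(0)
    X = rng.randn(6, 3)
    # pairwise squared Euclidean distances
    D2 = ((X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2).sum(-1)
    K = preprocessing.KernelCenterer().fit_transform(-0.5 * D2)
    Xc = X - X.mean(axis=0)
    assert_array_almost_equal(K, np.dot(Xc, Xc.T))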
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
| bsd-3-clause |
ibis-project/ibis | benchmarks/benchmarks.py | 2 | 8068 | import numpy as np
import pandas as pd
import ibis
import ibis.expr.datatypes as dt
from ibis.backends.pandas.udf import udf
def make_t(name='t'):
return ibis.table(
(
('_timestamp', 'int32'),
('dim1', 'int32'),
('dim2', 'int32'),
('valid_seconds', 'int32'),
('meas1', 'int32'),
('meas2', 'int32'),
('year', 'int32'),
('month', 'int32'),
('day', 'int32'),
('hour', 'int32'),
('minute', 'int32'),
),
name=name,
)
def make_base(t):
return (
(t.year > 2016)
| ((t.year == 2016) & (t.month > 6))
| ((t.year == 2016) & (t.month == 6) & (t.day > 6))
| ((t.year == 2016) & (t.month == 6) & (t.day == 6) & (t.hour > 6))
| (
(t.year == 2016)
& (t.month == 6)
& (t.day == 6)
& (t.hour == 6)
& (t.minute >= 5)
)
) & (
(t.year < 2016)
| ((t.year == 2016) & (t.month < 6))
| ((t.year == 2016) & (t.month == 6) & (t.day < 6))
| ((t.year == 2016) & (t.month == 6) & (t.day == 6) & (t.hour < 6))
| (
(t.year == 2016)
& (t.month == 6)
& (t.day == 6)
& (t.hour == 6)
& (t.minute <= 5)
)
)
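# A note on make_base: the two and/or chains above express ">= 2016-06-06 06:05"
# and "<= 2016-06-06 06:05" using the separate year/month/day/hour/minute
# columns, so together they pin the selection to that single minute; the
# verbosity is intentional, it is what makes the resulting expression tree
# large enough to be an interesting construction/compilation benchmark.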
def make_large_expr(t, base):
src_table = t[base]
src_table = src_table.mutate(
_timestamp=(src_table['_timestamp'] - src_table['_timestamp'] % 3600)
.cast('int32')
.name('_timestamp'),
valid_seconds=300,
)
aggs = []
for meas in ['meas1', 'meas2']:
aggs.append(src_table[meas].sum().cast('float').name(meas))
src_table = src_table.aggregate(
aggs, by=['_timestamp', 'dim1', 'dim2', 'valid_seconds']
)
part_keys = ['year', 'month', 'day', 'hour', 'minute']
ts_col = src_table['_timestamp'].cast('timestamp')
new_cols = {}
for part_key in part_keys:
part_col = getattr(ts_col, part_key)()
new_cols[part_key] = part_col
src_table = src_table.mutate(**new_cols)
return src_table[
[
'_timestamp',
'dim1',
'dim2',
'meas1',
'meas2',
'year',
'month',
'day',
'hour',
'minute',
]
]
class Suite:
def setup(self):
self.t = t = make_t()
self.base = make_base(t)
self.expr = self.large_expr
@property
def large_expr(self):
t = make_t()
return make_large_expr(t, make_base(t))
class Construction(Suite):
def time_large_expr_construction(self):
self.large_expr
class Hashing(Suite):
def time_hash_small_expr(self):
hash(make_t())
def time_hash_medium_expr(self):
hash(make_base(make_t()))
def time_hash_large_expr(self):
hash(self.large_expr)
class Formatting(Suite):
def time_base_expr_formatting(self):
str(self.base)
def time_large_expr_formatting(self):
str(self.expr)
class Compilation(Suite):
def time_impala_base_compile(self):
ibis.impala.compile(self.base)
def time_impala_large_expr_compile(self):
ibis.impala.compile(self.expr)
class PandasBackend:
def setup(self):
n = 30 * int(2e4)
self.data = pd.DataFrame(
{
'key': np.random.choice(16000, size=n),
'low_card_key': np.random.choice(30, size=n),
'value': np.random.rand(n),
'timestamps': pd.date_range(
start='now', periods=n, freq='s'
).values,
'timestamp_strings': pd.date_range(
start='now', periods=n, freq='s'
).values.astype(str),
'repeated_timestamps': pd.date_range(
start='2018-09-01', periods=30
).repeat(int(n / 30)),
}
)
t = ibis.pandas.connect({'df': self.data}).table('df')
self.high_card_group_by = t.groupby(t.key).aggregate(
avg_value=t.value.mean()
)
self.cast_to_dates = t.timestamps.cast(dt.date)
self.cast_to_dates_from_strings = t.timestamp_strings.cast(dt.date)
self.multikey_group_by_with_mutate = (
t.mutate(dates=t.timestamps.cast('date'))
.groupby(['low_card_key', 'dates'])
.aggregate(avg_value=lambda t: t.value.mean())
)
self.simple_sort = t.sort_by([t.key])
self.simple_sort_projection = t[['key', 'value']].sort_by(['key'])
self.multikey_sort = t.sort_by(['low_card_key', 'key'])
self.multikey_sort_projection = t[
['low_card_key', 'key', 'value']
].sort_by(['low_card_key', 'key'])
low_card_rolling_window = ibis.trailing_range_window(
ibis.interval(days=2),
order_by=t.repeated_timestamps,
group_by=t.low_card_key,
)
self.low_card_grouped_rolling = t.value.mean().over(
low_card_rolling_window
)
high_card_rolling_window = ibis.trailing_range_window(
ibis.interval(days=2),
order_by=t.repeated_timestamps,
group_by=t.key,
)
self.high_card_grouped_rolling = t.value.mean().over(
high_card_rolling_window
)
@udf.reduction(['double'], 'double')
def my_mean(series):
return series.mean()
self.low_card_grouped_rolling_udf_mean = my_mean(t.value).over(
low_card_rolling_window
)
self.high_card_grouped_rolling_udf_mean = my_mean(t.value).over(
high_card_rolling_window
)
@udf.analytic(['double'], 'double')
def my_zscore(series):
return (series - series.mean()) / series.std()
low_card_window = ibis.window(group_by=t.low_card_key)
high_card_window = ibis.window(group_by=t.key)
self.low_card_window_analytics_udf = my_zscore(t.value).over(
low_card_window
)
self.high_card_window_analytics_udf = my_zscore(t.value).over(
high_card_window
)
@udf.reduction(['double', 'double'], 'double')
def my_wm(v, w):
return np.average(v, weights=w)
self.low_card_grouped_rolling_udf_wm = my_wm(t.value, t.value).over(
low_card_rolling_window
)
self.high_card_grouped_rolling_udf_wm = my_wm(t.value, t.value).over(
            high_card_rolling_window
)
def time_high_cardinality_group_by(self):
self.high_card_group_by.execute()
def time_cast_to_date(self):
self.cast_to_dates.execute()
def time_cast_to_date_from_string(self):
self.cast_to_dates_from_strings.execute()
def time_multikey_group_by_with_mutate(self):
self.multikey_group_by_with_mutate.execute()
def time_simple_sort(self):
self.simple_sort.execute()
def time_multikey_sort(self):
self.multikey_sort.execute()
def time_simple_sort_projection(self):
self.simple_sort_projection.execute()
def time_multikey_sort_projection(self):
self.multikey_sort_projection.execute()
def time_low_card_grouped_rolling(self):
self.low_card_grouped_rolling.execute()
def time_high_card_grouped_rolling(self):
self.high_card_grouped_rolling.execute()
def time_low_card_grouped_rolling_udf(self):
self.low_card_grouped_rolling_udf_mean.execute()
def time_high_card_grouped_rolling_udf(self):
self.high_card_grouped_rolling_udf_mean.execute()
def time_low_card_window_analytics_udf(self):
self.low_card_window_analytics_udf.execute()
def time_high_card_grouped_rolling_udf_wm(self):
self.high_card_grouped_rolling_udf_wm.execute()
def time_low_card_grouped_rolling_udf_wm(self):
self.low_card_grouped_rolling_udf_wm.execute()
| apache-2.0 |
gandalfcode/gandalf | tests/paper_tests/freefalltest.py | 1 | 4798 | #==============================================================================
# freefalltest.py
# Run the freefall collapse test using initial conditions specified in the
# file 'freefall.dat'.
#==============================================================================
from gandalf.analysis.facade import *
from gandalf.analysis.data_fetcher import *
from gandalf.analysis.compute import lagrangian_radii
from gandalf.analysis.SimBuffer import SimBuffer, BufferException
import time
import matplotlib.pyplot as plt
import numpy as np
import math
from matplotlib import rc
from mpl_toolkits.axes_grid1 import AxesGrid
#--------------------------------------------------------------------------------------------------
def FreefallSolution(rho, mfrac):
    tff = np.sqrt(3.0*np.pi/32.0/rho)
r0 = math.pow(mfrac, 0.33333333333)
r = np.arange(0.0, 0.99999, 0.0001)
t = np.arccos(np.sqrt(r/r0)) + np.sqrt(r/r0)*np.sqrt(1.0 - r/r0)
    t *= 2.0/np.pi
return t,r
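# The curve above is the standard pressure-free collapse solution: writing
# r = r0*cos^2(theta), the radial equation of motion integrates to
#     t/t_ff = (2/pi) * (theta + sin(theta)*cos(theta)),
# which is exactly the arccos/sqrt combination used in FreefallSolution.
# A minimal sanity check of the normalisation (a sketch added for illustration,
# not part of the original test script):
def CheckFreefallNormalisation(rho=3.0/(4.0*np.pi)):
    t, r = FreefallSolution(rho, 1.0)
    # complete collapse (r -> 0) should occur at exactly one free-fall time
    assert abs(t[0] - 1.0) < 1.0e-6 and r[0] == 0.0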
#--------------------------------------------------------------------------------------------------
rc('font', **{'family': 'normal', 'weight' : 'bold', 'size' : 16})
rc('text', usetex=True)
# Set all plot limits
tmin = 0.00
tmax = 1.0
rmin = 0.005
rmax = 1.05
rho = 3.0/4.0/np.pi
radius = 1.0
stride = 8
sim_no = 0
# Analytical solutions for grav. acceleration and potential
r_acc = np.arange(rmin, 0.99999*rmax, 0.001)
a_acc = -r_acc
gpot_acc = 0.5*(r_acc*r_acc - 3.0*radius*radius)
# Run the simulation
mainsim = newsim('freefall.dat')
setupsim()
run()
# Get grav. data for simulation for plotting
r_data = get_data("r", sim=sim_no, snap=0)
a_data = get_data("ar", sim=sim_no, snap=0)
gpot_data = get_data("gpot", sim=sim_no, snap=0)
# Prepare 10%, 50% and 90% Lagrangian radii
CreateTimeData('lr1',lagrangian_radii,mfrac=0.05)
CreateTimeData('lr2',lagrangian_radii,mfrac=0.2)
CreateTimeData('lr3',lagrangian_radii,mfrac=0.5)
CreateTimeData('lr4',lagrangian_radii,mfrac=1.0)
data05 = get_time_data("t","lr1")
data20 = get_time_data("t","lr2")
data50 = get_time_data("t","lr3")
data100 = get_time_data("t","lr4")
# Get analytical solutions for each mass fraction
t05, r05 = FreefallSolution(rho, 0.05)
t20, r20 = FreefallSolution(rho, 0.2)
t50, r50 = FreefallSolution(rho, 0.5)
t100, r100 = FreefallSolution(rho, 1.0)
# Normalise freefall data to units of freefall time
data05.x_data /= 1.1107
data20.x_data /= 1.1107
data50.x_data /= 1.1107
data100.x_data /= 1.1107
# Create matplotlib figure object with shared x-axis
#--------------------------------------------------------------------------------------------------
#fig, axarr = plt.subplots(2, 1, sharex='col', sharey='row', figsize=(10,4))
fig, axarr = plt.subplots(2, 1, figsize=(7,8), sharex='col')
fig.subplots_adjust(hspace=0.0001, wspace=0.0001)
fig.subplots_adjust(bottom=0.07, top=0.97, left=0.12, right=0.98)
axarr[0].set_ylabel(r"$a_{_{\rm r}}$")
#axarr[0].set_xlabel(r"$r$")
axarr[0].set_xlim([0.0001, 1.04])
axarr[0].set_ylim(-1.1,0.1)
axarr[0].plot(r_acc, a_acc, color="red", linestyle='-', lw=0.5)
axarr[0].scatter(r_data[::stride], a_data[::stride], color='black', marker='.', s=4.0)
#axarr[0].legend(fontsize=12)
axarr[1].set_ylabel(r"$\phi$")
axarr[1].set_xlabel(r"$r$")
axarr[1].set_xlim([rmin, rmax])
axarr[1].set_ylim(-1.55,-0.85)
axarr[1].plot(r_acc, gpot_acc, color="red", linestyle='-', lw=0.5)
axarr[1].scatter(r_data[::stride], -gpot_data[::stride], color='black', marker='.', s=4.0)
#axarr[1].legend(fontsize=12)
plt.show()
fig.savefig('sphereaccel.pdf', dpi=50)
# Create matplotlib figure object for the Lagrangian radii plot
#--------------------------------------------------------------------------------------------------
fig2, axarr2 = plt.subplots(1, 1, figsize=(7,5))
fig2.subplots_adjust(hspace=0.001, wspace=0.001)
fig2.subplots_adjust(bottom=0.1, top=0.99, left=0.1, right=0.98)
axarr2.set_ylabel(r"$R_{_{\rm LAG}}$")
axarr2.set_xlabel(r"$t/t_{_{\rm FF}}$")
axarr2.set_xlim([tmin, tmax])
axarr2.set_ylim([rmin, rmax])
axarr2.plot(t05, r05, color="red", linestyle='-', lw=0.5)
axarr2.plot(t20, r20, color="red", linestyle='-', lw=0.5)
axarr2.plot(t50, r50, color="red", linestyle='-', lw=0.5)
axarr2.plot(t100, r100, color="red", linestyle='-', lw=0.5)
axarr2.scatter(data05.x_data, data05.y_data, color='black', marker='.', s=24.0, label='$5\%$')
axarr2.scatter(data20.x_data, data20.y_data, color='black', marker=',', s=24.0, label='$20\%$')
axarr2.scatter(data50.x_data, data50.y_data, color='black', marker='D', s=24.0, label='$50\%$')
axarr2.scatter(data100.x_data, data100.y_data, color='black', marker='^', s=24.0, label='$100\%$')
axarr2.legend(fontsize=12)
plt.show()
fig2.savefig('freefall.pdf', dpi=50)
# Prevent program from closing before showing plot window
block()
| gpl-2.0 |
cmu-db/dbms-library | dbdb/core/management/commands/process_visits.py | 2 | 9818 | # stdlib imports
import glob
import gzip
import re
import os
import sys
import operator
import dateutil.parser
from pprint import pprint
# django imports
from django.core.management import BaseCommand
from django.conf import settings
from django.db import connection
from django.db.models import Q
from dbdb.core.models import System
from dbdb.core.models import SystemFeature
from dbdb.core.models import SystemVersion
from dbdb.core.models import SystemVisit
from dbdb.core.models import SystemRecommendation
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('--max-threshold', type=int, default=99999,
help="Max visit count threshold per user")
parser.add_argument('--min-threshold', type=int, default=2,
help="Min visit count threshold per user")
parser.add_argument('--min-visit', type=int, default=2,
help="Min visit count threshold per system")
parser.add_argument('--ignore', action='append', type=str,
help="List of IP addresses to ignore")
parser.add_argument('--clear', action='store_true',
help="Clear out existing recommendations in the database")
parser.add_argument('--store', action='store_true',
help="Store the recommendation in the database")
parser.add_argument('--show-missing', action='store_true',
help="Show which systems are missing recommendations")
return
def show_missing(self, options):
systems = System.objects \
.filter(recommendation_to__isnull=True) \
.distinct() \
.order_by("name")
self.stdout.write("No Recommendations [%d]" % systems.count())
for system in systems:
num_visits = SystemVisit.objects.filter(system=system)
if options['ignore']:
num_visits = num_visits.filter(~Q(ip_address__in=options['ignore']))
self.stdout.write(" + %s [num_visits=%d]" % (system.name, num_visits.count()))
return
def handle(self, *args, **options):
if options['show_missing']:
self.show_missing(options)
return
# IF
# Get the list of all unique IPs
ip_addresses = [ ]
with connection.cursor() as cursor:
sql = "SELECT ip_address, user_agent, count(*) AS cnt FROM core_systemvisit "
sql_args = [ ]
# Remove ignored IPs
if options['ignore']:
sql += "WHERE ip_address NOT IN %s "
sql_args.append(options['ignore'])
sql += "GROUP BY ip_address, user_agent HAVING cnt BETWEEN %s AND %s"
sql_args.append(options['min_threshold'])
sql_args.append(options['max_threshold'])
self.stdout.write(sql)
self.stdout.write(str(sql_args))
cursor.execute(sql, tuple(sql_args))
ip_addresses = set([ (row[0],row[1]) for row in cursor.fetchall() ])
# WITH
# Get the # of visits per system
visits_per_system = { }
with connection.cursor() as cursor:
sql = "SELECT system_id, count(*) AS cnt FROM core_systemvisit "
sql_args = [ ]
# Remove ignored IPs
if options['ignore']:
sql += "WHERE ip_address NOT IN %s "
sql_args.append(options['ignore'])
sql += "GROUP BY system_id HAVING cnt > %s"
sql_args.append(str(options['min_visit']))
cursor.execute(sql, tuple(sql_args))
visits_per_system = dict([ (row[0],int(row[1])) for row in cursor.fetchall() ])
# WITH
#for system_id in sorted(visits_per_system.keys(), key=lambda x: -1*visits_per_system[x]):
#self.stdout.write(System.objects.get(id=system_id), "=>", visits_per_system[system_id])
#sys.exit(1)
# For each ip/user pair, get the systems that they viewed
all_visits = { }
user_info = { }
system_idx_xref = { }
idx_system_xref = { }
next_user_idx = 0
next_system_idx = 0
for ip, ua in ip_addresses:
systems = list()
#systems = set()
visits = SystemVisit.objects.filter(ip_address=ip, user_agent=ua)
for v in visits:
# Skip anything that did not have enough total visits
if not v.system.id in visits_per_system: continue
if not v.system.id in system_idx_xref:
system_idx_xref[v.system.id] = next_system_idx
idx_system_xref[next_system_idx] = v.system.id
next_system_idx += 1
if type(systems) is set:
systems.add(system_idx_xref[v.system.id])
else:
systems.append(system_idx_xref[v.system.id])
# Skip any user that visits only systems not above our threshold
# Otherwise we will have all zeros for the systems and this will
# break numpy when we split our training set
if len(systems) > 0:
all_visits[next_user_idx] = systems
user_info[next_user_idx] = (ip, ua)
next_user_idx += 1
## FOR
print("visits_per_system:", len(visits_per_system))
print("idx_system_xref:", len(idx_system_xref))
assert len(visits_per_system) == len(idx_system_xref)
system_cnt = System.objects.all().count()
#sys.exit(1)
#for user_idx in sorted(all_visits.keys(), key=lambda x: -1*len(all_visits[x]))[:10]:
#self.stdout.write(user_info[user_idx], "=>", len(all_visits[user_idx]))
#sys.exit(1)
self.stdout.write("# of Users: %d" % next_user_idx)
self.stdout.write("# of Sytems: %d (total=%d)" % (next_system_idx, system_cnt))
data = np.zeros((next_user_idx, next_system_idx))
for user_idx in all_visits.keys():
for system_idx in all_visits[user_idx]:
data[user_idx, system_idx] += 1
self.stdout.write(str(data))
sparsity = float(len(data.nonzero()[0]))
sparsity /= (data.shape[0] * data.shape[1])
sparsity *= 100
self.stdout.write('Sparsity: {:4.2f}%'.format(sparsity))
train_data, test_data = self.train_test_split(data)
similarity = self.compute_similarity(train_data)
self.stdout.write(str(similarity[:4, :4]))
pred = data.dot(similarity) / np.array([np.abs(similarity).sum(axis=1)])
self.stdout.write('MSE: ' + str(self.get_mse(pred, test_data)))
#self.stdout.write("# of IPs: %s" % len(ip_addresses))
output = { }
for system_idx in range(0, next_system_idx):
recommendations = self.top_k_systems(similarity, system_idx, 5)
system = System.objects.get(id=idx_system_xref[system_idx])
before_recs = SystemRecommendation.objects.filter(system=system)
before_output = [ "*BEFORE*" ]
for rec in before_recs:
before_output.append("+ %s [%f]" % (rec.recommendation, rec.score))
if options['clear']: before_recs.delete()
new_output = [ "*AFTER*" ]
for i in range(1, len(recommendations)):
score = similarity[system_idx, recommendations[i]]
other_sys = System.objects.get(id=idx_system_xref[recommendations[i]])
if system == other_sys: continue
if options['store']:
rec = SystemRecommendation(system=system, recommendation=other_sys, score=score)
rec.save()
new_output.append("+ %s [%f]" % (other_sys, score))
## FOR
output_buffer = str(system) + "\n"
for i in range(0, max(len(before_output), len(new_output))):
right = ""
left = ""
if i < len(before_output): left = before_output[i]
if i < len(new_output): right = new_output[i]
output_buffer += ' {0:30} {1}\n'.format(left, right)
## FOR
output[system.name] = output_buffer
## FOR
# Print them sorted by name
for sys_name in sorted (output.keys()):
print(output[sys_name])
return
def top_k_systems(self, similarity, system_idx, k=6):
#assert system_idx in mapper
return [x for x in np.argsort(similarity[system_idx,:])[:-k-1:-1]]
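    # Note on the slicing above: np.argsort is ascending, so [:-k-1:-1] walks
    # the sorted indices backwards and yields the k highest-similarity columns
    # in descending order; index 0 is normally the queried system itself (its
    # self-similarity is maximal), which is why the caller skips element 0.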
def train_test_split(self, data):
test = np.zeros(data.shape)
train = data.copy()
for user_idx in range(data.shape[0]):
test_data = np.random.choice(data[user_idx, :].nonzero()[0],
size=10,
replace=True)
train[user_idx, test_data] = 0.
test[user_idx, test_data] = data[user_idx, test_data]
# Test and training are truly disjoint
assert(np.all((train * test) == 0))
return train, test
def compute_similarity(self, data, epsilon=1e-9):
        # epsilon -> small number for handling divide-by-zero errors
sim = data.T.dot(data) + epsilon
norms = np.array([np.sqrt(np.diagonal(sim))])
return (sim / norms / norms.T)
def get_mse(self, pred, actual):
# Ignore nonzero terms.
pred = pred[actual.nonzero()].flatten()
actual = actual[actual.nonzero()].flatten()
return mean_squared_error(pred, actual)
pass
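# Minimal illustration of the item-item cosine similarity used by
# compute_similarity above (hypothetical visit counts, not real log data):
# systems visited by the same users score close to 1, disjoint ones close to 0.
def _demo_similarity():
    data = np.array([[1., 1., 0.],    # rows = users, columns = systems
                     [1., 1., 0.],
                     [0., 0., 1.]])
    sim = data.T.dot(data) + 1e-9
    norms = np.array([np.sqrt(np.diagonal(sim))])
    sim = sim / norms / norms.T
    assert sim[0, 1] > 0.99 and sim[0, 2] < 0.01
    return sim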
| apache-2.0 |
nmayorov/scikit-learn | examples/cluster/plot_digits_linkage.py | 369 | 2959 | """
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage option for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the "rich get richer" behavior of
agglomerative clustering, which tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
which ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
    # cost of the hierarchical clustering methods is strongly
# super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
| bsd-3-clause |
raghavrv/scikit-learn | sklearn/preprocessing/label.py | 8 | 27545 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Joel Nothman <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import sparse_min_max
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
See also
--------
sklearn.preprocessing.OneHotEncoder : encode categorical integer features
using a one-hot aka one-of-K scheme.
"""
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
y = column_or_1d(y, warn=True)
classes = np.unique(y)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
if diff:
raise ValueError("y contains new labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
utils.multiclass.type_of_target. Possible type are 'continuous',
'continuous-multioutput', 'binary', 'multiclass',
'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
sklearn.preprocessing.OneHotEncoder : encode categorical integer features
using a one-hot aka one-of-K scheme.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : array of shape [n_samples,] or [n_samples, n_classes]
Target values. The 2-d matrix should only contain 0 and 1,
            representing multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def fit_transform(self, y):
"""Fit label binarizer and transform multi-class labels to binary
labels.
The output of transform is sometimes referred to as
the 1-of-K coding scheme.
Parameters
----------
y : array or sparse matrix of shape [n_samples,] or \
[n_samples, n_classes]
Target values. The 2-d matrix should only contain 0 and 1,
            representing multilabel classification. Sparse matrix can be
CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
return self.fit(y).transform(y)
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as
the 1-of-K coding scheme.
Parameters
----------
y : array or sparse matrix of shape [n_samples,] or \
[n_samples, n_classes]
Target values. The 2-d matrix should only contain 0 and 1,
            representing multilabel classification. Sparse matrix can be
CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
check_is_fitted(self, 'classes_')
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
        greatest value. Typically, this allows using the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
check_is_fitted(self, 'classes_')
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
if y_type == 'unknown':
raise ValueError("The type of target data is not known")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if n_classes == 1:
if sparse_output:
return sp.csr_matrix((n_samples, 1), dtype=int)
else:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = np.in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
else:
raise ValueError("%s target data is not supported with label "
"binarization" % y_type)
if not sparse_output:
Y = Y.toarray()
Y = Y.astype(int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = Y.data.astype(int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
return np.repeat(classes[0], len(y))
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> from sklearn.preprocessing import MultiLabelBinarizer
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
See also
--------
sklearn.preprocessing.OneHotEncoder : encode categorical integer features
using a one-hot aka one-of-K scheme.
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
# ensure yt.indices keeps its current dtype
yt.indices = np.array(inverse[yt.indices], dtype=yt.indices.dtype,
copy=False)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
check_is_fitted(self, 'classes_')
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
check_is_fitted(self, 'classes_')
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
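# A short sketch (made-up labels) of the CSR construction performed by
# MultiLabelBinarizer._transform above: `indices` stores the column of every
# present label and `indptr[i]:indptr[i + 1]` delimits the labels of sample i.
def _demo_multilabel_csr():
    class_mapping = {'comedy': 0, 'sci-fi': 1, 'thriller': 2}
    y = [('sci-fi', 'thriller'), ('comedy',)]
    indices = array.array('i')
    indptr = array.array('i', [0])
    for labels in y:
        indices.extend(set(class_mapping[label] for label in labels))
        indptr.append(len(indices))
    data = np.ones(len(indices), dtype=int)
    return sp.csr_matrix((data, indices, indptr),
                         shape=(len(indptr) - 1, len(class_mapping))).toarray()
    # -> array([[0, 1, 1],
    #           [1, 0, 0]])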
| bsd-3-clause |
jmetzen/scikit-learn | sklearn/tests/test_discriminant_analysis.py | 22 | 12830 | import sys
import numpy as np
from nose import SkipTest
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.discriminant_analysis import _cov
# import reload
version = sys.version_info
if version[0] == 3:
# Python 3+ import for reload. Builtin in Python2
if version[1] == 3:
reload = None
else:
from importlib import reload
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
# Data is just 9 separable points in the plane
X6 = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y6 = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y7 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X7 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8, 3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct
# values for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = LinearDiscriminantAnalysis(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = LinearDiscriminantAnalysis(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_priors():
# Test priors (negative priors)
priors = np.array([0.5, -0.5])
clf = LinearDiscriminantAnalysis(priors=priors)
msg = "priors must be non-negative"
assert_raise_message(ValueError, msg, clf.fit, X, y)
    # Test that priors passed as a list are correctly handled (run to see
    # whether it fails)
clf = LinearDiscriminantAnalysis(priors=[0.5, 0.5])
clf.fit(X, y)
# Test that priors always sum to 1
priors = np.array([0.5, 0.6])
prior_norm = np.array([0.45, 0.55])
clf = LinearDiscriminantAnalysis(priors=priors)
assert_warns(UserWarning, clf.fit, X, y)
assert_array_almost_equal(clf.priors_, prior_norm, 2)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_lsqr = LinearDiscriminantAnalysis(solver="lsqr")
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = LinearDiscriminantAnalysis(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_explained_variance_ratio():
    # Test if the sum of the normalized eigenvalues equals 1.
# Also tests whether the explained_variance_ratio_ formed by the
# eigen solver is the same as the explained_variance_ratio_ formed
# by the svd solver
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_eigen.fit(X, y)
assert_almost_equal(clf_lda_eigen.explained_variance_ratio_.sum(), 1.0, 3)
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_svd.fit(X, y)
assert_almost_equal(clf_lda_svd.explained_variance_ratio_.sum(), 1.0, 3)
assert_array_almost_equal(clf_lda_svd.explained_variance_ratio_,
clf_lda_eigen.explained_variance_ratio_)
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = LinearDiscriminantAnalysis(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = LinearDiscriminantAnalysis(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
assert_array_equal(y_pred, y6)
    # Assert that it works with 1D data
y_pred1 = clf.fit(X7, y6).predict(X7)
assert_array_equal(y_pred1, y6)
    # Test probability estimates
y_proba_pred1 = clf.predict_proba(X7)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y6)
y_log_proba_pred1 = clf.predict_log_proba(X7)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X6, y7).predict(X6)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y7))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X6, y4)
def test_qda_priors():
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = QuadraticDiscriminantAnalysis(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X6, y6).predict(X6)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = QuadraticDiscriminantAnalysis().fit(X6, y6)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = QuadraticDiscriminantAnalysis(store_covariances=True).fit(X6, y6)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = QuadraticDiscriminantAnalysis()
with ignore_warnings():
y_pred = clf.fit(X2, y6).predict(X2)
assert_true(np.any(y_pred != y6))
# adding a little regularization fixes the problem
clf = QuadraticDiscriminantAnalysis(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y6)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y6)
# Case n_samples_in_a_class < n_features
clf = QuadraticDiscriminantAnalysis(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
def test_deprecated_lda_qda_deprecation():
if reload is None:
raise SkipTest("Can't reload module on Python3.3")
def import_lda_module():
import sklearn.lda
# ensure that we trigger DeprecationWarning even if the sklearn.lda
# was loaded previously by another test.
reload(sklearn.lda)
return sklearn.lda
lda = assert_warns(DeprecationWarning, import_lda_module)
assert lda.LDA is LinearDiscriminantAnalysis
def import_qda_module():
import sklearn.qda
# ensure that we trigger DeprecationWarning even if the sklearn.qda
# was loaded previously by another test.
reload(sklearn.qda)
return sklearn.qda
qda = assert_warns(DeprecationWarning, import_qda_module)
assert qda.QDA is QuadraticDiscriminantAnalysis
def test_covariance():
x, y = make_blobs(n_samples=100, n_features=5,
centers=1, random_state=42)
# make features correlated
x = np.dot(x, np.arange(x.shape[1] ** 2).reshape(x.shape[1], x.shape[1]))
c_e = _cov(x, 'empirical')
assert_almost_equal(c_e, c_e.T)
c_s = _cov(x, 'auto')
assert_almost_equal(c_s, c_s.T)
| bsd-3-clause |
robbymeals/scikit-learn | sklearn/datasets/mldata.py | 309 | 7838 | """Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
"""Convert a raw name for a data set in a mldata.org filename."""
dataname = dataname.lower().replace(' ', '-')
return re.sub(r'[().]', '', dataname)
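# Worked examples of the normalization above (editor's note, not part of the
# original module):
#   "Whistler Daily Snowfall" -> 'whistler-daily-snowfall'
#   "datasets-UCI iris"       -> 'datasets-uci-iris'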
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
If the file does not exist yet, it is downloaded from mldata.org.
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
3) the data array is stored as `n_features x n_samples`, and thus needs
to be transposed to match the `sklearn` standard
Keyword arguments allow these defaults to be adapted to specific data sets
(see parameters `target_name`, `data_name`, `transpose_data`, and
the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch object with their original name.
Parameters
----------
dataname:
Name of the data set on mldata.org,
e.g.: "leukemia", "Whistler Daily Snowfall", etc.
The raw name is automatically converted to an mldata.org URL.
target_name: optional, default: 'label'
Name or index of the column containing the target values.
data_name: optional, default: 'data'
Name or index of the column containing the data.
transpose_data: optional, default: True
If True, transpose the downloaded data array.
data_home: optional, default: None
Specify another download and cache folder for the data sets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> import tempfile
>>> test_data_home = tempfile.mkdtemp()
>>> iris = fetch_mldata('iris', data_home=test_data_home)
>>> iris.target.shape
(150,)
>>> iris.data.shape
(150, 4)
Load the 'leukemia' dataset from mldata.org, which needs to be transposed
to respect the sklearn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=True,
... data_home=test_data_home)
>>> leuk.data.shape
(72, 7129)
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0, data_home=test_data_home)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0',
... data_home=test_data_home)
>>> import shutil
>>> shutil.rmtree(test_data_home)
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % quote(dataname)
try:
mldata_url = urlopen(urlname)
except HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
# if target or data names are indices, transform them into names
if isinstance(target_name, numbers.Integral):
target_name = col_names[target_name]
if isinstance(data_name, numbers.Integral):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to sklearn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
if not sp.sparse.issparse(dataset['target']):
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
# The following is used by nosetests to setup the docstring tests fixture
def setup_module(module):
# setup mock urllib2 module to avoid downloading from mldata.org
from sklearn.utils.testing import install_mldata_mock
install_mldata_mock({
'iris': {
'data': np.empty((150, 4)),
'label': np.empty(150),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
'leukemia': {
'data': np.empty((72, 7129)),
},
})
def teardown_module(module):
from sklearn.utils.testing import uninstall_mldata_mock
uninstall_mldata_mock()
| bsd-3-clause |
jpautom/scikit-learn | sklearn/discriminant_analysis.py | 9 | 28405 | """
Linear Discriminant Analysis and Quadratic Discriminant Analysis
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .externals.six.moves import xrange
from .base import BaseEstimator, TransformerMixin, ClassifierMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .utils.multiclass import check_classification_targets
from .preprocessing import StandardScaler
__all__ = ['LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = ledoit_wolf(X)[0]
s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :] # rescale
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
raise TypeError('shrinkage must be a string, an int or a float')
return s
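# Hedged usage sketch (editor's addition, not part of the public API): the
# three shrinkage modes of _cov above can be exercised as
#   >>> rng = np.random.RandomState(0)
#   >>> X = rng.randn(50, 3)
#   >>> s_emp = _cov(X, None)    # plain empirical covariance
#   >>> s_lw = _cov(X, 'auto')   # Ledoit-Wolf shrinkage, rescaled to X's units
#   >>> s_fix = _cov(X, 0.2)     # fixed shrinkage towards a scaled identity
# Each call returns a symmetric (3, 3) matrix.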
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
means : array-like, shape (n_classes, n_features)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
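# Hedged sketch (editor's addition): on a toy two-class problem the helpers
# above behave as follows
#   >>> X = np.array([[0., 0.], [1., 1.], [4., 4.], [5., 5.]])
#   >>> y = np.array([0, 0, 1, 1])
#   >>> _class_means(X, y)       # one row of per-feature means per class
#   array([[ 0.5,  0.5],
#          [ 4.5,  4.5]])
#   >>> _class_cov(X, y).shape   # (prior-weighted) average of per-class covariances
#   (2, 2)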
class LinearDiscriminantAnalysis(BaseEstimator, LinearClassifierMixin,
TransformerMixin):
"""Linear Discriminant Analysis
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
.. versionadded:: 0.17
*LinearDiscriminantAnalysis*.
.. versionchanged:: 0.17
Deprecated :class:`lda.LDA` has been moved to *LinearDiscriminantAnalysis*.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default).
Does not compute the covariance matrix, therefore this solver is
recommended for data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
Number of components (< n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
.. versionadded:: 0.17
tol : float, optional
Threshold used for rank estimation in SVD solver.
.. versionadded:: 0.17
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : array, shape (n_classes,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0. Only available when eigen
solver is used.
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis: Quadratic
Discriminant Analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LinearDiscriminantAnalysis()
>>> clf.fit(X, y)
LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None,
solver='svd', store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
self.explained_variance_ratio_ = np.sort(evals / np.sum(evals))[::-1]
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_svd(self, X, y):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if self.store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
# 1) within-class (univariate) scaling by the within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > self.tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
+ np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y, store_covariance=None, tol=None):
"""Fit LinearDiscriminantAnalysis model according to the given
training data and parameters.
.. versionchanged:: 0.17
Deprecated *store_covariance* has been moved to the main constructor.
.. versionchanged:: 0.17
Deprecated *tol* has been moved to the main constructor.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
if store_covariance:
warnings.warn("The parameter 'store_covariance' is deprecated as "
"of version 0.17 and will be removed in 0.19. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
self.store_covariance = store_covariance
if tol:
warnings.warn("The parameter 'tol' is deprecated as of version "
"0.17 and will be removed in 0.19. The parameter is "
"no longer necessary because the value is set via "
"the estimator initialisation or set_params method.",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y, ensure_min_samples=2, estimator=self)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = bincount(y_t) / float(len(y))
else:
self.priors_ = np.asarray(self.priors)
if (self.priors_ < 0).any():
raise ValueError("priors must be non-negative")
if self.priors_.sum() != 1:
warnings.warn("The priors do not sum to 1. Renormalizing",
UserWarning)
self.priors_ = self.priors_ / self.priors_.sum()
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
n_components = X.shape[1] if self.n_components is None \
else self.n_components
return X_new[:, :n_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
class QuadraticDiscriminantAnalysis(BaseEstimator, ClassifierMixin):
"""
Quadratic Discriminant Analysis
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
.. versionadded:: 0.17
*QuadraticDiscriminantAnalysis*
.. versionchanged:: 0.17
Deprecated :class:`qda.QDA` has been moved to *QuadraticDiscriminantAnalysis*.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
Attributes
----------
covariances_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
store_covariances : boolean
If True the covariance matrices are computed and stored in the
`self.covariances_` attribute.
.. versionadded:: 0.17
tol : float, optional, default 1.0e-4
Threshold used for rank estimation.
.. versionadded:: 0.17
Examples
--------
>>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QuadraticDiscriminantAnalysis()
>>> clf.fit(X, y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
QuadraticDiscriminantAnalysis(priors=None, reg_param=0.0,
store_covariances=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.discriminant_analysis.LinearDiscriminantAnalysis: Linear
Discriminant Analysis
"""
def __init__(self, priors=None, reg_param=0., store_covariances=False,
tol=1.0e-4):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
self.store_covariances = store_covariances
self.tol = tol
def fit(self, X, y, store_covariances=None, tol=None):
"""Fit the model according to the given training data and parameters.
.. versionchanged:: 0.17
Deprecated *store_covariances* has been moved to the main constructor.
.. versionchanged:: 0.17
Deprecated *tol* has been moved to the main constructor.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
"""
if store_covariances:
warnings.warn("The parameter 'store_covariances' is deprecated as "
"of version 0.17 and will be removed in 0.19. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
self.store_covariances = store_covariances
if tol:
warnings.warn("The parameter 'tol' is deprecated as of version "
"0.17 and will be removed in 0.19. The parameter is "
"no longer necessary because the value is set via "
"the estimator initialisation or set_params method.",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y)
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
if self.store_covariances:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if self.store_covariances:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if self.store_covariances:
self.covariances_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
check_is_fitted(self, 'classes_')
X = check_array(X)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
return (-0.5 * (norm2 + u) + np.log(self.priors_))
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
| bsd-3-clause |
Sentient07/scikit-learn | examples/missing_values.py | 71 | 3055 | """
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via cross-validation.
Sometimes dropping rows or using marker values is more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high-magnitude variables
that could dominate the results (a so-called 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.model_selection import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = int(np.floor(n_samples * missing_rate))
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
| bsd-3-clause |
michelleliu103/w205_finalproject | Sentiment_Functions.py | 1 | 2647 | import requests
import pandas as pd
import numpy as np
import json
import psycopg2
def sample_tweets():
conn = psycopg2.connect(database="finalprojecttweets", user="postgres", password="pass", host="localhost", port="5432")
cur = conn.cursor()
cur.execute("SELECT id, tweet from tweets order by random() limit 10")
tweets = cur.fetchall()
return tweets
def sentiment_analysis_api_1(tweet_list):
"""From text-processing.com"""
conn = psycopg2.connect(database="finalprojecttweets", user="postgres", password="pass", host="localhost", port="5432")
cur = conn.cursor()
count = 0
for tweet in tweet_list:
print("API Request #: " + str(count))
count += 1
api_request = requests.post("https://japerk-text-processing.p.mashape.com/sentiment/", headers={
"X-Mashape-Key": "",
"Content-Type": "application/x-www-form-urlencoded",
"Accept": "application/json"
}, data={'text': tweet[1]})
rating_dict = json.loads(api_request.text)
rating = rating_dict["label"]
if rating == "neutral":
cur.execute("UPDATE tweets SET japerk=0 WHERE id =%s", (tweet[0],))
elif rating == "pos":
cur.execute("UPDATE tweets SET japerk=1 WHERE id =%s", (tweet[0],))
elif rating == "neg":
cur.execute("UPDATE tweets SET japerk=-1 WHERE id =%s", (tweet[0],))
conn.commit()
conn.close()
def sentiment_analysis_api_2(tweet_list):
"""From sentimentanalysis.net"""
conn = psycopg2.connect(database="finalprojecttweets", user="postgres", password="pass", host="localhost", port="5432")
cur = conn.cursor()
count = 0
for tweet in tweet_list:
print("API Request #: " + str(count))
count += 1
api_request = requests.post("https://textanalysis-text-sentiment-v1.p.mashape.com/sentiment-analyzer", headers={
"X-Mashape-Key": "",
"Content-Type": "application/x-www-form-urlencoded",
"Accept": "application/json"
}, data={'text': tweet[1]})
rating_dict = json.loads(api_request.text)
rating = rating_dict["sentiment"]
if rating == "neutral":
cur.execute("UPDATE tweets SET textanalysis=0 WHERE id =%s", (tweet[0],))
elif rating == "positive":
cur.execute("UPDATE tweets SET textanalysis=1 WHERE id =%s", (tweet[0],))
elif rating == "negative":
cur.execute("UPDATE tweets SET textanalysis=-1 WHERE id =%s", (tweet[0],))
conn.commit()
conn.close()
tweets = sample_tweets()
sentiment_analysis_api_1(tweets)
sentiment_analysis_api_2(tweets)
| mit |
bthirion/scikit-learn | sklearn/metrics/cluster/__init__.py | 91 | 1468 | """
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses a ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import fowlkes_mallows_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .unsupervised import calinski_harabaz_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"fowlkes_mallows_score", "entropy", "silhouette_samples",
"silhouette_score", "calinski_harabaz_score", "consensus_score"]
| bsd-3-clause |
TiKunze/CanMics | src/python/01_SingleChannel/3pop/EIN/Oscillationtest/Example_RUM_Oscillations_2pop_Nii.py | 1 | 5162 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 22 17:15:03 2015
@author: Tim Kunze
Copyright (C) 2015, Tim Kunze. All rights reserved.
This script checks whether there are fixed points or limit cycles in the non-activated state (pext = 0 s^-1)
within the respective range of the He-Hi diagram (3pop)
"""
###############################################################################
#
# Imports
#
###############################################################################
import numpy as np
import sys
import scipy as sc
import os # to enable some C commands (cwd,listdir)
import matplotlib.pyplot as plt
import sys
sys.path.append("/home/raid3/tkunze/Documents/Programming/JansenModels/Kunze_JuR/003_ModelDevelopment/001_Unifying_Framework")
#2pop version
import Models.Generic_fin_01 as FCV2
import Simulation_And_Analysis.Sim_Simulation_003 as simulate
currpath = '/home/raid3/tkunze/Documents/Programming/JansenModels/Kunze_JuR/003_ModelDevelopment/001_Unifying_Framework/EIPy_StateSpaceExploration'
os.chdir(currpath)
#%%
###############################################################################
#
# Main
#
###############################################################################
JR = FCV2.JuR()
dt = 1000e-6/0.01 # to account for implementation of the model in dimensionless form
JR.integ_stepsize = dt
JR.b1=0 #controls connection pe and ep: 1-> connected
JR.b2=0 #controls input : 1-> input to EI
JR.b3=0 #controls self-conn of PC : 1-> No selfconn PP
JR.b4=0 #controls self-conn of IIN : 1-> No selfconn II
JR.n=2
JR.coupling_II = np.zeros((2,2))
JR.coupling_EX = np.zeros((2,2))
JR.distanceMatrix = (np.ones((2,2)) - np.identity(2))*0.001
JR.init = np.zeros((10,JR.n))
JR.c_In_ii=0
JR.c_In_ex=0
JR.c_pp=113.4
JR.c_ii=33.25
JR.configure()
#%%
###############################################################################
#
## RUM show case
###############################################################################
#%% 2pop version
lenge=940
inte=100
JR.H_e=7e-3
JR.H_i=15e-3
t_simulation = 5/0.01
N=t_simulation/dt
time = np.arange(0,N*dt,dt)
p_sim_ex = np.zeros((N,JR.n))
p_sim_i = np.zeros((N,JR.n))
p_sim_ex[1000:1000+lenge,:]=inte
signal,sig_ei,sig_ii,impact,data = simulate.simulate_network_SHC(JR,p_sim_ex,p_sim_i,t_simulation)
plt.figure(25)
plt.clf()
plt.subplot(211)
plt.plot(time*0.01,signal[:,0]/560)
plt.grid()
plt.plot([(1000+lenge)/1000, (1000+lenge)/1000 ],[-0.01, 0.015],'k')
plt.xlabel('time in s')
plt.ylabel('pyrapot in V')
plt.title('he%.1f:, hi:%.1f | inte=%.1f | length:%.1fms' %(JR.H_e*1000,JR.H_i*1000,inte,lenge))
plt.subplot(212)
plt.plot(time*0.01,p_sim_ex+p_sim_i)
plt.ylabel('pext')
plt.xlabel('time in s')
###############################################################################
#
## Oscillation test
###############################################################################
#%% 3pop version
t_simulation = 1.5/0.01
N=t_simulation/dt
time = np.arange(0,N*dt,dt)
p_sim_ex = np.zeros((N,JR.n))
p_sim_i = np.zeros((N,JR.n))
he_range = np.arange(0,16.1,0.1)*1e-3
hi_range = np.arange(0,35.1,0.1)*1e-3
state_grid = np.zeros((len(he_range),len(hi_range),2))
sim_grid=np.zeros((len(he_range),len(hi_range),N))
i=0
j=0
for he in he_range:
j=0
JR.H_e=he
for hi in hi_range:
JR.H_i=hi
print "he: %.2fmV | hi: %.2fmV" %(he*1000,hi*1000)
signal,sig_ei,sig_ii,impact,data = simulate.simulate_network_SHC(JR,p_sim_ex,p_sim_i,t_simulation)
state_grid[i,j,0] = np.min(signal[1000:,0])
state_grid[i,j,1] = np.max(signal[1000:,0])
sim_grid[i,j,:]=signal[:,0]
j+=1
i+=1
np.save('RUM_oscitest_2popNii_he0i0k1t16_hi0i0k1t35_pext0_state_grid.npy',state_grid)
np.save('RUM_oscitest_2popNii_he0i0k1t16_hi0i0k1t35_pext0_sim_grid.npy',sim_grid)
#
###############################################################################
#
## Analysis
#
###############################################################################
stategrid=np.load('RUM_oscitest_2popNpp_he0i0k1t16_hi0i0k1t20_pext0_state_grid.npy')
simgrid=np.load('RUM_oscitest_2popNpp_he0i0k1t16_hi0i0k1t20_pext0_sim_grid.npy')
he_pos=80
hi_pos=100
plt.figure(50)
plt.clf()
plt.plot(simgrid[he_pos,hi_pos,:]/560)
[rows,cols,vals]=np.shape(stategrid)
oscigrid=np.zeros((rows,cols))
for i in range(rows):
for j in range(cols):
if (stategrid[i,j,1]/560-stategrid[i,j,0]/560)<0.001:
oscigrid[i,j]=0 #no oscillations
else:
oscigrid[i,j]=1 # oscillations occur
he_range = np.arange(0,16.1,0.1)*1e-3
hi_range = np.arange(0,35.1,0.1)*1e-3
min_hi=np.min(hi_range)
max_hi=np.max(hi_range)
min_he=np.min(he_range)
max_he=np.max(he_range)
plt.figure(2)
#state_grid=cleargrid(state_grid)
plt.clf()
plt.imshow(np.flipud(oscigrid), aspect='auto', extent = (min_hi,max_hi,min_he,max_he),interpolation='none')
plt.ylabel('He in V')
plt.xlabel('Hi in V')
plt.title('Oscillation diagram')
plt.colorbar()
plt.savefig('RUM_2popNii_oscillationstest.pdf', format='pdf', dpi=1000)
#
| gpl-3.0 |
maciejkula/scipy | scipy/interpolate/interpolate.py | 1 | 75440 | """ Classes for interpolating values.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['interp1d', 'interp2d', 'spline', 'spleval', 'splmake', 'spltopp',
'ppform', 'lagrange', 'PPoly', 'BPoly', 'RegularGridInterpolator',
'interpn']
import itertools
from numpy import (shape, sometrue, array, transpose, searchsorted,
ones, logical_or, atleast_1d, atleast_2d, ravel,
dot, poly1d, asarray, intp)
import numpy as np
import scipy.linalg
import scipy.special as spec
from scipy.special import comb
import math
import warnings
import functools
import operator
from scipy.lib.six import xrange, integer_types
from . import fitpack
from . import dfitpack
from . import _fitpack
from .polyint import _Interpolator1D
from . import _ppoly
from .fitpack2 import RectBivariateSpline
from .interpnd import _ndim_coords_from_arrays
def reduce_sometrue(a):
"""Collapse an array with `sometrue` along axis 0 until the result is 1-D."""
all = a
while len(shape(all)) > 1:
all = sometrue(all, axis=0)
return all
def prod(x):
"""Product of a list of numbers; ~40x faster vs np.prod for Python tuples"""
if len(x) == 0:
return 1
return functools.reduce(operator.mul, x)
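# For example, prod((3, 4, 5)) == 60 and prod(()) == 1, mirroring np.prod
# while avoiding array-conversion overhead for small Python tuples
# (editor's note).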
def lagrange(x, w):
"""
Return a Lagrange interpolating polynomial.
Given two 1-D arrays `x` and `w,` returns the Lagrange interpolating
polynomial through the points ``(x, w)``.
Warning: This implementation is numerically unstable. Do not expect to
be able to use more than about 20 points even if they are chosen optimally.
Parameters
----------
x : array_like
`x` represents the x-coordinates of a set of datapoints.
w : array_like
`w` represents the y-coordinates of a set of datapoints, i.e. f(`x`).
Returns
-------
lagrange : numpy.poly1d instance
The Lagrange interpolating polynomial.
"""
M = len(x)
p = poly1d(0.0)
for j in xrange(M):
pt = poly1d(w[j])
for k in xrange(M):
if k == j:
continue
fac = x[j]-x[k]
pt *= poly1d([1.0, -x[k]])/fac
p += pt
return p
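# Hedged usage sketch (editor's addition): the Lagrange form above reproduces
# the data it is fitted to, e.g.
#   >>> x = [0, 1, 2]
#   >>> w = [1, 3, 2]
#   >>> p = lagrange(x, w)
#   >>> [round(p(xi), 10) for xi in x]
#   [1.0, 3.0, 2.0]
# Keep the number of points small (well under ~20), as warned in the docstring.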
# !! Need to find argument for keeping initialize. If it isn't
# !! found, get rid of it!
class interp2d(object):
"""
interp2d(x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=nan)
Interpolate over a 2-D grid.
`x`, `y` and `z` are arrays of values used to approximate some function
f: ``z = f(x, y)``. This class returns a function whose call method uses
spline interpolation to find the value of new points.
If `x` and `y` represent a regular grid, consider using
RectBivariateSpline.
Methods
-------
__call__
Parameters
----------
x, y : array_like
Arrays defining the data point coordinates.
If the points lie on a regular grid, `x` can specify the column
coordinates and `y` the row coordinates, for example::
>>> x = [0,1,2]; y = [0,3]; z = [[1,2,3], [4,5,6]]
Otherwise, `x` and `y` must specify the full coordinates for each
point, for example::
>>> x = [0,1,2,0,1,2]; y = [0,0,0,3,3,3]; z = [1,2,3,4,5,6]
If `x` and `y` are multi-dimensional, they are flattened before use.
z : array_like
The values of the function to interpolate at the data points. If
`z` is a multi-dimensional array, it is flattened before use. The
length of a flattened `z` array is either
len(`x`)*len(`y`) if `x` and `y` specify the column and row coordinates
or ``len(z) == len(x) == len(y)`` if `x` and `y` specify coordinates
for each point.
kind : {'linear', 'cubic', 'quintic'}, optional
The kind of spline interpolation to use. Default is 'linear'.
copy : bool, optional
If True, the class makes internal copies of x, y and z.
If False, references may be used. The default is to copy.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data (x,y), a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If omitted (None), values outside
the domain are extrapolated.
Returns
-------
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
Interpolated values at input coordinates.
See Also
--------
RectBivariateSpline :
Much faster 2D interpolation if your input data is on a grid
bisplrep, bisplev :
Spline interpolation based on FITPACK
BivariateSpline : a more recent wrapper of the FITPACK routines
interp1d : one-dimensional version of this function
Notes
-----
The minimum number of data points required along the interpolation
axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for
quintic interpolation.
The interpolator is constructed by `bisplrep`, with a smoothing factor
of 0. If more control over smoothing is needed, `bisplrep` should be
used directly.
Examples
--------
Construct a 2-D grid and interpolate on it:
>>> from scipy import interpolate
>>> x = np.arange(-5.01, 5.01, 0.25)
>>> y = np.arange(-5.01, 5.01, 0.25)
>>> xx, yy = np.meshgrid(x, y)
>>> z = np.sin(xx**2+yy**2)
>>> f = interpolate.interp2d(x, y, z, kind='cubic')
Now use the obtained interpolation function and plot the result:
>>> xnew = np.arange(-5.01, 5.01, 1e-2)
>>> ynew = np.arange(-5.01, 5.01, 1e-2)
>>> znew = f(xnew, ynew)
>>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-')
>>> plt.show()
"""
def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=None):
x = ravel(x)
y = ravel(y)
z = asarray(z)
rectangular_grid = (z.size == len(x) * len(y))
if rectangular_grid:
if z.ndim == 2:
if z.shape != (len(y), len(x)):
raise ValueError("When on a regular grid with x.size = m "
"and y.size = n, if z.ndim == 2, then z "
"must have shape (n, m)")
if not np.all(x[1:] >= x[:-1]):
j = np.argsort(x)
x = x[j]
z = z[:, j]
if not np.all(y[1:] >= y[:-1]):
j = np.argsort(y)
y = y[j]
z = z[j, :]
z = ravel(z.T)
else:
z = ravel(z)
if len(x) != len(y):
raise ValueError(
"x and y must have equal lengths for non rectangular grid")
if len(z) != len(x):
raise ValueError(
"Invalid length for input z for non rectangular grid")
try:
kx = ky = {'linear': 1,
'cubic': 3,
'quintic': 5}[kind]
except KeyError:
raise ValueError("Unsupported interpolation type.")
if not rectangular_grid:
# TODO: surfit is really not meant for interpolation!
self.tck = fitpack.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0)
else:
nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(
x, y, z, None, None, None, None,
kx=kx, ky=ky, s=0.0)
self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)],
kx, ky)
self.bounds_error = bounds_error
self.fill_value = fill_value
self.x, self.y, self.z = [array(a, copy=copy) for a in (x, y, z)]
self.x_min, self.x_max = np.amin(x), np.amax(x)
self.y_min, self.y_max = np.amin(y), np.amax(y)
def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
"""Interpolate the function.
Parameters
----------
x : 1D array
x-coordinates of the mesh on which to interpolate.
y : 1D array
y-coordinates of the mesh on which to interpolate.
dx : int >= 0, < kx
Order of partial derivatives in x.
dy : int >= 0, < ky
Order of partial derivatives in y.
assume_sorted : bool, optional
If False, values of `x` and `y` can be in any order and they are
sorted first.
If True, `x` and `y` have to be arrays of monotonically
increasing values.
Returns
-------
z : 2D array with shape (len(y), len(x))
The interpolated values.
"""
x = atleast_1d(x)
y = atleast_1d(y)
if x.ndim != 1 or y.ndim != 1:
raise ValueError("x and y should both be 1-D arrays")
if not assume_sorted:
x = np.sort(x)
y = np.sort(y)
if self.bounds_error or self.fill_value is not None:
out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
out_of_bounds_y = (y < self.y_min) | (y > self.y_max)
any_out_of_bounds_x = np.any(out_of_bounds_x)
any_out_of_bounds_y = np.any(out_of_bounds_y)
if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y):
raise ValueError("Values out of range; x must be in %r, y in %r"
% ((self.x_min, self.x_max),
(self.y_min, self.y_max)))
z = fitpack.bisplev(x, y, self.tck, dx, dy)
z = atleast_2d(z)
z = transpose(z)
if self.fill_value is not None:
if any_out_of_bounds_x:
z[:, out_of_bounds_x] = self.fill_value
if any_out_of_bounds_y:
z[out_of_bounds_y, :] = self.fill_value
if len(z) == 1:
z = z[0]
return array(z)
class interp1d(_Interpolator1D):
"""
Interpolate a 1-D function.
`x` and `y` are arrays of values used to approximate some function f:
``y = f(x)``. This class returns a function whose call method uses
interpolation to find the value of new points.
Parameters
----------
x : (N,) array_like
A 1-D array of real values.
y : (...,N,...) array_like
A N-D array of real values. The length of `y` along the interpolation
axis must be equal to the length of `x`.
kind : str or int, optional
Specifies the kind of interpolation as a string
('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'
where 'slinear', 'quadratic' and 'cubic' refer to a spline
interpolation of first, second or third order) or as an integer
specifying the order of the spline interpolator to use.
Default is 'linear'.
axis : int, optional
Specifies the axis of `y` along which to interpolate.
Interpolation defaults to the last axis of `y`.
copy : bool, optional
If True, the class makes internal copies of x and y.
If False, references to `x` and `y` are used. The default is to copy.
bounds_error : bool, optional
If True, a ValueError is raised any time interpolation is attempted on
a value outside of the range of x (where extrapolation is
necessary). If False, out of bounds values are assigned `fill_value`.
By default, an error is raised.
fill_value : float, optional
If provided, then this value will be used to fill in for requested
points outside of the data range. If not provided, then the default
is NaN.
assume_sorted : bool, optional
If False, values of `x` can be in any order and they are sorted first.
If True, `x` has to be an array of monotonically increasing values.
Methods
-------
__call__
See Also
--------
splrep, splev
Spline interpolation/smoothing based on FITPACK.
UnivariateSpline : An object-oriented wrapper of the FITPACK routines.
interp2d : 2-D interpolation
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import interpolate
>>> x = np.arange(0, 10)
>>> y = np.exp(-x/3.0)
>>> f = interpolate.interp1d(x, y)
>>> xnew = np.arange(0, 9, 0.1)
>>> ynew = f(xnew) # use interpolation function returned by `interp1d`
>>> plt.plot(x, y, 'o', xnew, ynew, '-')
>>> plt.show()
"""
def __init__(self, x, y, kind='linear', axis=-1,
copy=True, bounds_error=True, fill_value=np.nan,
assume_sorted=False):
""" Initialize a 1D linear interpolation class."""
_Interpolator1D.__init__(self, x, y, axis=axis)
self.copy = copy
self.bounds_error = bounds_error
self.fill_value = fill_value
if kind in ['zero', 'slinear', 'quadratic', 'cubic']:
order = {'nearest': 0, 'zero': 0,'slinear': 1,
'quadratic': 2, 'cubic': 3}[kind]
kind = 'spline'
elif isinstance(kind, int):
order = kind
kind = 'spline'
elif kind not in ('linear', 'nearest'):
raise NotImplementedError("%s is unsupported: Use fitpack "
"routines for other types." % kind)
x = array(x, copy=self.copy)
y = array(y, copy=self.copy)
if not assume_sorted:
ind = np.argsort(x)
x = x[ind]
y = np.take(y, ind, axis=axis)
if x.ndim != 1:
raise ValueError("the x array must have exactly one dimension.")
if y.ndim == 0:
raise ValueError("the y array must have at least one dimension.")
# Force-cast y to a floating-point type, if it's not yet one
if not issubclass(y.dtype.type, np.inexact):
y = y.astype(np.float_)
# Backward compatibility
self.axis = axis % y.ndim
# Interpolation goes internally along the first axis
self.y = y
y = self._reshape_yi(y)
# Adjust to interpolation kind; store reference to *unbound*
# interpolation methods, in order to avoid circular references to self
# stored in the bound instance methods, and therefore delayed garbage
# collection. See: http://docs.python.org/2/reference/datamodel.html
if kind in ('linear', 'nearest'):
# Make a "view" of the y array that is rotated to the interpolation
# axis.
minval = 2
if kind == 'nearest':
self.x_bds = (x[1:] + x[:-1]) / 2.0
self._call = self.__class__._call_nearest
else:
self._call = self.__class__._call_linear
else:
minval = order + 1
self._spline = splmake(x, y, order=order)
self._call = self.__class__._call_spline
if len(x) < minval:
raise ValueError("x and y arrays must have at "
"least %d entries" % minval)
self._kind = kind
self.x = x
self._y = y
def _call_linear(self, x_new):
# 2. Find where in the original data the values to interpolate
# would be inserted.
# Note: If x_new[n] == x[m], then m is returned by searchsorted.
x_new_indices = searchsorted(self.x, x_new)
# 3. Clip x_new_indices so that they are within the range of
# self.x indices and at least 1. Removes mis-interpolation
# of x_new[n] = x[0]
x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int)
# 4. Calculate the slope of regions that each x_new value falls in.
lo = x_new_indices - 1
hi = x_new_indices
x_lo = self.x[lo]
x_hi = self.x[hi]
y_lo = self._y[lo]
y_hi = self._y[hi]
# Note that the following two expressions rely on the specifics of the
# broadcasting semantics.
slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
# 5. Calculate the actual value for each entry in x_new.
y_new = slope*(x_new - x_lo)[:, None] + y_lo
return y_new
def _call_nearest(self, x_new):
""" Find nearest neighbour interpolated y_new = f(x_new)."""
# 2. Find where in the averaged data the values to interpolate
# would be inserted.
# Note: use side='left' (right) to searchsorted() to define the
# halfway point to be nearest to the left (right) neighbour
x_new_indices = searchsorted(self.x_bds, x_new, side='left')
# 3. Clip x_new_indices so that they are within the range of x indices.
x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp)
# 4. Calculate the actual value for each entry in x_new.
y_new = self._y[x_new_indices]
return y_new
def _call_spline(self, x_new):
return spleval(self._spline, x_new)
def _evaluate(self, x_new):
# 1. Handle values in x_new that are outside of x. Throw error,
# or return a list of mask array indicating the outofbounds values.
# The behavior is set by the bounds_error variable.
x_new = asarray(x_new)
out_of_bounds = self._check_bounds(x_new)
y_new = self._call(self, x_new)
if len(y_new) > 0:
y_new[out_of_bounds] = self.fill_value
return y_new
def _check_bounds(self, x_new):
"""Check the inputs for being in the bounds of the interpolated data.
Parameters
----------
x_new : array
Returns
-------
out_of_bounds : bool array
The mask on x_new of values that are out of the bounds.
"""
# If self.bounds_error is True, we raise an error if any x_new values
# fall outside the range of x. Otherwise, we return an array indicating
# which values are outside the boundary region.
below_bounds = x_new < self.x[0]
above_bounds = x_new > self.x[-1]
# !! Could provide more information about which values are out of bounds
if self.bounds_error and below_bounds.any():
raise ValueError("A value in x_new is below the interpolation "
"range.")
if self.bounds_error and above_bounds.any():
raise ValueError("A value in x_new is above the interpolation "
"range.")
# !! Should we emit a warning if some values are out of bounds?
# !! matlab does not.
out_of_bounds = logical_or(below_bounds, above_bounds)
return out_of_bounds
class _PPolyBase(object):
"""
Base class for piecewise polynomials.
"""
__slots__ = ('c', 'x', 'extrapolate')
def __init__(self, c, x, extrapolate=None):
self.c = np.asarray(c)
self.x = np.ascontiguousarray(x, dtype=np.float64)
if extrapolate is None:
extrapolate = True
self.extrapolate = bool(extrapolate)
if self.x.ndim != 1:
raise ValueError("x must be 1-dimensional")
if self.x.size < 2:
raise ValueError("at least 2 breakpoints are needed")
if self.c.ndim < 2:
raise ValueError("c must have at least 2 dimensions")
if self.c.shape[0] == 0:
raise ValueError("polynomial must be at least of order 0")
if self.c.shape[1] != self.x.size-1:
raise ValueError("number of coefficients != len(x)-1")
if np.any(self.x[1:] - self.x[:-1] < 0):
raise ValueError("x-coordinates are not in increasing order")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex_
else:
return np.float_
@classmethod
def construct_fast(cls, c, x, extrapolate=None):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
`c` and `x` must be arrays of the correct shape and type. The
`c` array can only be of dtypes float and complex, and `x`
array must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _ensure_c_contiguous(self):
"""
c and x may be modified by the user. The Cython code expects
that they are C contiguous.
"""
if not self.x.flags.c_contiguous:
self.x = self.x.copy()
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
def extend(self, c, x, right=True):
"""
Add additional breakpoints and coefficients to the polynomial.
Parameters
----------
c : ndarray, size (k, m, ...)
Additional coefficients for polynomials in intervals
``self.x[-1] <= x < x_right[0]``, ``x_right[0] <= x < x_right[1]``,
..., ``x_right[m-2] <= x < x_right[m-1]``
x : ndarray, size (m,)
Additional breakpoints. Must be sorted and either to
the right or to the left of the current breakpoints.
right : bool, optional
Whether the new intervals are to the right or to the left
of the current intervals.
"""
c = np.asarray(c)
x = np.asarray(x)
if c.ndim < 2:
raise ValueError("invalid dimensions for c")
if x.ndim != 1:
raise ValueError("invalid dimensions for x")
if x.shape[0] != c.shape[1]:
raise ValueError("x and c have incompatible sizes")
if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim:
raise ValueError("c and self.c have incompatible shapes")
if right:
if x[0] < self.x[-1]:
raise ValueError("new x are not to the right of current ones")
else:
if x[-1] > self.x[0]:
raise ValueError("new x are not to the left of current ones")
if c.size == 0:
return
dtype = self._get_dtype(c.dtype)
k2 = max(c.shape[0], self.c.shape[0])
c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:],
dtype=dtype)
if right:
c2[k2-self.c.shape[0]:, :self.c.shape[1]] = self.c
c2[k2-c.shape[0]:, self.c.shape[1]:] = c
self.x = np.r_[self.x, x]
else:
c2[k2-self.c.shape[0]:, :c.shape[1]] = c
c2[k2-c.shape[0]:, c.shape[1]:] = self.c
self.x = np.r_[x, self.x]
self.c = c2
def __call__(self, x, nu=0, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative
Parameters
----------
x : array-like
Points to evaluate the interpolant at.
nu : int, optional
Order of derivative to evaluate. Must be non-negative.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
y : array-like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
x = np.asarray(x)
x_shape = x.shape
x = np.ascontiguousarray(x.ravel(), dtype=np.float_)
out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype)
self._ensure_c_contiguous()
self._evaluate(x, nu, extrapolate, out)
return out.reshape(x_shape + self.c.shape[2:])
class PPoly(_PPolyBase):
"""
Piecewise polynomial in terms of coefficients and breakpoints
The polynomial in the ``i``-th interval, ``x[i] <= xp < x[i+1]``, is::
S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1))
where ``k`` is the degree of the polynomial. This representation
is the local power basis.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. These must be sorted in
increasing order.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
Methods
-------
__call__
derivative
antiderivative
integrate
roots
extend
from_spline
from_bernstein_basis
construct_fast
See also
--------
BPoly : piecewise polynomials in the Bernstein basis
Notes
-----
High-order polynomials in the power basis can be numerically
unstable. Precision problems can start to appear for orders
larger than 20-30.
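Examples
--------
A minimal, illustrative sketch (the coefficients below are made up for
demonstration): a piecewise quadratic equal to ``x**2`` on ``[0, 1)`` and
to ``1 - (x - 1)**2`` on ``[1, 2]``.
>>> import numpy as np
>>> from scipy.interpolate import PPoly
>>> c = np.array([[1.0, -1.0], [0.0, 0.0], [0.0, 1.0]])  # shape (k+1, m)
>>> x = np.array([0.0, 1.0, 2.0])
>>> p = PPoly(c, x)
>>> float(p(0.5)), float(p(1.5))
(0.25, 0.75)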
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. (Default: 1)
If negative, the antiderivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k - nu representing the derivative
of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
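Examples
--------
An illustrative check with made-up data: the derivative of ``x**2`` on
``[0, 2]`` is ``2*x``.
>>> import numpy as np
>>> from scipy.interpolate import PPoly
>>> p = PPoly(np.array([[1.0], [0.0], [0.0]]), np.array([0.0, 2.0]))
>>> float(p.derivative()(1.5))
3.0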
"""
if nu < 0:
return self.antiderivative(-nu)
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
c2 = self.c[:-nu,:].copy()
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu)
c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)]
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
The antiderivative is also the indefinite integral of the function,
and differentiation is its inverse operation.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. (Default: 1)
If negative, the derivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k + nu representing
the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
continuously differentiable to order nu-1, up to floating point
rounding error.
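Examples
--------
An illustrative check with made-up data: an antiderivative of ``2*x`` on
``[0, 2]`` is ``x**2`` (up to an integration constant, which is zero here).
>>> import numpy as np
>>> from scipy.interpolate import PPoly
>>> p = PPoly(np.array([[2.0], [0.0]]), np.array([0.0, 2.0]))
>>> float(p.antiderivative()(2.0))
4.0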
"""
if nu <= 0:
return self.derivative(-nu)
c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:],
dtype=self.c.dtype)
c[:-nu] = self.c
# divide by the correct rising factorials
factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu)
c[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
# fix continuity of added degrees of freedom
self._ensure_c_contiguous()
_ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1),
self.x, nu)
# construct a compatible polynomial
return self.construct_fast(c, self.x, self.extrapolate)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over [a, b]
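Examples
--------
An illustrative check with made-up data: the integral of ``x**2`` over
``[0, 2]`` is ``8/3``.
>>> import numpy as np
>>> from scipy.interpolate import PPoly
>>> p = PPoly(np.array([[1.0], [0.0], [0.0]]), np.array([0.0, 2.0]))
>>> bool(np.allclose(p.integrate(0, 2), 8.0 / 3.0))
True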
"""
if extrapolate is None:
extrapolate = self.extrapolate
# Swap integration bounds if needed
sign = 1
if b < a:
a, b = b, a
sign = -1
# Compute the integral
range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype)
self._ensure_c_contiguous()
_ppoly.integrate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, b, bool(extrapolate),
out=range_int)
# Return
range_int *= sign
return range_int.reshape(self.c.shape[2:])
def roots(self, discontinuity=True, extrapolate=None):
"""
Find real roots of the piecewise polynomial.
Parameters
----------
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : bool, optional
Whether to return roots from the polynomial extrapolated
based on first and last intervals.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
If the PPoly object describes multiple polynomials, the
return value is an object array whose each element is an
ndarray containing the roots.
Notes
-----
This routine works only on real-valued polynomials.
If the piecewise polynomial contains sections that are
identically zero, the root list will contain the start point
of the corresponding interval, followed by a ``nan`` value.
If the polynomial is discontinuous across a breakpoint, and
there is a sign change across the breakpoint, this is reported
if the `discontinuity` parameter is True.
Examples
--------
Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals
``[-2, 1], [1, 2]``:
>>> from scipy.interpolate import PPoly
>>> pp = PPoly(np.array([[1, 0, -1], [1, 0, 0]]).T, [-2, 1, 2])
>>> pp.roots()
array([-1., 1.])
"""
if extrapolate is None:
extrapolate = self.extrapolate
self._ensure_c_contiguous()
if np.issubdtype(self.c.dtype, np.complexfloating):
raise ValueError("Root finding is only for "
"real-valued polynomials")
r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, bool(discontinuity),
bool(extrapolate))
if self.c.ndim == 2:
return r[0]
else:
r2 = np.empty(prod(self.c.shape[2:]), dtype=object)
# this for-loop is equivalent to ``r2[...] = r``, but that's broken
# in numpy 1.6.0
for ii, root in enumerate(r):
r2[ii] = root
return r2.reshape(self.c.shape[2:])
@classmethod
def from_spline(cls, tck, extrapolate=None):
"""
Construct a piecewise polynomial from a spline
Parameters
----------
tck
A spline, as returned by `splrep`
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
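Examples
--------
A round-trip sketch (illustrative data): convert an interpolating cubic
spline to piecewise-polynomial form and check that it still reproduces
the data points.
>>> import numpy as np
>>> from scipy.interpolate import splrep, PPoly
>>> x = np.linspace(0, 10, 11)
>>> y = np.sin(x)
>>> p = PPoly.from_spline(splrep(x, y, s=0))
>>> bool(np.allclose(p(x), y))
True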
"""
t, c, k = tck
cvals = np.empty((k + 1, len(t)-1), dtype=c.dtype)
for m in xrange(k, -1, -1):
y = fitpack.splev(t[:-1], tck, der=m)
cvals[k - m, :] = y/spec.gamma(m+1)
return cls.construct_fast(cvals, t, extrapolate)
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
"""
Construct a piecewise polynomial in the power basis
from a polynomial in Bernstein basis.
Parameters
----------
bp : BPoly
A Bernstein basis polynomial, as created by BPoly
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
"""
dx = np.diff(bp.x)
k = bp.c.shape[0] - 1 # polynomial order
rest = (None,)*(bp.c.ndim-2)
c = np.zeros_like(bp.c)
for a in range(k+1):
factor = (-1)**(a) * comb(k, a) * bp.c[a]
for s in range(a, k+1):
val = comb(k-a, s-a) * (-1)**s
c[k-s] += factor * val / dx[(slice(None),)+rest]**s
if extrapolate is None:
extrapolate = bp.extrapolate
return cls.construct_fast(c, bp.x, extrapolate)
class BPoly(_PPolyBase):
"""
Piecewise polynomial in terms of coefficients and breakpoints
The polynomial in the ``i``-th interval ``x[i] <= xp < x[i+1]``
is written in the Bernstein polynomial basis::
S = sum(c[a, i] * b(a, k; x) for a in range(k+1))
where ``k`` is the degree of the polynomial, and::
b(a, k; x) = comb(k, a) * t**a * (1 - t)**(k - a)
with ``t = (x - x[i]) / (x[i+1] - x[i])``.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. These must be sorted in
increasing order.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
Methods
-------
__call__
extend
derivative
construct_fast
from_power_basis
from_derivatives
See also
--------
PPoly : piecewise polynomials in the power basis
Notes
-----
Properties of Bernstein polynomials are well documented in the literature.
Here's a non-exhaustive list:
.. [1] http://en.wikipedia.org/wiki/Bernstein_polynomial
.. [2] Kenneth I. Joy, Bernstein polynomials,
http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf
.. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems,
vol 2011, article ID 829546, doi:10.1155/2011/829543
Examples
--------
>>> x = [0, 1]
>>> c = [[1], [2], [3]]
>>> bp = BPoly(c, x)
This creates a 2nd order polynomial
.. math::
B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3 \\times b_{2, 2}(x) \\\\
= 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2
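The resulting object can then be evaluated like a callable; as an
illustrative check, at ``x = 0.5`` the three terms sum to ``2.0``:
>>> float(bp(0.5))
2.0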
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate_bernstein(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out, self.c.dtype)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. (Default: 1)
If negative, the antiderivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k2 = k - nu representing the derivative
of this polynomial.
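Examples
--------
An illustrative check with made-up data: the linear Bernstein polynomial
``B(x) = 0*(1 - x) + 2*x`` on ``[0, 1]`` has the constant derivative ``2``.
>>> import numpy as np
>>> from scipy.interpolate import BPoly
>>> b = BPoly(np.array([[0.0], [2.0]]), np.array([0.0, 1.0]))
>>> float(b.derivative()(0.3))
2.0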
"""
if nu < 0:
raise NotImplementedError('Antiderivative not implemented.')
if nu > 1:
bp = self
for k in range(nu):
bp = bp.derivative()
return bp
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
# For a polynomial
# B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x),
# we use the fact that
# b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ),
# which leads to
# B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1}
#
# finally, for an interval [y, y + dy] with dy != 1,
# we need to correct for an extra power of dy
rest = (None,)*(self.c.ndim-2)
k = self.c.shape[0] - 1
dx = np.diff(self.x)[(None, slice(None))+rest]
c2 = k * np.diff(self.c, axis=0) / dx
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate)
def extend(self, c, x, right=True):
k = max(self.c.shape[0], c.shape[0])
self.c = self._raise_degree(self.c, k - self.c.shape[0])
c = self._raise_degree(c, k - c.shape[0])
return _PPolyBase.extend(self, c, x, right)
extend.__doc__ = _PPolyBase.extend.__doc__
@classmethod
def from_power_basis(cls, pp, extrapolate=None):
"""
Construct a piecewise polynomial in Bernstein basis
from a power basis polynomial.
Parameters
----------
pp : PPoly
A piecewise polynomial in the power basis
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
"""
dx = np.diff(pp.x)
k = pp.c.shape[0] - 1 # polynomial order
rest = (None,)*(pp.c.ndim-2)
c = np.zeros_like(pp.c)
for a in range(k+1):
factor = pp.c[a] / comb(k, k-a) * dx[(slice(None),)+rest]**(k-a)
for j in range(k-a, k+1):
c[j] += factor * comb(j, k-a)
if extrapolate is None:
extrapolate = pp.extrapolate
return cls.construct_fast(c, pp.x, extrapolate)
@classmethod
def from_derivatives(cls, xi, yi, orders=None, extrapolate=None):
"""Construct a piecewise polynomial in the Bernstein basis,
compatible with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array-likes
``yi[i][j]`` is the ``j``-th derivative known at ``xi[i]``
orders : None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
Notes
-----
If ``k`` derivatives are specified at a breakpoint ``x``, the
constructed polynomial is exactly ``k`` times continuously
differentiable at ``x``, unless the ``order`` is provided explicitly.
In the latter case, the smoothness of the polynomial at
the breakpoint is controlled by the ``order``.
Deduces the number of derivatives to match at each end
from ``order`` and the number of derivatives available. If
possible it uses the same number of derivatives from
each end; if the number is odd it tries to take the
extra one from y2. In any case if not enough derivatives
are available at one end or another it draws enough to
make up the total from the other end.
If the order is too high and not enough derivatives are available,
an exception is raised.
Examples
--------
>>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]`
such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4`
>>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]])
Creates a piecewise polynomial `f(x)`, such that
`f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`.
Based on the number of derivatives provided, the order of the
local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`.
Notice that no restriction is imposed on the derivatives at
`x = 1` and `x = 2`.
Indeed, the explicit form of the polynomial is::
f(x) = | x * (1 - x), 0 <= x < 1
| 2 * (x - 1), 1 <= x <= 2
So that f'(1-0) = -1 and f'(1+0) = 2
"""
xi = np.asarray(xi)
if len(xi) != len(yi):
raise ValueError("xi and yi need to have the same length")
if np.any(xi[1:] - xi[:-1] <= 0):
raise ValueError("x coordinates are not in increasing order")
# number of intervals
m = len(xi) - 1
# global poly order is k-1, local orders are <=k and can vary
try:
k = max(len(yi[i]) + len(yi[i+1]) for i in range(m))
except TypeError:
raise ValueError("Using a 1D array for y? Please .reshape(-1, 1).")
if orders is None:
orders = [None] * m
else:
if isinstance(orders, integer_types):
orders = [orders] * m
k = max(k, max(orders))
if any(o <= 0 for o in orders):
raise ValueError("Orders must be positive.")
c = []
for i in range(m):
y1, y2 = yi[i], yi[i+1]
if orders[i] is None:
n1, n2 = len(y1), len(y2)
else:
n = orders[i]+1
n1 = min(n//2, len(y1))
n2 = min(n - n1, len(y2))
n1 = min(n - n2, len(y1))
if n1+n2 != n:
raise ValueError("Point %g has %d derivatives, point %g"
" has %d derivatives, but order %d requested" %
(xi[i], len(y1), xi[i+1], len(y2), orders[i]))
if not (n1 <= len(y1) and n2 <= len(y2)):
raise ValueError("`order` input incompatible with"
" length y1 or y2.")
b = BPoly._construct_from_derivatives(xi[i], xi[i+1], y1[:n1], y2[:n2])
if len(b) < k:
b = BPoly._raise_degree(b, k - len(b))
c.append(b)
c = np.asarray(c)
return cls(c.swapaxes(0, 1), xi, extrapolate)
@staticmethod
def _construct_from_derivatives(xa, xb, ya, yb):
"""Compute the coefficients of a polynomial in the Bernstein basis
given the values and derivatives at the edges.
Return the coefficients of a polynomial in the Bernstein basis
defined on `[xa, xb]` and having the values and derivatives at the
endpoints ``xa`` and ``xb`` as specified by ``ya`` and ``yb``.
The polynomial constructed is of the minimal possible degree, i.e.,
if the lengths of ``ya`` and ``yb`` are ``na`` and ``nb``, the degree
of the polynomial is ``na + nb - 1``.
Parameters
----------
xa : float
Left-hand end point of the interval
xb : float
Right-hand end point of the interval
ya : array_like
Derivatives at ``xa``. ``ya[0]`` is the value of the function, and
``ya[i]`` for ``i > 0`` is the value of the ``i``-th derivative.
yb : array_like
Derivatives at ``xb``.
Returns
-------
array
coefficient array of a polynomial having specified derivatives
Notes
-----
This uses several facts from the theory of Bernstein basis functions.
First of all,
.. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1})
If B(x) is a linear combination of the form
.. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n},
then :math:`B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}`.
Iterating the latter, one finds for the q-th derivative
.. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q},
with
.. math:: Q_a = \sum_{j=0}^{q} (-)^{j+q} comb(q, j) c_{j+a}
This way, only `a=0` contributes to :math:`B^{q}(x = xa)`, and
`c_q` are found one by one by iterating `q = 0, ..., na`.
At `x = xb` it's the same with `a = n - q`.
"""
ya, yb = np.asarray(ya), np.asarray(yb)
if ya.shape[1:] != yb.shape[1:]:
raise ValueError('ya and yb have incompatible dimensions.')
dta, dtb = ya.dtype, yb.dtype
if (np.issubdtype(dta, np.complexfloating)
or np.issubdtype(dtb, np.complexfloating)):
dt = np.complex_
else:
dt = np.float_
na, nb = len(ya), len(yb)
n = na + nb
c = np.empty((na+nb,) + ya.shape[1:], dtype=dt)
# compute coefficients of a polynomial degree na+nb-1
# walk left-to-right
for q in range(0, na):
c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa)**q
for j in range(0, q):
c[q] -= (-1)**(j+q) * comb(q, j) * c[j]
# now walk right-to-left
for q in range(0, nb):
c[-q-1] = yb[q] / spec.poch(n - q, q) * (-1)**q * (xb - xa)**q
for j in range(0, q):
c[-q-1] -= (-1)**(j+1) * comb(q, j+1) * c[-q+j]
return c
@staticmethod
def _raise_degree(c, d):
"""Raise a degree of a polynomial in the Bernstein basis.
Given the coefficients of a polynomial degree `k`, return (the
coefficients of) the equivalent polynomial of degree `k+d`.
Parameters
----------
c : array_like
coefficient array, 1D
d : integer
Returns
-------
array
coefficient array, 1D array of length `c.shape[0] + d`
Notes
-----
This uses the fact that a Bernstein polynomial `b_{a, k}` can be
identically represented as a linear combination of polynomials of
a higher degree `k+d`:
.. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \
comb(d, j) / comb(k+d, a+j)
"""
if d == 0:
return c
k = c.shape[0] - 1
out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype)
for a in range(c.shape[0]):
f = c[a] * comb(k, a)
for j in range(d+1):
out[a+j] += f * comb(d, j) / comb(k+d, a+j)
return out
class RegularGridInterpolator(object):
"""
Interpolation on a regular grid in arbitrary dimensions
The data must be defined on a regular grid; the grid spacing however may be
uneven. Linear and nearest-neighbour interpolation are supported. After
setting up the interpolator object, the interpolation method (*linear* or
*nearest*) may be chosen at each evaluation.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
method : str
The method of interpolation to perform. Supported are "linear" and
"nearest". This parameter will become the default for the object's
``__call__`` method.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated.
Methods
-------
__call__
Notes
-----
Contrary to LinearNDInterpolator and NearestNDInterpolator, this class
avoids expensive triangulation of the input data by taking advantage of the
regular grid structure.
.. versionadded:: 0.14
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
References
----------
.. [1] Python package *regulargrid* by Johannes Buchner, see
https://pypi.python.org/pypi/regulargrid/
.. [2] Trilinear interpolation. (2013, January 17). In Wikipedia, The Free
Encyclopedia. Retrieved 27 Feb 2013 01:28.
http://en.wikipedia.org/w/index.php?title=Trilinear_interpolation&oldid=533448871
.. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
and multilinear table interpolation in many dimensions." MATH.
COMPUT. 50.181 (1988): 189-196.
http://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
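Examples
--------
A minimal sketch; the grid and data below are made up for illustration,
and multilinear interpolation reproduces this bilinear function exactly.
>>> import numpy as np
>>> from scipy.interpolate import RegularGridInterpolator
>>> x = np.array([0.0, 1.0, 2.0, 4.0])   # note the uneven spacing
>>> y = np.array([0.0, 2.0, 3.0])
>>> xg, yg = np.meshgrid(x, y, indexing='ij')
>>> values = 2*xg + 3*yg
>>> interp = RegularGridInterpolator((x, y), values)
>>> float(interp([[2.0, 1.0]]))
7.0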
"""
# this class is based on code originally programmed by Johannes Buchner,
# see https://github.com/JohannesBuchner/regulargrid
def __init__(self, points, values, method="linear", bounds_error=True,
fill_value=np.nan):
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
self.method = method
self.bounds_error = bounds_error
if not hasattr(values, 'ndim'):
# allow reasonable duck-typed values
values = np.asarray(values)
if len(points) > values.ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), values.ndim))
if hasattr(values, 'dtype') and hasattr(values, 'astype'):
if not np.issubdtype(values.dtype, np.inexact):
values = values.astype(float)
self.fill_value = fill_value
if fill_value is not None:
fill_value_dtype = np.asarray(fill_value).dtype
if (hasattr(values, 'dtype')
and not np.can_cast(fill_value_dtype, values.dtype,
casting='same_kind')):
raise ValueError("fill_value must be either 'None' or "
"of a type compatible with values")
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
self.grid = tuple([np.asarray(p) for p in points])
self.values = values
def __call__(self, xi, method=None):
"""
Interpolation at coordinates
Parameters
----------
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str
The method of interpolation to perform. Supported are "linear" and
"nearest".
"""
method = self.method if method is None else method
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
ndim = len(self.grid)
xi = _ndim_coords_from_arrays(xi, ndim=ndim)
if xi.shape[-1] != len(self.grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[-1], ndim))
xi_shape = xi.shape
xi = xi.reshape(-1, xi_shape[-1])
if self.bounds_error:
for i, p in enumerate(xi.T):
if not np.logical_and(np.all(self.grid[i][0] <= p),
np.all(p <= self.grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
indices, norm_distances, out_of_bounds = self._find_indices(xi.T)
if method == "linear":
result = self._evaluate_linear(indices, norm_distances, out_of_bounds)
elif method == "nearest":
result = self._evaluate_nearest(indices, norm_distances, out_of_bounds)
if not self.bounds_error and self.fill_value is not None:
result[out_of_bounds] = self.fill_value
return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])
def _evaluate_linear(self, indices, norm_distances, out_of_bounds):
# slice for broadcasting over trailing dimensions in self.values
vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices))
# find relevant values
# each i and i+1 represents an edge
edges = itertools.product(*[[i, i + 1] for i in indices])
values = 0.
for edge_indices in edges:
weight = 1.
for ei, i, yi in zip(edge_indices, indices, norm_distances):
weight *= np.where(ei == i, 1 - yi, yi)
values += np.asarray(self.values[edge_indices]) * weight[vslice]
return values
def _evaluate_nearest(self, indices, norm_distances, out_of_bounds):
idx_res = []
for i, yi in zip(indices, norm_distances):
idx_res.append(np.where(yi <= .5, i, i + 1))
return self.values[idx_res]
def _find_indices(self, xi):
# find relevant edges between which xi are situated
indices = []
# compute distance to lower edge in unity units
norm_distances = []
# check for out of bounds xi
out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
# iterate through dimensions
for x, grid in zip(xi, self.grid):
i = np.searchsorted(grid, x) - 1
i[i < 0] = 0
i[i > grid.size - 2] = grid.size - 2
indices.append(i)
norm_distances.append((x - grid[i]) /
(grid[i + 1] - grid[i]))
if not self.bounds_error:
out_of_bounds += x < grid[0]
out_of_bounds += x > grid[-1]
return indices, norm_distances, out_of_bounds
def interpn(points, values, xi, method="linear", bounds_error=True,
fill_value=np.nan):
"""
Multidimensional interpolation on regular grids.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str
The method of interpolation to perform. Supported are "linear" and
"nearest", and "splinef2d". "splinef2d" is only supported for
2-dimensional data.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d".
Returns
-------
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
Interpolated values at input coordinates.
Notes
-----
.. versionadded:: 0.14
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
RegularGridInterpolator : Linear and nearest-neighbor Interpolation on a
regular grid in arbitrary dimensions
RectBivariateSpline : Bivariate spline approximation over a rectangular mesh
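Examples
--------
A minimal sketch mirroring the `RegularGridInterpolator` example above
(illustrative data only):
>>> import numpy as np
>>> from scipy.interpolate import interpn
>>> x = np.array([0.0, 1.0, 2.0, 4.0])
>>> y = np.array([0.0, 2.0, 3.0])
>>> xg, yg = np.meshgrid(x, y, indexing='ij')
>>> values = 2*xg + 3*yg
>>> float(interpn((x, y), values, [[2.0, 1.0]]))
7.0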
"""
# sanity check 'method' kwarg
if method not in ["linear", "nearest", "splinef2d"]:
raise ValueError("interpn only understands the methods 'linear', "
"'nearest', and 'splinef2d'. You provided %s." %
method)
if not hasattr(values, 'ndim'):
values = np.asarray(values)
ndim = values.ndim
if ndim > 2 and method == "splinef2d":
raise ValueError("The method splinef2d can only be used for "
"2-dimensional input data")
if not bounds_error and fill_value is None and method == "splinef2d":
raise ValueError("The method splinef2d does not support extrapolation.")
# sanity check consistency of input dimensions
if len(points) > ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), ndim))
if len(points) != ndim and method == 'splinef2d':
raise ValueError("The method splinef2d can only be used for "
"scalar data with one point per coordinate")
# sanity check input grid
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
grid = tuple([np.asarray(p) for p in points])
# sanity check requested xi
xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
if xi.shape[-1] != len(grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this grid has "
"dimension %d" % (xi.shape[-1], len(grid)))
for i, p in enumerate(xi.T):
if bounds_error and not np.logical_and(np.all(grid[i][0] <= p),
np.all(p <= grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
# perform interpolation
if method == "linear":
interp = RegularGridInterpolator(points, values, method="linear",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "nearest":
interp = RegularGridInterpolator(points, values, method="nearest",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "splinef2d":
xi_shape = xi.shape
xi = xi.reshape(-1, xi.shape[-1])
# RectBivariateSpline doesn't support fill_value; we need to wrap here
idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
axis=0)
result = np.empty_like(xi[:, 0])
# make a copy of values for RectBivariateSpline
interp = RectBivariateSpline(points[0], points[1], values[:])
result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
result[np.logical_not(idx_valid)] = fill_value
return result.reshape(xi_shape[:-1])
# backward compatibility wrapper
class ppform(PPoly):
"""
Deprecated piecewise polynomial class.
New code should use the `PPoly` class instead.
"""
def __init__(self, coeffs, breaks, fill=0.0, sort=False):
warnings.warn("ppform is deprecated -- use PPoly instead",
category=DeprecationWarning)
if sort:
breaks = np.sort(breaks)
else:
breaks = np.asarray(breaks)
PPoly.__init__(self, coeffs, breaks)
self.coeffs = self.c
self.breaks = self.x
self.K = self.coeffs.shape[0]
self.fill = fill
self.a = self.breaks[0]
self.b = self.breaks[-1]
def __call__(self, x):
return PPoly.__call__(self, x, 0, False)
def _evaluate(self, x, nu, extrapolate, out):
PPoly._evaluate(self, x, nu, extrapolate, out)
out[~((x >= self.a) & (x <= self.b))] = self.fill
return out
@classmethod
def fromspline(cls, xk, cvals, order, fill=0.0):
# Note: this spline representation is incompatible with FITPACK
N = len(xk)-1
sivals = np.empty((order+1, N), dtype=float)
for m in xrange(order, -1, -1):
fact = spec.gamma(m+1)
res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
res /= fact
sivals[order-m, :] = res
return cls(sivals, xk, fill=fill)
def _dot0(a, b):
"""Similar to numpy.dot, but sum over last axis of a and 1st axis of b"""
if b.ndim <= 2:
return dot(a, b)
else:
axes = list(range(b.ndim))
axes.insert(-1, 0)
axes.pop(0)
return dot(a, b.transpose(axes))
def _find_smoothest(xk, yk, order, conds=None, B=None):
# construct Bmatrix, and Jmatrix
# e = J*c
# minimize norm(e,2) given B*c=yk
# if desired B can be given
# conds is ignored
N = len(xk)-1
K = order
if B is None:
B = _fitpack._bsplmat(order, xk)
J = _fitpack._bspldismat(order, xk)
u, s, vh = scipy.linalg.svd(B)
ind = K-1
V2 = vh[-ind:,:].T
V1 = vh[:-ind,:].T
A = dot(J.T,J)
tmp = dot(V2.T,A)
Q = dot(tmp,V2)
p = scipy.linalg.solve(Q, tmp)
tmp = dot(V2,p)
tmp = np.eye(N+K) - tmp
tmp = dot(tmp,V1)
tmp = dot(tmp,np.diag(1.0/s))
tmp = dot(tmp,u.T)
return _dot0(tmp, yk)
def _setdiag(a, k, v):
if not a.ndim == 2:
raise ValueError("Input array should be 2-D.")
M,N = a.shape
if k > 0:
start = k
num = N - k
else:
num = M + k
start = abs(k)*N
end = start + num*(N+1)-1
a.flat[start:end:(N+1)] = v
# Return the spline that minimizes the discontinuity of the
# "order-th" derivative; for order >= 2.
def _find_smoothest2(xk, yk):
N = len(xk) - 1
Np1 = N + 1
# find pseudo-inverse of B directly.
Bd = np.empty((Np1, N))
for k in range(-N,N):
if (k < 0):
l = np.arange(-k, Np1)
v = (l+k+1)
if ((k+1) % 2):
v = -v
else:
l = np.arange(k,N)
v = N - l
if ((k % 2)):
v = -v
_setdiag(Bd, k, v)
Bd /= (Np1)
V2 = np.ones((Np1,))
V2[1::2] = -1
V2 /= math.sqrt(Np1)
dk = np.diff(xk)
b = 2*np.diff(yk, axis=0)/dk
J = np.zeros((N-1,N+1))
idk = 1.0/dk
_setdiag(J,0,idk[:-1])
_setdiag(J,1,-idk[1:]-idk[:-1])
_setdiag(J,2,idk[1:])
A = dot(J.T,J)
val = dot(V2,dot(A,V2))
res1 = dot(np.outer(V2,V2)/val,A)
mk = dot(np.eye(Np1)-res1, _dot0(Bd,b))
return mk
def _get_spline2_Bb(xk, yk, kind, conds):
Np1 = len(xk)
dk = xk[1:]-xk[:-1]
if kind == 'not-a-knot':
# use banded-solver
nlu = (1,1)
B = ones((3,Np1))
alpha = 2*(yk[1:]-yk[:-1])/dk
zrs = np.zeros((1,)+yk.shape[1:])
row = (Np1-1)//2
b = np.concatenate((alpha[:row],zrs,alpha[row:]),axis=0)
B[0,row+2:] = 0
B[2,:(row-1)] = 0
B[0,row+1] = dk[row-1]
B[1,row] = -dk[row]-dk[row-1]
B[2,row-1] = dk[row]
return B, b, None, nlu
else:
raise NotImplementedError("quadratic %s is not available" % kind)
def _get_spline3_Bb(xk, yk, kind, conds):
# internal function to compute different tri-diagonal system
# depending on the kind of spline requested.
# conds is only used for 'second' and 'first'
Np1 = len(xk)
if kind in ['natural', 'second']:
if kind == 'natural':
m0, mN = 0.0, 0.0
else:
m0, mN = conds
# the matrix to invert is (N-1,N-1)
# use banded solver
beta = 2*(xk[2:]-xk[:-2])
alpha = xk[1:]-xk[:-1]
nlu = (1,1)
B = np.empty((3,Np1-2))
B[0,1:] = alpha[2:]
B[1,:] = beta
B[2,:-1] = alpha[1:-1]
dyk = yk[1:]-yk[:-1]
b = (dyk[1:]/alpha[1:] - dyk[:-1]/alpha[:-1])
b *= 6
b[0] -= m0
b[-1] -= mN
def append_func(mk):
# put m0 and mN into the correct shape for
# concatenation
ma = array(m0,copy=0,ndmin=yk.ndim)
mb = array(mN,copy=0,ndmin=yk.ndim)
if ma.shape[1:] != yk.shape[1:]:
ma = ma*(ones(yk.shape[1:])[np.newaxis,...])
if mb.shape[1:] != yk.shape[1:]:
mb = mb*(ones(yk.shape[1:])[np.newaxis,...])
mk = np.concatenate((ma,mk),axis=0)
mk = np.concatenate((mk,mb),axis=0)
return mk
return B, b, append_func, nlu
elif kind in ['clamped', 'endslope', 'first', 'not-a-knot', 'runout',
'parabolic']:
if kind == 'endslope':
# match slope of lagrange interpolating polynomial of
# order 3 at end-points.
x0,x1,x2,x3 = xk[:4]
sl_0 = (1./(x0-x1)+1./(x0-x2)+1./(x0-x3))*yk[0]
sl_0 += (x0-x2)*(x0-x3)/((x1-x0)*(x1-x2)*(x1-x3))*yk[1]
sl_0 += (x0-x1)*(x0-x3)/((x2-x0)*(x2-x1)*(x3-x2))*yk[2]
sl_0 += (x0-x1)*(x0-x2)/((x3-x0)*(x3-x1)*(x3-x2))*yk[3]
xN3,xN2,xN1,xN0 = xk[-4:]
sl_N = (1./(xN0-xN1)+1./(xN0-xN2)+1./(xN0-xN3))*yk[-1]
sl_N += (xN0-xN2)*(xN0-xN3)/((xN1-xN0)*(xN1-xN2)*(xN1-xN3))*yk[-2]
sl_N += (xN0-xN1)*(xN0-xN3)/((xN2-xN0)*(xN2-xN1)*(xN3-xN2))*yk[-3]
sl_N += (xN0-xN1)*(xN0-xN2)/((xN3-xN0)*(xN3-xN1)*(xN3-xN2))*yk[-4]
elif kind == 'clamped':
sl_0, sl_N = 0.0, 0.0
elif kind == 'first':
sl_0, sl_N = conds
# Now set up the (N+1)x(N+1) system of equations
beta = np.r_[0,2*(xk[2:]-xk[:-2]),0]
alpha = xk[1:]-xk[:-1]
gamma = np.r_[0,alpha[1:]]
B = np.diag(alpha,k=-1) + np.diag(beta) + np.diag(gamma,k=1)
d1 = alpha[0]
dN = alpha[-1]
if kind == 'not-a-knot':
d2 = alpha[1]
dN1 = alpha[-2]
B[0,:3] = [d2,-d1-d2,d1]
B[-1,-3:] = [dN,-dN1-dN,dN1]
elif kind == 'runout':
B[0,:3] = [1,-2,1]
B[-1,-3:] = [1,-2,1]
elif kind == 'parabolic':
B[0,:2] = [1,-1]
B[-1,-2:] = [-1,1]
elif kind == 'periodic':
raise NotImplementedError
elif kind == 'symmetric':
raise NotImplementedError
else:
B[0,:2] = [2*d1,d1]
B[-1,-2:] = [dN,2*dN]
# Set up RHS (b)
b = np.empty((Np1,)+yk.shape[1:])
dyk = (yk[1:]-yk[:-1])*1.0
if kind in ['not-a-knot', 'runout', 'parabolic']:
b[0] = b[-1] = 0.0
elif kind == 'periodic':
raise NotImplementedError
elif kind == 'symmetric':
raise NotImplementedError
else:
b[0] = (dyk[0]/d1 - sl_0)
b[-1] = -(dyk[-1]/dN - sl_N)
b[1:-1,...] = (dyk[1:]/alpha[1:]-dyk[:-1]/alpha[:-1])
b *= 6.0
return B, b, None, None
else:
raise ValueError("%s not supported" % kind)
# conds is a tuple of an array and a vector
# giving the left-hand and the right-hand side
# of the additional equations to add to B
def _find_user(xk, yk, order, conds, B):
lh = conds[0]
rh = conds[1]
B = np.concatenate((B, lh), axis=0)
w = np.concatenate((yk, rh), axis=0)
M, N = B.shape
if (M > N):
raise ValueError("over-specification of conditions")
elif (M < N):
return _find_smoothest(xk, yk, order, None, B)
else:
return scipy.linalg.solve(B, w)
# If conds is None, then use the not_a_knot condition
# at K-1 farthest separated points in the interval
def _find_not_a_knot(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# If conds is None, then ensure zero-valued second
# derivative at K-1 farthest separated points
def _find_natural(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# If conds is None, then ensure zero-valued first
# derivative at K-1 farthest separated points
def _find_clamped(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
def _find_fixed(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# If conds is None, then use coefficient periodicity
# If conds is 'function' then use function periodicity
def _find_periodic(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# Doesn't use conds
def _find_symmetric(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# conds is a dictionary with multiple values
def _find_mixed(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
def splmake(xk, yk, order=3, kind='smoothest', conds=None):
"""
Return a representation of a spline given data-points at internal knots
Parameters
----------
xk : array_like
The input array of x values of rank 1
yk : array_like
The input array of y values of rank N. `yk` can be an N-d array to
represent more than one curve, through the same `xk` points. The first
dimension is assumed to be the interpolating dimension and has the same
length as `xk`.
order : int, optional
Order of the spline
kind : str, optional
Can be 'smoothest', 'not_a_knot', 'fixed', 'clamped', 'natural',
'periodic', 'symmetric', 'user', 'mixed'; the setting is ignored if order < 2
conds : optional
Extra boundary conditions passed through to the kind-specific solver
(ignored by the default 'smoothest' kind).
Returns
-------
splmake : tuple
Return a (`xk`, `cvals`, `k`) representation of a spline given
data-points where the (internal) knots are at the data-points.
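Examples
--------
An illustrative sketch: for ``order <= 1`` the data are returned
essentially unchanged.
>>> import numpy as np
>>> xk = np.arange(5.0)
>>> yk = xk**2
>>> knots, coefs, k = splmake(xk, yk, order=1)
>>> k
1
>>> bool(np.all(coefs == yk))
True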
"""
yk = np.asanyarray(yk)
order = int(order)
if order < 0:
raise ValueError("order must not be negative")
if order == 0:
return xk, yk[:-1], order
elif order == 1:
return xk, yk, order
try:
func = eval('_find_%s' % kind)
except:
raise NotImplementedError
# the constraint matrix
B = _fitpack._bsplmat(order, xk)
coefs = func(xk, yk, order, conds, B)
return xk, coefs, order
def spleval(xck, xnew, deriv=0):
"""
Evaluate a fixed spline represented by the given tuple at the new x-values
The `xj` values are the interior knot points. The approximation
region is `xj[0]` to `xj[-1]`. If N+1 is the length of `xj`, then `cvals`
should have length N+k where `k` is the order of the spline.
Parameters
----------
(xj, cvals, k) : tuple
Parameters that define the fixed spline
xj : array_like
Interior knot points
cvals : array_like
Spline coefficients
k : int
Order of the spline
xnew : array_like
Locations to calculate spline
deriv : int
Order of the derivative to evaluate (0 returns the function values)
Returns
-------
spleval : ndarray
If `cvals` represents more than one curve (`cvals.ndim` > 1) and/or
`xnew` is N-d, then the result is `xnew.shape` + `cvals.shape[1:]`
providing the interpolation of multiple curves.
Notes
-----
Internally, an additional `k`-1 knot points are added on either side of
the spline.
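Examples
--------
Typically used together with `splmake` (an illustrative sketch):
>>> import numpy as np
>>> xk = np.linspace(0, 2*np.pi, 9)
>>> spl = splmake(xk, np.sin(xk), order=3)
>>> spleval(spl, [0.5, 1.5]).shape
(2,)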
"""
(xj,cvals,k) = xck
oldshape = np.shape(xnew)
xx = np.ravel(xnew)
sh = cvals.shape[1:]
res = np.empty(xx.shape + sh, dtype=cvals.dtype)
for index in np.ndindex(*sh):
sl = (slice(None),)+index
if issubclass(cvals.dtype.type, np.complexfloating):
res[sl].real = _fitpack._bspleval(xx,xj,cvals.real[sl],k,deriv)
res[sl].imag = _fitpack._bspleval(xx,xj,cvals.imag[sl],k,deriv)
else:
res[sl] = _fitpack._bspleval(xx,xj,cvals[sl],k,deriv)
res.shape = oldshape + sh
return res
def spltopp(xk, cvals, k):
"""Return a piece-wise polynomial object from a fixed-spline tuple.
"""
return ppform.fromspline(xk, cvals, k)
def spline(xk, yk, xnew, order=3, kind='smoothest', conds=None):
"""
Interpolate a curve at new points using a spline fit
Parameters
----------
xk, yk : array_like
The x and y values that define the curve.
xnew : array_like
The x values where spline should estimate the y values.
order : int
Default is 3.
kind : string
One of {'smoothest'}
conds : optional
Boundary conditions passed through to `splmake`.
Returns
-------
spline : ndarray
An array of y values; the spline evaluated at the positions `xnew`.
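Examples
--------
An illustrative sketch with made-up data: fit the default cubic
'smoothest' spline and evaluate it back at the knots, where it
reproduces the data.
>>> import numpy as np
>>> xk = np.linspace(0, 4, 9)
>>> yk = xk**2
>>> bool(np.allclose(spline(xk, yk, xk), yk))
True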
"""
return spleval(splmake(xk,yk,order=order,kind=kind,conds=conds),xnew)
| bsd-3-clause |
toastedcornflakes/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 176 | 2027 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LinearDiscriminantAnalysis(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained by each component
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
colors = ['navy', 'turquoise', 'darkorange']
lw = 2
for color, i, target_name in zip(colors, [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=.8, lw=lw,
label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('PCA of IRIS dataset')
plt.figure()
for color, i, target_name in zip(colors, [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], alpha=.8, color=color,
label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
YinongLong/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 73 | 2264 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequence of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import matplotlib.pyplot as plt
#plt.matshow(cm, cmap=plt.cm.jet)
#plt.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
cwu2011/scikit-learn | sklearn/neighbors/unsupervised.py | 106 | 4461 | """Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_params : dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> rng = neigh.radius_neighbors([0, 0, 1.3], 0.4, return_distance=False)
>>> np.asarray(rng[0][0])
array(2)
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, **kwargs):
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, **kwargs)
| bsd-3-clause |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/pandas/tseries/tests/test_resample.py | 1 | 43272 | # pylint: disable=E1101
from datetime import datetime, timedelta
from pandas.compat import range, lrange, zip, product
import numpy as np
from pandas import Series, TimeSeries, DataFrame, Panel, isnull, notnull, Timestamp
from pandas.tseries.index import date_range
from pandas.tseries.offsets import Minute, BDay
from pandas.tseries.period import period_range, PeriodIndex, Period
from pandas.tseries.resample import DatetimeIndex, TimeGrouper
from pandas.tseries.frequencies import MONTHS, DAYS
import pandas.tseries.offsets as offsets
import pandas as pd
import nose
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal)
import pandas.util.testing as tm
bday = BDay()
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
class TestResample(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(np.random.rand(len(dti)), dti)
def test_custom_grouper(self):
dti = DatetimeIndex(freq='Min', start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10))
s = Series(np.array([1] * len(dti)), index=dti, dtype='int64')
b = TimeGrouper(Minute(5))
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
b = TimeGrouper(Minute(5), closed='right', label='right')
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
self.assertEquals(g.ngroups, 2593)
self.assert_(notnull(g.mean()).all())
# construct expected val
arr = [1] + [5] * 2592
idx = dti[0:-1:5]
idx = idx.append(dti[-1:])
expect = Series(arr, index=idx)
# GH2763 - return in put dtype if we can
result = g.agg(np.sum)
assert_series_equal(result, expect)
df = DataFrame(np.random.rand(len(dti), 10), index=dti, dtype='float64')
r = df.groupby(b).agg(np.sum)
self.assertEquals(len(r.columns), 10)
self.assertEquals(len(r.index), 2593)
def test_resample_basic(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min',
name='index')
s = Series(np.random.randn(14), index=rng)
result = s.resample('5min', how='mean', closed='right', label='right')
expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=date_range('1/1/2000', periods=4, freq='5min'))
assert_series_equal(result, expected)
self.assert_(result.index.name == 'index')
result = s.resample('5min', how='mean', closed='left', label='right')
expected = Series([s[:5].mean(), s[5:10].mean(), s[10:].mean()],
index=date_range('1/1/2000 00:05', periods=3,
freq='5min'))
assert_series_equal(result, expected)
s = self.series
result = s.resample('5Min', how='last')
grouper = TimeGrouper(Minute(5), closed='left', label='left')
expect = s.groupby(grouper).agg(lambda x: x[-1])
assert_series_equal(result, expect)
def test_resample_basic_from_daily(self):
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D', name='index')
s = Series(np.random.rand(len(dti)), dti)
# to weekly
result = s.resample('w-sun', how='last')
self.assertEquals(len(result), 3)
self.assert_((result.index.dayofweek == [6, 6, 6]).all())
self.assertEquals(result.irow(0), s['1/2/2005'])
self.assertEquals(result.irow(1), s['1/9/2005'])
self.assertEquals(result.irow(2), s.irow(-1))
result = s.resample('W-MON', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [0, 0]).all())
self.assertEquals(result.irow(0), s['1/3/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-TUE', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [1, 1]).all())
self.assertEquals(result.irow(0), s['1/4/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-WED', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [2, 2]).all())
self.assertEquals(result.irow(0), s['1/5/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-THU', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [3, 3]).all())
self.assertEquals(result.irow(0), s['1/6/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-FRI', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [4, 4]).all())
self.assertEquals(result.irow(0), s['1/7/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
# to biz day
result = s.resample('B', how='last')
self.assertEquals(len(result), 7)
self.assert_((result.index.dayofweek == [4, 0, 1, 2, 3, 4, 0]).all())
self.assertEquals(result.irow(0), s['1/2/2005'])
self.assertEquals(result.irow(1), s['1/3/2005'])
self.assertEquals(result.irow(5), s['1/9/2005'])
self.assert_(result.index.name == 'index')
def test_resample_frame_basic(self):
df = tm.makeTimeDataFrame()
b = TimeGrouper('M')
g = df.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
result = df.resample('A')
assert_series_equal(result['A'], df['A'].resample('A'))
result = df.resample('M')
assert_series_equal(result['A'], df['A'].resample('M'))
df.resample('M', kind='period')
df.resample('W-WED', kind='period')
def test_resample_loffset(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min')
s = Series(np.random.randn(14), index=rng)
result = s.resample('5min', how='mean', closed='right', label='right',
loffset=timedelta(minutes=1))
idx = date_range('1/1/2000', periods=4, freq='5min')
expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=idx + timedelta(minutes=1))
assert_series_equal(result, expected)
expected = s.resample(
'5min', how='mean', closed='right', label='right',
loffset='1min')
assert_series_equal(result, expected)
expected = s.resample(
'5min', how='mean', closed='right', label='right',
loffset=Minute(1))
assert_series_equal(result, expected)
self.assert_(result.index.freq == Minute(5))
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D')
ser = Series(np.random.rand(len(dti)), dti)
# to weekly
result = ser.resample('w-sun', how='last')
expected = ser.resample('w-sun', how='last', loffset=-bday)
self.assertEqual(result.index[0] - bday, expected.index[0])
def test_resample_upsample(self):
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D', name='index')
s = Series(np.random.rand(len(dti)), dti)
# to minutely, by padding
result = s.resample('Min', fill_method='pad')
self.assertEquals(len(result), 12961)
self.assertEquals(result[0], s[0])
self.assertEquals(result[-1], s[-1])
self.assert_(result.index.name == 'index')
def test_upsample_with_limit(self):
rng = date_range('1/1/2000', periods=3, freq='5t')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('t', fill_method='ffill', limit=2)
expected = ts.reindex(result.index, method='ffill', limit=2)
assert_series_equal(result, expected)
def test_resample_ohlc(self):
s = self.series
grouper = TimeGrouper(Minute(5))
expect = s.groupby(grouper).agg(lambda x: x[-1])
result = s.resample('5Min', how='ohlc')
self.assertEquals(len(result), len(expect))
self.assertEquals(len(result.columns), 4)
xs = result.irow(-2)
self.assertEquals(xs['open'], s[-6])
self.assertEquals(xs['high'], s[-6:-1].max())
self.assertEquals(xs['low'], s[-6:-1].min())
self.assertEquals(xs['close'], s[-2])
xs = result.irow(0)
self.assertEquals(xs['open'], s[0])
self.assertEquals(xs['high'], s[:5].max())
self.assertEquals(xs['low'], s[:5].min())
self.assertEquals(xs['close'], s[4])
def test_resample_ohlc_dataframe(self):
df = (pd.DataFrame({'PRICE': {Timestamp('2011-01-06 10:59:05', tz=None): 24990,
Timestamp('2011-01-06 12:43:33', tz=None): 25499,
Timestamp('2011-01-06 12:54:09', tz=None): 25499},
'VOLUME': {Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
Timestamp('2011-01-06 12:54:09', tz=None): 100000000}})
).reindex_axis(['VOLUME', 'PRICE'], axis=1)
res = df.resample('H', how='ohlc')
exp = pd.concat([df['VOLUME'].resample('H', how='ohlc'),
df['PRICE'].resample('H', how='ohlc')],
axis=1,
keys=['VOLUME', 'PRICE'])
assert_frame_equal(exp, res)
df.columns = [['a', 'b'], ['c', 'd']]
res = df.resample('H', how='ohlc')
exp.columns = pd.MultiIndex.from_tuples([('a', 'c', 'open'), ('a', 'c', 'high'),
('a', 'c', 'low'), ('a', 'c', 'close'), ('b', 'd', 'open'),
('b', 'd', 'high'), ('b', 'd', 'low'), ('b', 'd', 'close')])
assert_frame_equal(exp, res)
# dupe columns fail atm
# df.columns = ['PRICE', 'PRICE']
def test_resample_dup_index(self):
# GH 4812
# dup columns with resample raising
df = DataFrame(np.random.randn(4,12),index=[2000,2000,2000,2000],columns=[ Period(year=2000,month=i+1,freq='M') for i in range(12) ])
df.iloc[3,:] = np.nan
result = df.resample('Q',axis=1)
expected = df.groupby(lambda x: int((x.month-1)/3),axis=1).mean()
expected.columns = [ Period(year=2000,quarter=i+1,freq='Q') for i in range(4) ]
assert_frame_equal(result, expected)
def test_resample_reresample(self):
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D')
s = Series(np.random.rand(len(dti)), dti)
bs = s.resample('B', closed='right', label='right')
result = bs.resample('8H')
self.assertEquals(len(result), 22)
tm.assert_isinstance(result.index.freq, offsets.DateOffset)
self.assert_(result.index.freq == offsets.Hour(8))
def test_resample_timestamp_to_period(self):
ts = _simple_ts('1/1/1990', '1/1/2000')
result = ts.resample('A-DEC', kind='period')
expected = ts.resample('A-DEC')
expected.index = period_range('1990', '2000', freq='a-dec')
assert_series_equal(result, expected)
result = ts.resample('A-JUN', kind='period')
expected = ts.resample('A-JUN')
expected.index = period_range('1990', '2000', freq='a-jun')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period')
expected = ts.resample('M')
expected.index = period_range('1990-01', '2000-01', freq='M')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period')
expected = ts.resample('M')
expected.index = period_range('1990-01', '2000-01', freq='M')
assert_series_equal(result, expected)
def test_ohlc_5min(self):
def _ohlc(group):
if isnull(group).all():
return np.repeat(np.nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
rng = date_range('1/1/2000 00:00:00', '1/1/2000 5:59:50',
freq='10s')
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('5min', how='ohlc', closed='right',
label='right')
self.assert_((resampled.ix['1/1/2000 00:00'] == ts[0]).all())
exp = _ohlc(ts[1:31])
self.assert_((resampled.ix['1/1/2000 00:05'] == exp).all())
exp = _ohlc(ts['1/1/2000 5:55:01':])
self.assert_((resampled.ix['1/1/2000 6:00:00'] == exp).all())
def test_downsample_non_unique(self):
rng = date_range('1/1/2000', '2/29/2000')
rng2 = rng.repeat(5).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
result = ts.resample('M', how='mean')
expected = ts.groupby(lambda x: x.month).mean()
self.assertEquals(len(result), 2)
assert_almost_equal(result[0], expected[1])
assert_almost_equal(result[1], expected[2])
def test_asfreq_non_unique(self):
# GH #1077
rng = date_range('1/1/2000', '2/29/2000')
rng2 = rng.repeat(2).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
self.assertRaises(Exception, ts.asfreq, 'B')
def test_resample_axis1(self):
rng = date_range('1/1/2000', '2/29/2000')
df = DataFrame(np.random.randn(3, len(rng)), columns=rng,
index=['a', 'b', 'c'])
result = df.resample('M', axis=1)
expected = df.T.resample('M').T
tm.assert_frame_equal(result, expected)
def test_resample_panel(self):
rng = date_range('1/1/2000', '6/30/2000')
n = len(rng)
panel = Panel(np.random.randn(3, n, 5),
items=['one', 'two', 'three'],
major_axis=rng,
minor_axis=['a', 'b', 'c', 'd', 'e'])
result = panel.resample('M', axis=1)
def p_apply(panel, f):
result = {}
for item in panel.items:
result[item] = f(panel[item])
return Panel(result, items=panel.items)
expected = p_apply(panel, lambda x: x.resample('M'))
tm.assert_panel_equal(result, expected)
panel2 = panel.swapaxes(1, 2)
result = panel2.resample('M', axis=2)
expected = p_apply(panel2, lambda x: x.resample('M', axis=1))
tm.assert_panel_equal(result, expected)
def test_resample_panel_numpy(self):
rng = date_range('1/1/2000', '6/30/2000')
n = len(rng)
panel = Panel(np.random.randn(3, n, 5),
items=['one', 'two', 'three'],
major_axis=rng,
minor_axis=['a', 'b', 'c', 'd', 'e'])
result = panel.resample('M', how=lambda x: x.mean(1), axis=1)
expected = panel.resample('M', how='mean', axis=1)
tm.assert_panel_equal(result, expected)
panel = panel.swapaxes(1, 2)
result = panel.resample('M', how=lambda x: x.mean(2), axis=2)
expected = panel.resample('M', how='mean', axis=2)
tm.assert_panel_equal(result, expected)
def test_resample_anchored_ticks(self):
# If a fixed delta (5 minute, 4 hour) evenly divides a day, we should
# "anchor" the origin at midnight so we get regular intervals rather
# than starting from the first timestamp which might start in the middle
# of a desired interval
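# For example, second-resolution data beginning at 04:00 resampled with a
# '4h' rule is bucketed at 00:00, 04:00, 08:00, ... rather than at offsets
# measured from the first observed timestamp.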
rng = date_range('1/1/2000 04:00:00', periods=86400, freq='s')
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
freqs = ['t', '5t', '15t', '30t', '4h', '12h']
for freq in freqs:
result = ts[2:].resample(freq, closed='left', label='left')
expected = ts.resample(freq, closed='left', label='left')
assert_series_equal(result, expected)
def test_resample_single_group(self):
mysum = lambda x: x.sum()
rng = date_range('2000-1-1', '2000-2-10', freq='D')
ts = Series(np.random.randn(len(rng)), index=rng)
assert_series_equal(ts.resample('M', how='sum'),
ts.resample('M', how=mysum))
rng = date_range('2000-1-1', '2000-1-10', freq='D')
ts = Series(np.random.randn(len(rng)), index=rng)
assert_series_equal(ts.resample('M', how='sum'),
ts.resample('M', how=mysum))
# GH 3849
s = Series([30.1, 31.6], index=[Timestamp('20070915 15:30:00'),
Timestamp('20070915 15:40:00')])
expected = Series([0.75], index=[Timestamp('20070915')])
result = s.resample('D', how=lambda x: np.std(x))
assert_series_equal(result, expected)
def test_resample_base(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 02:00', freq='s')
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('5min', base=2)
exp_rng = date_range('12/31/1999 23:57:00', '1/1/2000 01:57',
freq='5min')
self.assert_(resampled.index.equals(exp_rng))
def test_resample_daily_anchored(self):
rng = date_range('1/1/2000 0:00:00', periods=10000, freq='T')
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
result = ts[2:].resample('D', closed='left', label='left')
expected = ts.resample('D', closed='left', label='left')
assert_series_equal(result, expected)
def test_resample_to_period_monthly_buglet(self):
# GH #1259
rng = date_range('1/1/2000', '12/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('M', kind='period')
exp_index = period_range('Jan-2000', 'Dec-2000', freq='M')
self.assert_(result.index.equals(exp_index))
def test_resample_empty(self):
ts = _simple_ts('1/1/2000', '2/1/2000')[:0]
result = ts.resample('A')
self.assert_(len(result) == 0)
self.assert_(result.index.freqstr == 'A-DEC')
result = ts.resample('A', kind='period')
self.assert_(len(result) == 0)
self.assert_(result.index.freqstr == 'A-DEC')
xp = DataFrame()
rs = xp.resample('A')
assert_frame_equal(xp, rs)
def test_weekly_resample_buglet(self):
# #1327
rng = date_range('1/1/2000', freq='B', periods=20)
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('W')
expected = ts.resample('W-SUN')
assert_series_equal(resampled, expected)
def test_monthly_resample_error(self):
# #1451
dates = date_range('4/16/2012 20:00', periods=5000, freq='h')
ts = Series(np.random.randn(len(dates)), index=dates)
# it works!
result = ts.resample('M')
def test_resample_anchored_intraday(self):
# #1471, #1458
rng = date_range('1/1/2012', '4/1/2012', freq='100min')
df = DataFrame(rng.month, index=rng)
result = df.resample('M')
expected = df.resample('M', kind='period').to_timestamp(how='end')
tm.assert_frame_equal(result, expected)
result = df.resample('M', closed='left')
exp = df.tshift(1, freq='D').resample('M', kind='period')
exp = exp.to_timestamp(how='end')
tm.assert_frame_equal(result, exp)
rng = date_range('1/1/2012', '4/1/2012', freq='100min')
df = DataFrame(rng.month, index=rng)
result = df.resample('Q')
expected = df.resample('Q', kind='period').to_timestamp(how='end')
tm.assert_frame_equal(result, expected)
result = df.resample('Q', closed='left')
expected = df.tshift(1, freq='D').resample('Q', kind='period',
closed='left')
expected = expected.to_timestamp(how='end')
tm.assert_frame_equal(result, expected)
ts = _simple_ts('2012-04-29 23:00', '2012-04-30 5:00', freq='h')
resampled = ts.resample('M')
self.assert_(len(resampled) == 1)
def test_resample_anchored_monthstart(self):
ts = _simple_ts('1/1/2000', '12/31/2002')
freqs = ['MS', 'BMS', 'QS-MAR', 'AS-DEC', 'AS-JUN']
for freq in freqs:
result = ts.resample(freq, how='mean')
def test_corner_cases(self):
# miscellaneous test coverage
rng = date_range('1/1/2000', periods=12, freq='t')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('5t', closed='right', label='left')
ex_index = date_range('1999-12-31 23:55', periods=4, freq='5t')
self.assert_(result.index.equals(ex_index))
len0pts = _simple_pts('2007-01', '2010-05', freq='M')[:0]
# it works
result = len0pts.resample('A-DEC')
self.assert_(len(result) == 0)
# resample to periods
ts = _simple_ts('2000-04-28', '2000-04-30 11:00', freq='h')
result = ts.resample('M', kind='period')
self.assert_(len(result) == 1)
self.assert_(result.index[0] == Period('2000-04', freq='M'))
def test_anchored_lowercase_buglet(self):
dates = date_range('4/16/2012 20:00', periods=50000, freq='s')
ts = Series(np.random.randn(len(dates)), index=dates)
# it works!
ts.resample('d')
def test_upsample_apply_functions(self):
# #1596
rng = pd.date_range('2012-06-12', periods=4, freq='h')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('20min', how=['mean', 'sum'])
tm.assert_isinstance(result, DataFrame)
def test_resample_not_monotonic(self):
rng = pd.date_range('2012-06-12', periods=200, freq='h')
ts = Series(np.random.randn(len(rng)), index=rng)
ts = ts.take(np.random.permutation(len(ts)))
result = ts.resample('D', how='sum')
exp = ts.sort_index().resample('D', how='sum')
assert_series_equal(result, exp)
def test_resample_median_bug_1688(self):
for dtype in ['int64','int32','float64','float32']:
df = DataFrame([1, 2], index=[datetime(2012, 1, 1, 0, 0, 0),
datetime(2012, 1, 1, 0, 5, 0)],
dtype = dtype)
result = df.resample("T", how=lambda x: x.mean())
exp = df.asfreq('T')
tm.assert_frame_equal(result, exp)
result = df.resample("T", how="median")
exp = df.asfreq('T')
tm.assert_frame_equal(result, exp)
def test_how_lambda_functions(self):
ts = _simple_ts('1/1/2000', '4/1/2000')
result = ts.resample('M', how=lambda x: x.mean())
exp = ts.resample('M', how='mean')
tm.assert_series_equal(result, exp)
self.assertRaises(Exception, ts.resample, 'M',
how=[lambda x: x.mean(), lambda x: x.std(ddof=1)])
result = ts.resample('M', how={'foo': lambda x: x.mean(),
'bar': lambda x: x.std(ddof=1)})
foo_exp = ts.resample('M', how='mean')
bar_exp = ts.resample('M', how='std')
tm.assert_series_equal(result['foo'], foo_exp)
tm.assert_series_equal(result['bar'], bar_exp)
def test_resample_unequal_times(self):
# #1772
start = datetime(1999, 3, 1, 5)
# end hour is less than start
end = datetime(2012, 7, 31, 4)
bad_ind = date_range(start, end, freq="30min")
df = DataFrame({'close': 1}, index=bad_ind)
# it works!
df.resample('AS', 'sum')
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
def _simple_pts(start, end, freq='D'):
rng = period_range(start, end, freq=freq)
return TimeSeries(np.random.randn(len(rng)), index=rng)
class TestResamplePeriodIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_annual_upsample_D_s_f(self):
self._check_annual_upsample_cases('D', 'start', 'ffill')
def test_annual_upsample_D_e_f(self):
self._check_annual_upsample_cases('D', 'end', 'ffill')
def test_annual_upsample_D_s_b(self):
self._check_annual_upsample_cases('D', 'start', 'bfill')
def test_annual_upsample_D_e_b(self):
self._check_annual_upsample_cases('D', 'end', 'bfill')
def test_annual_upsample_B_s_f(self):
self._check_annual_upsample_cases('B', 'start', 'ffill')
def test_annual_upsample_B_e_f(self):
self._check_annual_upsample_cases('B', 'end', 'ffill')
def test_annual_upsample_B_s_b(self):
self._check_annual_upsample_cases('B', 'start', 'bfill')
def test_annual_upsample_B_e_b(self):
self._check_annual_upsample_cases('B', 'end', 'bfill')
def test_annual_upsample_M_s_f(self):
self._check_annual_upsample_cases('M', 'start', 'ffill')
def test_annual_upsample_M_e_f(self):
self._check_annual_upsample_cases('M', 'end', 'ffill')
def test_annual_upsample_M_s_b(self):
self._check_annual_upsample_cases('M', 'start', 'bfill')
def test_annual_upsample_M_e_b(self):
self._check_annual_upsample_cases('M', 'end', 'bfill')
def _check_annual_upsample_cases(self, targ, conv, meth, end='12/31/1991'):
for month in MONTHS:
ts = _simple_pts('1/1/1990', end, freq='A-%s' % month)
result = ts.resample(targ, fill_method=meth,
convention=conv)
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, meth).to_period()
assert_series_equal(result, expected)
def test_basic_downsample(self):
ts = _simple_pts('1/1/1990', '6/30/1995', freq='M')
result = ts.resample('a-dec')
expected = ts.groupby(ts.index.year).mean()
expected.index = period_range('1/1/1990', '6/30/1995',
freq='a-dec')
assert_series_equal(result, expected)
# this is ok
assert_series_equal(ts.resample('a-dec'), result)
assert_series_equal(ts.resample('a'), result)
def test_not_subperiod(self):
# These are incompatible period rules for resampling
ts = _simple_pts('1/1/1990', '6/30/1995', freq='w-wed')
self.assertRaises(ValueError, ts.resample, 'a-dec')
self.assertRaises(ValueError, ts.resample, 'q-mar')
self.assertRaises(ValueError, ts.resample, 'M')
self.assertRaises(ValueError, ts.resample, 'w-thu')
def test_basic_upsample(self):
ts = _simple_pts('1/1/1990', '6/30/1995', freq='M')
result = ts.resample('a-dec')
resampled = result.resample('D', fill_method='ffill', convention='end')
expected = result.to_timestamp('D', how='end')
expected = expected.asfreq('D', 'ffill').to_period()
assert_series_equal(resampled, expected)
def test_upsample_with_limit(self):
rng = period_range('1/1/2000', periods=5, freq='A')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('M', fill_method='ffill', limit=2,
convention='end')
expected = ts.asfreq('M').reindex(result.index, method='ffill',
limit=2)
assert_series_equal(result, expected)
def test_annual_upsample(self):
ts = _simple_pts('1/1/1990', '12/31/1995', freq='A-DEC')
df = DataFrame({'a': ts})
rdf = df.resample('D', fill_method='ffill')
exp = df['a'].resample('D', fill_method='ffill')
assert_series_equal(rdf['a'], exp)
rng = period_range('2000', '2003', freq='A-DEC')
ts = Series([1, 2, 3, 4], index=rng)
result = ts.resample('M', fill_method='ffill')
ex_index = period_range('2000-01', '2003-12', freq='M')
expected = ts.asfreq('M', how='start').reindex(ex_index,
method='ffill')
assert_series_equal(result, expected)
def test_quarterly_upsample(self):
targets = ['D', 'B', 'M']
for month in MONTHS:
ts = _simple_pts('1/1/1990', '12/31/1995', freq='Q-%s' % month)
for targ, conv in product(targets, ['start', 'end']):
result = ts.resample(targ, fill_method='ffill',
convention=conv)
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, 'ffill').to_period()
assert_series_equal(result, expected)
def test_monthly_upsample(self):
targets = ['D', 'B']
ts = _simple_pts('1/1/1990', '12/31/1995', freq='M')
for targ, conv in product(targets, ['start', 'end']):
result = ts.resample(targ, fill_method='ffill',
convention=conv)
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, 'ffill').to_period()
assert_series_equal(result, expected)
def test_weekly_upsample(self):
targets = ['D', 'B']
for day in DAYS:
ts = _simple_pts('1/1/1990', '12/31/1995', freq='W-%s' % day)
for targ, conv in product(targets, ['start', 'end']):
result = ts.resample(targ, fill_method='ffill',
convention=conv)
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, 'ffill').to_period()
assert_series_equal(result, expected)
def test_resample_to_timestamps(self):
ts = _simple_pts('1/1/1990', '12/31/1995', freq='M')
result = ts.resample('A-DEC', kind='timestamp')
expected = ts.to_timestamp(how='end').resample('A-DEC')
assert_series_equal(result, expected)
def test_resample_to_quarterly(self):
for month in MONTHS:
ts = _simple_pts('1990', '1992', freq='A-%s' % month)
quar_ts = ts.resample('Q-%s' % month, fill_method='ffill')
stamps = ts.to_timestamp('D', how='start')
qdates = period_range(ts.index[0].asfreq('D', 'start'),
ts.index[-1].asfreq('D', 'end'),
freq='Q-%s' % month)
expected = stamps.reindex(qdates.to_timestamp('D', 's'),
method='ffill')
expected.index = qdates
assert_series_equal(quar_ts, expected)
# conforms, but different month
ts = _simple_pts('1990', '1992', freq='A-JUN')
for how in ['start', 'end']:
result = ts.resample('Q-MAR', convention=how, fill_method='ffill')
expected = ts.asfreq('Q-MAR', how=how)
expected = expected.reindex(result.index, method='ffill')
# .to_timestamp('D')
# expected = expected.resample('Q-MAR', fill_method='ffill')
assert_series_equal(result, expected)
def test_resample_fill_missing(self):
rng = PeriodIndex([2000, 2005, 2007, 2009], freq='A')
s = TimeSeries(np.random.randn(4), index=rng)
stamps = s.to_timestamp()
filled = s.resample('A')
expected = stamps.resample('A').to_period('A')
assert_series_equal(filled, expected)
filled = s.resample('A', fill_method='ffill')
expected = stamps.resample('A', fill_method='ffill').to_period('A')
assert_series_equal(filled, expected)
def test_cant_fill_missing_dups(self):
rng = PeriodIndex([2000, 2005, 2005, 2007, 2007], freq='A')
s = TimeSeries(np.random.randn(5), index=rng)
self.assertRaises(Exception, s.resample, 'A')
def test_resample_5minute(self):
rng = period_range('1/1/2000', '1/5/2000', freq='T')
ts = TimeSeries(np.random.randn(len(rng)), index=rng)
result = ts.resample('5min')
expected = ts.to_timestamp().resample('5min')
assert_series_equal(result, expected)
def test_upsample_daily_business_daily(self):
ts = _simple_pts('1/1/2000', '2/1/2000', freq='B')
result = ts.resample('D')
expected = ts.asfreq('D').reindex(period_range('1/3/2000', '2/1/2000'))
assert_series_equal(result, expected)
ts = _simple_pts('1/1/2000', '2/1/2000')
result = ts.resample('H', convention='s')
exp_rng = period_range('1/1/2000', '2/1/2000 23:00', freq='H')
expected = ts.asfreq('H', how='s').reindex(exp_rng)
assert_series_equal(result, expected)
def test_resample_empty(self):
ts = _simple_pts('1/1/2000', '2/1/2000')[:0]
result = ts.resample('A')
self.assert_(len(result) == 0)
def test_resample_irregular_sparse(self):
dr = date_range(start='1/1/2012', freq='5min', periods=1000)
s = Series(np.array(100), index=dr)
# subset the data.
subset = s[:'2012-01-04 06:55']
result = subset.resample('10min', how=len)
expected = s.resample('10min', how=len).ix[result.index]
assert_series_equal(result, expected)
def test_resample_weekly_all_na(self):
rng = date_range('1/1/2000', periods=10, freq='W-WED')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('W-THU')
self.assert_(result.isnull().all())
result = ts.resample('W-THU', fill_method='ffill')[:-1]
expected = ts.asfreq('W-THU', method='ffill')
assert_series_equal(result, expected)
def test_resample_tz_localized(self):
dr = date_range(start='2012-4-13', end='2012-5-1')
ts = Series(lrange(len(dr)), dr)
ts_utc = ts.tz_localize('UTC')
ts_local = ts_utc.tz_convert('America/Los_Angeles')
result = ts_local.resample('W')
ts_local_naive = ts_local.copy()
ts_local_naive.index = [x.replace(tzinfo=None)
for x in ts_local_naive.index.to_pydatetime()]
exp = ts_local_naive.resample('W').tz_localize('America/Los_Angeles')
assert_series_equal(result, exp)
# it works
result = ts_local.resample('D')
# #2245
idx = date_range('2001-09-20 15:59', '2001-09-20 16:00', freq='T',
tz='Australia/Sydney')
s = Series([1, 2], index=idx)
result = s.resample('D', closed='right', label='right')
ex_index = date_range('2001-09-21', periods=1, freq='D',
tz='Australia/Sydney')
expected = Series([1.5], index=ex_index)
assert_series_equal(result, expected)
# for good measure
result = s.resample('D', kind='period')
ex_index = period_range('2001-09-20', periods=1, freq='D')
expected = Series([1.5], index=ex_index)
assert_series_equal(result, expected)
def test_closed_left_corner(self):
# #1465
s = Series(np.random.randn(21),
index=date_range(start='1/1/2012 9:30',
freq='1min', periods=21))
s[0] = np.nan
result = s.resample('10min', how='mean', closed='left', label='right')
exp = s[1:].resample('10min', how='mean', closed='left', label='right')
assert_series_equal(result, exp)
result = s.resample('10min', how='mean', closed='left', label='left')
exp = s[1:].resample('10min', how='mean', closed='left', label='left')
ex_index = date_range(start='1/1/2012 9:30', freq='10min', periods=3)
self.assert_(result.index.equals(ex_index))
assert_series_equal(result, exp)
def test_quarterly_resampling(self):
rng = period_range('2000Q1', periods=10, freq='Q-DEC')
ts = Series(np.arange(10), index=rng)
result = ts.resample('A')
exp = ts.to_timestamp().resample('A').to_period()
assert_series_equal(result, exp)
def test_resample_weekly_bug_1726(self):
# 8/6/12 is a Monday
ind = DatetimeIndex(start="8/6/2012", end="8/26/2012", freq="D")
n = len(ind)
data = [[x] * 5 for x in range(n)]
df = DataFrame(data, columns=['open', 'high', 'low', 'close', 'vol'],
index=ind)
# it works!
df.resample('W-MON', how='first', closed='left', label='left')
def test_resample_bms_2752(self):
# GH2753
foo = pd.Series(index=pd.bdate_range('20000101','20000201'))
res1 = foo.resample("BMS")
res2 = foo.resample("BMS").resample("B")
self.assertEqual(res1.index[0], Timestamp('20000103'))
self.assertEqual(res1.index[0], res2.index[0])
# def test_monthly_convention_span(self):
# rng = period_range('2000-01', periods=3, freq='M')
# ts = Series(np.arange(3), index=rng)
# # hacky way to get same thing
# exp_index = period_range('2000-01-01', '2000-03-31', freq='D')
# expected = ts.asfreq('D', how='end').reindex(exp_index)
# expected = expected.fillna(method='bfill')
# result = ts.resample('D', convention='span')
# assert_series_equal(result, expected)
def test_default_right_closed_label(self):
end_freq = ['D', 'Q', 'M', 'D']
end_types = ['M', 'A', 'Q', 'W']
for from_freq, to_freq in zip(end_freq, end_types):
idx = DatetimeIndex(start='8/15/2012', periods=100,
freq=from_freq)
df = DataFrame(np.random.randn(len(idx), 2), idx)
resampled = df.resample(to_freq)
assert_frame_equal(resampled, df.resample(to_freq, closed='right',
label='right'))
def test_default_left_closed_label(self):
others = ['MS', 'AS', 'QS', 'D', 'H']
others_freq = ['D', 'Q', 'M', 'H', 'T']
for from_freq, to_freq in zip(others_freq, others):
idx = DatetimeIndex(start='8/15/2012', periods=100,
freq=from_freq)
df = DataFrame(np.random.randn(len(idx), 2), idx)
resampled = df.resample(to_freq)
assert_frame_equal(resampled, df.resample(to_freq, closed='left',
label='left'))
def test_all_values_single_bin(self):
# 2070
index = period_range(start="2012-01-01", end="2012-12-31", freq="M")
s = Series(np.random.randn(len(index)), index=index)
result = s.resample("A", how='mean')
tm.assert_almost_equal(result[0], s.mean())
def test_resample_doesnt_truncate(self):
# Test for issue #3020
import pandas as pd
dates = pd.date_range('01-Jan-2014','05-Jan-2014', freq='D')
series = Series(1, index=dates)
result = series.resample('D')
self.assertEquals(result.index[0], dates[0])
class TestTimeGrouper(tm.TestCase):
def setUp(self):
self.ts = Series(np.random.randn(1000),
index=date_range('1/1/2000', periods=1000))
def test_apply(self):
grouper = TimeGrouper('A', label='right', closed='right')
grouped = self.ts.groupby(grouper)
f = lambda x: x.order()[-3:]
applied = grouped.apply(f)
expected = self.ts.groupby(lambda x: x.year).apply(f)
applied.index = applied.index.droplevel(0)
expected.index = expected.index.droplevel(0)
assert_series_equal(applied, expected)
def test_count(self):
self.ts[::3] = np.nan
grouper = TimeGrouper('A', label='right', closed='right')
result = self.ts.resample('A', how='count')
expected = self.ts.groupby(lambda x: x.year).count()
expected.index = result.index
assert_series_equal(result, expected)
def test_numpy_reduction(self):
result = self.ts.resample('A', how='prod', closed='right')
expected = self.ts.groupby(lambda x: x.year).agg(np.prod)
expected.index = result.index
assert_series_equal(result, expected)
def test_apply_iteration(self):
# #2300
N = 1000
ind = pd.date_range(start="2000-01-01", freq="D", periods=N)
df = DataFrame({'open': 1, 'close': 2}, index=ind)
tg = TimeGrouper('M')
grouper = tg.get_grouper(df)
# Errors
grouped = df.groupby(grouper, group_keys=False)
f = lambda df: df['close'] / df['open']
# it works!
result = grouped.apply(f)
self.assertTrue(result.index.equals(df.index))
def test_panel_aggregation(self):
ind = pd.date_range('1/1/2000', periods=100)
data = np.random.randn(2, len(ind), 4)
wp = pd.Panel(data, items=['Item1', 'Item2'], major_axis=ind,
minor_axis=['A', 'B', 'C', 'D'])
tg = TimeGrouper('M', axis=1)
grouper = tg.get_grouper(wp)
bingrouped = wp.groupby(grouper)
binagg = bingrouped.mean()
def f(x):
assert(isinstance(x, Panel))
return x.mean(1)
result = bingrouped.agg(f)
tm.assert_panel_equal(result, binagg)
def test_fails_on_no_datetime_index(self):
index_names = ('Int64Index', 'PeriodIndex', 'Index', 'Float64Index',
'MultiIndex')
index_funcs = (tm.makeIntIndex, tm.makePeriodIndex,
tm.makeUnicodeIndex, tm.makeFloatIndex,
lambda m: tm.makeCustomIndex(m, 2))
n = 2
for name, func in zip(index_names, index_funcs):
index = func(n)
df = DataFrame({'a': np.random.randn(n)}, index=index)
with tm.assertRaisesRegexp(TypeError,
"axis must be a DatetimeIndex, "
"but got an instance of %r" % name):
df.groupby(TimeGrouper('D'))
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
dylanGeng/BuildingMachineLearningSystemsWithPython | ch07/boston1.py | 22 | 1147 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
# This script shows an example of simple (ordinary) linear regression
# The first edition of the book used NumPy functions only for this operation. See
# the file boston1numpy.py for that version.
import numpy as np
from sklearn.datasets import load_boston
from sklearn.linear_model import LinearRegression
from matplotlib import pyplot as plt
boston = load_boston()
x = boston.data
y = boston.target
# Fitting a model is trivial: call the ``fit`` method in LinearRegression:
lr = LinearRegression()
lr.fit(x, y)
# The instance member `residues_` contains the sum of the squared residuals
rmse = np.sqrt(lr.residues_/len(x))
print('RMSE: {}'.format(rmse))
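# `residues_` was later deprecated and removed from scikit-learn; a minimal
# equivalent sketch using only the public predict API:
rmse_from_predictions = np.sqrt(np.mean((lr.predict(x) - y) ** 2))
print('RMSE (from predictions): {}'.format(rmse_from_predictions))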
fig, ax = plt.subplots()
# Plot a diagonal (for reference):
ax.plot([0, 50], [0, 50], '-', color=(.9,.3,.3), lw=4)
# Plot the prediction versus real:
ax.scatter(lr.predict(x), boston.target)
ax.set_xlabel('predicted')
ax.set_ylabel('real')
fig.savefig('Figure_07_08.png')
| mit |
smartscheduling/scikit-learn-categorical-tree | sklearn/feature_selection/tests/test_rfe.py | 1 | 11274 | """
Testing Recursive feature elimination
"""
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1, make_regression
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_set_params():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
y_pred = rfe.fit(X, y).predict(X)
clf = SVC()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'})
y_pred2 = rfe.fit(X, y).predict(X)
assert_array_equal(y_pred, y_pred2)
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=10, n_jobs=1)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
diff_support = rfe.get_support() == rfe_svc.get_support()
assert_true(sum(diff_support) == len(diff_support))
def test_rfe_deprecation_estimator_params():
deprecation_message = ("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.")
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
assert_warns_message(DeprecationWarning, deprecation_message,
RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
assert_warns_message(DeprecationWarning, deprecation_message,
RFECV(estimator=SVC(), step=1, cv=5,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
# All the noisy variable were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
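# Worked example: with n_features=11, n_features_to_select=3 and step=2,
# formula1 gives 1 + (11 + 2 - 3 - 1) // 2 = 1 + 4 = 5 and formula2 gives
# 1 + ceil((11 - 3) / 2) = 1 + 4 = 5, so the two expressions agree.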
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals to the maximum of ranking_
assert_equal(np.max(rfe.ranking_),
formula1(n_features, n_features_to_select, step))
assert_equal(np.max(rfe.ranking_),
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
rfecv.fit(X, y)
assert_equal(rfecv.grid_scores_.shape[0],
formula1(n_features, n_features_to_select, step))
assert_equal(rfecv.grid_scores_.shape[0],
formula2(n_features, n_features_to_select, step)) | bsd-3-clause |
bnaul/scikit-learn | examples/preprocessing/plot_scaling_importance.py | 34 | 5381 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Importance of Feature Scaling
=========================================================
Feature scaling through standardization (or Z-score normalization)
can be an important preprocessing step for many machine learning
algorithms. Standardization involves rescaling the features such
that they have the properties of a standard normal distribution
with a mean of zero and a standard deviation of one.
While many algorithms (such as SVM, K-nearest neighbors, and logistic
regression) require features to be normalized, intuitively we can
think of Principal Component Analysis (PCA) as being a prime example
of when normalization is important. In PCA we are interested in the
components that maximize the variance. If one component (e.g. human
height) varies less than another (e.g. weight) because of their
respective scales (meters vs. kilos), PCA might determine that the
direction of maximal variance more closely corresponds with the
'weight' axis, if those features are not scaled. As a change in
height of one meter can be considered much more important than the
change in weight of one kilogram, this is clearly incorrect.
To illustrate this, PCA is performed comparing the use of data with
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` applied,
to unscaled data. The results are visualized and a clear difference noted.
In the 1st principal component of the unscaled set, feature #13 dominates the
direction, being a whole two orders of magnitude above the other features.
This contrasts with the principal component of the scaled version of the
data, where the orders of magnitude are roughly the same across all the
features.
The dataset used is the Wine Dataset available at UCI. This dataset
has continuous features that are heterogeneous in scale due to differing
properties that they measure (e.g. alcohol content and malic acid).
The transformed data is then used to train a naive Bayes classifier, and a
clear difference in prediction accuracies is observed: the dataset that is
scaled before PCA vastly outperforms the unscaled version.
"""
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.naive_bayes import GaussianNB
from sklearn import metrics
import matplotlib.pyplot as plt
from sklearn.datasets import load_wine
from sklearn.pipeline import make_pipeline
print(__doc__)
# Code source: Tyler Lanigan <[email protected]>
# Sebastian Raschka <[email protected]>
# License: BSD 3 clause
RANDOM_STATE = 42
FIG_SIZE = (10, 7)
features, target = load_wine(return_X_y=True)
# Make a train/test split using 30% test size
X_train, X_test, y_train, y_test = train_test_split(features, target,
test_size=0.30,
random_state=RANDOM_STATE)
# Fit to data and predict using pipelined GNB and PCA.
unscaled_clf = make_pipeline(PCA(n_components=2), GaussianNB())
unscaled_clf.fit(X_train, y_train)
pred_test = unscaled_clf.predict(X_test)
# Fit to data and predict using pipelined scaling, GNB and PCA.
std_clf = make_pipeline(StandardScaler(), PCA(n_components=2), GaussianNB())
std_clf.fit(X_train, y_train)
pred_test_std = std_clf.predict(X_test)
# Show prediction accuracies in scaled and unscaled data.
print('\nPrediction accuracy for the normal test dataset with PCA')
print('{:.2%}\n'.format(metrics.accuracy_score(y_test, pred_test)))
print('\nPrediction accuracy for the standardized test dataset with PCA')
print('{:.2%}\n'.format(metrics.accuracy_score(y_test, pred_test_std)))
# Extract PCA from pipeline
pca = unscaled_clf.named_steps['pca']
pca_std = std_clf.named_steps['pca']
# Show first principal components
print('\nPC 1 without scaling:\n', pca.components_[0])
print('\nPC 1 with scaling:\n', pca_std.components_[0])
# Use PCA without and with scale on X_train data for visualization.
X_train_transformed = pca.transform(X_train)
scaler = std_clf.named_steps['standardscaler']
X_train_std_transformed = pca_std.transform(scaler.transform(X_train))
# visualize standardized vs. untouched dataset with PCA performed
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=FIG_SIZE)
for l, c, m in zip(range(0, 3), ('blue', 'red', 'green'), ('^', 's', 'o')):
ax1.scatter(X_train_transformed[y_train == l, 0],
X_train_transformed[y_train == l, 1],
color=c,
label='class %s' % l,
alpha=0.5,
marker=m
)
for l, c, m in zip(range(0, 3), ('blue', 'red', 'green'), ('^', 's', 'o')):
ax2.scatter(X_train_std_transformed[y_train == l, 0],
X_train_std_transformed[y_train == l, 1],
color=c,
label='class %s' % l,
alpha=0.5,
marker=m
)
ax1.set_title('Training dataset after PCA')
ax2.set_title('Standardized training dataset after PCA')
for ax in (ax1, ax2):
ax.set_xlabel('1st principal component')
ax.set_ylabel('2nd principal component')
ax.legend(loc='upper right')
ax.grid()
plt.tight_layout()
plt.show()
| bsd-3-clause |
michigraber/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of Neighbours Classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |
RachitKansal/scikit-learn | examples/semi_supervised/plot_label_propagation_digits_active_learning.py | 294 | 3417 | """
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 10
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(5):
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels,
labels=lp_model.classes_)
print('Iteration %i %s' % (i, 70 * '_'))
print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(
lp_model.label_distributions_.T)
# select five digit examples that the classifier is most uncertain about
uncertainty_index = np.argsort(pred_entropies)[-5:]
# keep track of indices that we get labels for
delete_indices = np.array([])
f.text(.05, (1 - (i + 1) * .183),
"model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r)
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]), size=10)
sub.axis('off')
# labeling 5 points, remote from labeled set
delete_index, = np.where(unlabeled_indices == image_index)
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += 5
f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model.")
plt.subplots_adjust(0.12, 0.03, 0.9, 0.8, 0.2, 0.45)
plt.show()
| bsd-3-clause |
fredhusser/scikit-learn | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
| bsd-3-clause |
mojoboss/scikit-learn | sklearn/metrics/tests/test_score_objects.py | 84 | 14181 | import pickle
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error',
'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'log_loss',
'adjusted_rand_score' # not really, but works
]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should a be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
# Test that the scorers work with the multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(return_indicator=True,
allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack([p[:, -1] for p in y_proba]).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack([p for p in y_proba]).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
# Test clustering scorers against gold standard labeling.
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
return_indicator=True,
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
estimator = dict([(name, sensible_regr)
for name in REGRESSION_SCORERS] +
[(name, sensible_clf)
for name in CLF_SCORERS] +
[(name, sensible_ml_clf)
for name in MULTILABEL_ONLY_SCORERS])
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
| bsd-3-clause |
erichilarysmithsr/givinggraph | givinggraph/analysis/company_cause_classifier.py | 3 | 4320 | """ Predict whether a company supports a cause based on its web presence. We
train a model based on the data at milliondollarlist.org, which posts
donations of $1M or more from companies to causes."""
# FIXME: read/write from database.
# FIXME: save/load classifier
import argparse
from collections import defaultdict
import io
import re
import string
import numpy as np
from sklearn import cross_validation, metrics
from sklearn.feature_extraction.text import CountVectorizer
#from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
#from sklearn.naive_bayes import MultinomialNB
#from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelBinarizer, LabelEncoder
from sklearn.svm import LinearSVC
company_words = set()
punct_re = re.compile('[%s]' % re.escape(string.punctuation))
def read_causes(filename):
global company_words
causes = set()
co2causes = defaultdict(lambda: set())
for line in io.open(filename, mode='rt'):
parts = line.strip().split('\t')
co2causes[parts[0]].add(parts[1])
causes.add(parts[1])
company_words |= set(do_tokenize(parts[0]))
return co2causes, causes
def read_pages(filename, co2causes):
"""Read company web page file, retaining only those in co2causes"""
co2page = dict()
for line in io.open(filename, mode='rt', encoding='latin_1'):
parts = line.strip().split('\t')
if parts[0] in co2causes and len(parts) > 1:
co2page[parts[0]] = parts[1]
return co2page
def print_top_words(vectorizer, clf, class_labels, n=10):
"""Prints features with the highest coefficient values, per class"""
feature_names = vectorizer.get_feature_names()
for i, class_label in enumerate(class_labels):
topn = np.argsort(clf.coef_[i])[-n:]
print("%s: %s" % (class_label,
" ".join(feature_names[j] for j in topn)))
def do_tokenize(s):
s = punct_re.sub(' ', s.lower())
s = re.sub('\s+', ' ', s)
return s.strip().split()
def tokenize(s):
global company_words
toks = do_tokenize(s)
return [t for t in toks if t not in company_words]
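# Added illustration (not part of the original module): ``do_tokenize``
# lower-cases, strips punctuation and splits on whitespace, while
# ``tokenize`` additionally drops tokens that occur in company names
# (collected into ``company_words`` by ``read_causes``). For example:
#     do_tokenize("Acme Corp. donates $1M!")  # -> ['acme', 'corp', 'donates', '1m']
#     tokenize("Acme Corp. donates $1M!")     # -> the same tokens minus company-name words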
if (__name__ == '__main__'):
ap = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
ap.add_argument('--homepages',
metavar='HOMEPAGES',
default='company_pages.tsv',
help='file in format company_name<TAB>web_text')
ap.add_argument('--causes',
metavar='CAUSES',
default='company_causes.tsv',
help='file in format company_name<TAB>cause . Note that companies may appear more than once.')
args = ap.parse_args()
company2causes, causes = read_causes(args.causes)
print 'read %d companies with causes' % len(company2causes.keys())
company2page = read_pages(args.homepages, company2causes)
print 'read %d homepages' % len(company2page)
companies = company2page.keys()
pipeline = CountVectorizer(tokenizer=tokenize)
X = pipeline.fit_transform([company2page[c] for c in companies])
# convert labels to multilabel format
Y = np.array([list(company2causes[c]) for c in companies])
N = len(Y)
label_enc = LabelEncoder()
label_enc.fit(list(causes))
print 'found %d causes' % len(label_enc.classes_)
Y = [list(label_enc.transform(yi)) for yi in Y]
print 'labels:', label_enc.classes_
# LabelBinarizer buggy with np arrays. See https://github.com/scikit-learn/scikit-learn/issues/856
Y = LabelBinarizer().fit_transform(Y)
#clf = OneVsRestClassifier(MultinomialNB())
#clf = OneVsRestClassifier(LogisticRegression())
clf = OneVsRestClassifier(LinearSVC(random_state=0))
cv = cross_validation.KFold(len(Y), 10, shuffle=True, random_state=1234)
losses = []
for train, test in cv:
truth = Y[test]
pred = clf.fit(X[train], Y[train]).predict(X[test])
losses.append(metrics.precision_score(truth.reshape(-1), pred.reshape(-1)))
print 'Precision=%.3f (std=%.3f)' % (np.average(losses), np.std(losses))  # label matches the precision_score collected above
# Retrain on all to print top words
clf.fit(X, Y)
print_top_words(pipeline, clf, label_enc.classes_, 20)
| mit |
spallavolu/scikit-learn | sklearn/cross_decomposition/pls_.py | 187 | 28507 | """
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <[email protected]>
# License: BSD 3 clause
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
from ..utils.validation import check_is_fitted
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
similar to the Power method for determining the eigenvectors and
eigenvalues of a X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
eps = np.finfo(X.dtype).eps
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
X_pinv = linalg.pinv(X) # compute once pinv(X)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv(Y) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
# 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
# y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
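# Added note (illustrative, not from the original source): for mode "A" the
# converged weights approximate the dominant singular vectors of X'Y, so a
# quick sanity check against ``_svd_cross_product`` below would be:
#     u, v = _svd_cross_product(X, Y)
#     w_x, w_y, _ = _nipals_twoblocks_inner_loop(X, Y, mode="A")
#     # abs(u.T.dot(w_x)) is close to 1; w_y is collinear with v (it is only
#     # normalized to unit length when norm_y_weights=True)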
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
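# Added usage sketch (illustrative): the helper centers (and optionally
# scales) both blocks in place and returns the statistics needed to undo
# the preprocessing at transform/predict time, e.g.
#     Xc, Yc, x_mean, y_mean, x_std, y_std = _center_scale_xy(X.copy(), Y.copy())
#     # Xc.mean(axis=0) ~ 0, and Xc.std(axis=0, ddof=1) ~ 1 when scale=True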
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
    This class implements the generic PLS algorithm; the constructor's
    parameters allow obtaining a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
    and unnormalized y weights as defined in [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
    normalized y weights as defined in [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
    (i) The outer loop iterates over components.
    (ii) The inner loop estimates the weight vectors. This can be done with
    two algorithms: (a) the inner loop of the original NIPALS algorithm, or
    (b) an SVD on the residual cross-covariance matrices.
    Parameters
    ----------
    n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like of response, shape = [n_samples, n_targets]
            Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
# copy since this will contains the residuals (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('Invalid number of components: %d' %
self.n_components)
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if self.deflation_mode not in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\
= _center_scale_xy(X, Y, self.scale)
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):
# Yk constant
warnings.warn('Y residual constant at iteration %s' % k)
break
# 1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
break
# 2) Deflation (in place)
# ----------------------
# Possible memory footprint reduction may done here: in order to
# avoid the allocation of a data chunk for the rank-one
# approximations matrix which is then subtracted to Xk, we suggest
# to perform a column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
        # U = Y C(Q'C)^-1 = YC* (C* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.pinv(np.dot(self.x_loadings_.T, self.x_weights_)))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.pinv(np.dot(self.y_loadings_.T, self.y_weights_)))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# FIXME what's with the if?
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *
self.y_std_)
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
# Apply rotation
x_scores = np.dot(X, self.x_rotations_)
if Y is not None:
Y = check_array(Y, ensure_2d=False, copy=copy)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
Ypred = np.dot(X, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
in case of one dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real
Tolerance used in the iterative algorithm default 1e-06.
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
    For each component k, find weights u, v that optimize:
    ``max corr(Xk u, Yk v) * var(Xk u) var(Yk v)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
    This implementation provides the same results as 3 PLS packages
provided in the R language (R-project):
- "mixOmics" with function pls(X, Y, mode = "regression")
- "plspm " with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
    In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
    algorithm [Tenenhaus 1998] p.204, referred to as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
scale : boolean, scale data? (default True)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        set to True unless you don't care about side effects.
n_components : int, number of components to keep. (default 2).
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
For each component k, find weights u, v that optimize::
    max corr(Xk u, Yk v) * var(Xk u) var(Yk v), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
    current Y score. This performs a canonical symmetric version of the PLS
    regression, which is slightly different from CCA. It is mostly used
    for modeling.
    This implementation provides the same results as the "plspm" package
provided in the R language (R-project), using the function plsca(X, Y).
Results are equal or collinear with the function
``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
    lies in the fact that the mixOmics implementation does not exactly implement
the Wold algorithm since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
    Simply performs an SVD on the cross-covariance matrix X'Y.
    There is no iterative deflation here.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, default 2
Number of components to keep.
scale : boolean, default True
Whether to scale X and Y.
copy : boolean, default True
Whether to copy X and Y, or perform in-place computations.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
# copy since this will contains the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if self.n_components > max(Y.shape[1], X.shape[1]):
raise ValueError("Invalid number of components n_components=%d"
" with X of shape %s and Y of shape %s."
% (self.n_components, str(X.shape), str(Y.shape)))
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ =\
_center_scale_xy(X, Y, self.scale)
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
# all the components (C.shape[1]), we have to use another one. Else,
# let's use arpacks to compute only the interesting components.
if self.n_components >= np.min(C.shape):
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
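    # Added usage sketch (illustrative, not part of the original class):
    #     pls_svd = PLSSVD(n_components=2).fit(X, Y)
    #     x_scores, y_scores = pls_svd.transform(X, Y)
    # after which ``x_weights_`` / ``y_weights_`` hold the leading singular
    # vectors of X'Y computed on the centered (and optionally scaled) blocks.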
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
check_is_fitted(self, 'x_mean_')
X = check_array(X, dtype=np.float64)
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
| bsd-3-clause |
EuropeanSocialInnovationDatabase/ESID-main | TextMining/Classifiers/Trainers/RuleBasedInnovativeness.py | 1 | 21912 | from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.pipeline import Pipeline
import numpy as np
import pandas as pd
import re
from os import listdir
from os.path import join,isdir
from sklearn.utils import resample
from sklearn.model_selection import cross_val_score
import pickle
from sklearn.utils import resample
class DataSet:
Annotators = []
def __init__(self):
self.Annotators = []
class Annotator:
files = []
documents = []
Name = ""
def __init__(self):
self.files = []
self.documents = []
self.Name = ""
class Document:
Lines = []
DocumentName = ""
DatabaseID = ""
Annotations = []
Text = ""
isSpam = False
Project_Mark_Objective_1A = 0
Project_Mark_Objective_1B = 0
Project_Mark_Objective_1C = 0
Project_Mark_Actors_2A = 0
Project_Mark_Actors_2B = 0
Project_Mark_Actors_2C = 0
Project_Mark_Outputs_3A = 0
Project_Mark_Innovativeness_3A = 0
isProjectObjectiveSatisfied = False
isProjectActorSatisfied = False
isProjectOutputSatisfied = False
isProjectInnovativenessSatisfied = False
isProjectObjectiveSatisfied_predicted = False
isProjectActorSatisfied_predicted = False
isProjectOutputSatisfied_predicted = False
isProjectInnovativenessSatisfied_predicted = False
def __init__(self):
self.Text = ""
self.Lines = []
self.DocumentName = ""
self.DatabaseID = ""
self.Annotations = []
self.isSpam = False
self.Project_Mark_Objective_1A = 0
self.Project_Mark_Objective_1B = 0
self.Project_Mark_Objective_1C = 0
self.Project_Mark_Actors_2A = 0
self.Project_Mark_Actors_2B = 0
self.Project_Mark_Actors_2C = 0
self.Project_Mark_Outputs_3A = 0
self.Project_Mark_Innovativeness_3A = 0
self.isProjectObjectiveSatisfied = False
self.isProjectActorSatisfied = False
self.isProjectOutputSatisfied = False
self.isProjectInnovativenessSatisfied = False
self.isProjectObjectiveSatisfied_predicted = False
self.isProjectActorSatisfied_predicted = False
self.isProjectOutputSatisfied_predicted = False
self.isProjectInnovativenessSatisfied_predicted = False
class Line:
StartSpan = 0
EndSpan = 0
Text = ""
Sentences = []
Tokens = []
Annotations = []
def __init__(self):
self.StartSpan = 0
self.EndSpan = 0
self.Text = ""
self.Sentences = []
self.Tokens = []
self.Annotations = []
class Sentence:
SentenceText = ""
StartSpan = -1
EndSpan = -1
Annotations = []
def __init__(self):
self.SentenceText = ""
self.StartSpan = -1
self.EndSpan = -1
self.Annotations = []
class Annotation:
FromFile = ""
FromAnnotator = ""
AnnotationText = ""
StartSpan = -1
EndSpan = -1
HighLevelClass = ""
LowLevelClass = ""
data_folder = "../../../Helpers/FullDataset_Alina/"
ds = DataSet()
total_num_spam = 0
sentences = []
total_num_files = 0
# job = aetros.backend.start_job('nikolamilosevic86/GloveModel')
annotators = [f for f in listdir(data_folder) if isdir(join(data_folder, f))]
for ann in annotators:
folder = data_folder + "/" + ann
Annot = Annotator()
Annot.Name = ann
ds.Annotators.append(Annot)
onlyfiles = [f for f in listdir(folder) if (f.endswith(".txt"))]
for file in onlyfiles:
Annot.files.append(data_folder + "/" + ann + '/' + file)
doc = Document()
total_num_files = total_num_files + 1
doc.Lines = []
# doc.Annotations = []
doc.DocumentName = file
Annot.documents.append(doc)
if (file.startswith('a') or file.startswith('t')):
continue
print file
doc.DatabaseID = file.split("_")[1].split(".")[0]
fl = open(data_folder + "/" + ann + '/' + file, 'r')
content = fl.read()
doc.Text = content
lines = content.split('\n')
line_index = 0
for line in lines:
l = Line()
l.StartSpan = line_index
l.EndSpan = line_index + len(line)
l.Text = line
line_index = line_index + len(line) + 1
sentences.append(line)
doc.Lines.append(l)
an = open(data_folder + "/" + ann + '/' + file.replace(".txt", ".ann"), 'r')
annotations = an.readlines()
for a in annotations:
a = re.sub(r'\d+;\d+', '', a).replace(' ', ' ')
split_ann = a.split('\t')
if (split_ann[0].startswith("T")):
id = split_ann[0]
sp_split_ann = split_ann[1].split(' ')
low_level_ann = sp_split_ann[0]
if low_level_ann == "ProjectMark":
continue
span_start = sp_split_ann[1]
span_end = sp_split_ann[2]
ann_text = split_ann[2]
Ann = Annotation()
Ann.AnnotationText = ann_text
Ann.StartSpan = int(span_start)
Ann.EndSpan = int(span_end)
Ann.FromAnnotator = Annot.Name
Ann.FromFile = file
Ann.LowLevelClass = low_level_ann
if (low_level_ann == "SL_Outputs_3a"):
Ann.HighLevelClass = "Outputs"
if (
low_level_ann == "SL_Objective_1a" or low_level_ann == "SL_Objective_1b" or low_level_ann == "SL_Objective_1c"):
Ann.HighLevelClass = "Objectives"
if (
low_level_ann == "SL_Actors_2a" or low_level_ann == "SL_Actors_2b" or low_level_ann == "SL_Actors_2c"):
Ann.HighLevelClass = "Actors"
if (low_level_ann == "SL_Innovativeness_4a"):
Ann.HighLevelClass = "Innovativeness"
doc.Annotations.append(Ann)
for line in doc.Lines:
if line.StartSpan <= Ann.StartSpan and line.EndSpan >= Ann.EndSpan:
line.Annotations.append(Ann)
else:
id = split_ann[0]
sp_split_ann = split_ann[1].split(' ')
mark_name = sp_split_ann[0]
if (len(sp_split_ann) <= 2):
continue
mark = sp_split_ann[2].replace('\n', '')
if (mark_name == "DL_Outputs_3a"):
doc.Project_Mark_Outputs_3A = int(mark)
if int(mark) >= 1:
doc.isProjectOutputSatisfied = True
if (mark_name == "DL_Objective_1a"):
doc.Project_Mark_Objective_1A = int(mark)
if int(mark) >= 1:
doc.isProjectObjectiveSatisfied = True
if (mark_name == "DL_Objective_1b" or mark_name == "DL_Objective"):
doc.Project_Mark_Objective_1B = int(mark)
if int(mark) >= 1:
doc.isProjectObjectiveSatisfied = True
if (mark_name == "DL_Objective_1c"):
doc.Project_Mark_Objective_1C = int(mark)
if int(mark) >= 1:
doc.isProjectObjectiveSatisfied = True
if (mark_name == "DL_Innovativeness_4a" or mark_name=="DL_Innovativeness"):
doc.Project_Mark_Innovativeness_3A = int(mark)
if int(mark) >= 1:
doc.isProjectInnovativenessSatisfied = True
if (mark_name == "DL_Actors_2a" or mark_name=="DL_Actors"):
doc.Project_Mark_Actors_2A = int(mark)
if int(mark) >= 1:
doc.isProjectActorSatisfied = True
if (mark_name == "DL_Actors_2b"):
doc.Project_Mark_Actors_2B = int(mark)
if int(mark) >= 1:
doc.isProjectActorSatisfied = True
if (mark_name == "DL_Actors_2c"):
doc.Project_Mark_Actors_2C = int(mark)
if int(mark) >= 1:
doc.isProjectActorSatisfied = True
if (
doc.Project_Mark_Objective_1A == 0 and doc.Project_Mark_Objective_1B == 0 and doc.Project_Mark_Objective_1C == 0 and doc.Project_Mark_Actors_2A == 0
and doc.Project_Mark_Actors_2B == 0 and doc.Project_Mark_Actors_2C == 0 and doc.Project_Mark_Outputs_3A == 0
and doc.Project_Mark_Innovativeness_3A == 0):
doc.isSpam = True
total_num_spam = total_num_spam + 1
i = 0
j = i + 1
kappa_files = 0
done_documents = []
num_overlap_spam = 0
num_spam = 0
total_objectives = 0
total_outputs = 0
total_actors = 0
total_innovativeness = 0
ann1_annotations_objectives = []
ann2_annotations_objectives = []
ann1_annotations_actors = []
ann2_annotations_actors = []
ann1_annotations_outputs = []
ann2_annotations_outputs = []
ann1_annotations_innovativeness = []
ann2_annotations_innovativeness = []
match_objectives = 0
match_outputs = 0
match_actors = 0
match_innovativeness = 0
while i < len(ds.Annotators) - 1:
while j < len(ds.Annotators):
annotator1 = ds.Annotators[i]
annotator2 = ds.Annotators[j]
for doc1 in annotator1.documents:
for doc2 in annotator2.documents:
if doc1.DocumentName == doc2.DocumentName and doc1.DocumentName not in done_documents:
done_documents.append(doc1.DocumentName)
line_num = 0
ann1_objective = [0] * len(doc1.Lines)
ann2_objective = [0] * len(doc2.Lines)
ann1_output = [0] * len(doc1.Lines)
ann2_output = [0] * len(doc2.Lines)
ann1_actor = [0] * len(doc1.Lines)
ann2_actor = [0] * len(doc2.Lines)
ann1_innovativeness = [0] * len(doc1.Lines)
ann2_innovativeness = [0] * len(doc2.Lines)
while line_num < len(doc1.Lines):
if len(doc1.Lines[line_num].Annotations) > 0:
for a in doc1.Lines[line_num].Annotations:
if a.HighLevelClass == "Objectives":
ann1_objective[line_num] = 1
total_objectives = total_objectives + 1
if a.HighLevelClass == "Outputs":
ann1_output[line_num] = 1
total_outputs = total_outputs + 1
if a.HighLevelClass == "Actors":
ann1_actor[line_num] = 1
total_actors = total_actors + 1
if a.HighLevelClass == "Innovativeness":
ann1_innovativeness[line_num] = 1
total_innovativeness = total_innovativeness + 1
for a1 in doc2.Lines[line_num].Annotations:
if a1.HighLevelClass == a.HighLevelClass:
if a1.HighLevelClass == "Objectives":
match_objectives = match_objectives + 1
if a1.HighLevelClass == "Outputs":
match_outputs = match_outputs + 1
if a1.HighLevelClass == "Actors":
match_actors = match_actors + 1
if a1.HighLevelClass == "Innovativeness":
match_innovativeness = match_innovativeness + 1
if len(doc2.Lines[line_num].Annotations) > 0:
for a in doc2.Lines[line_num].Annotations:
if a.HighLevelClass == "Objectives":
ann2_objective[line_num] = 1
total_objectives = total_objectives + 1
if a.HighLevelClass == "Outputs":
ann2_output[line_num] = 1
total_outputs = total_outputs + 1
if a.HighLevelClass == "Actors":
ann2_actor[line_num] = 1
total_actors = total_actors + 1
if a.HighLevelClass == "Innovativeness":
ann2_innovativeness[line_num] = 1
total_innovativeness = total_innovativeness + 1
line_num = line_num + 1
ann1_annotations_outputs.extend(ann1_output)
ann2_annotations_outputs.extend(ann2_output)
ann1_annotations_objectives.extend(ann1_objective)
ann2_annotations_objectives.extend(ann2_objective)
ann1_annotations_actors.extend(ann1_actor)
ann2_annotations_actors.extend(ann2_actor)
ann1_annotations_innovativeness.extend(ann1_innovativeness)
ann2_annotations_innovativeness.extend(ann2_innovativeness)
print "Statistics for document:" + doc1.DocumentName
print "Annotators " + annotator1.Name + " and " + annotator2.Name
print "Spam by " + annotator1.Name + ":" + str(doc1.isSpam)
print "Spam by " + annotator2.Name + ":" + str(doc2.isSpam)
if (doc1.isSpam == doc2.isSpam):
num_overlap_spam = num_overlap_spam + 1
if doc1.isSpam:
num_spam = num_spam + 1
if doc2.isSpam:
num_spam = num_spam + 1
kappa_files = kappa_files + 1
j = j + 1
i = i + 1
j = i + 1
print annotators
doc_array = []
text_array = []
objectives = []
actors = []
outputs = []
innovativeness = []
for ann in ds.Annotators:
for doc in ann.documents:
doc_array.append(
[doc.Text, doc.isProjectObjectiveSatisfied, doc.isProjectActorSatisfied, doc.isProjectOutputSatisfied,
doc.isProjectInnovativenessSatisfied])
objectives.append(doc.isProjectObjectiveSatisfied)
actors.append(doc.isProjectActorSatisfied)
outputs.append(doc.isProjectOutputSatisfied)
innovativeness.append(doc.isProjectInnovativenessSatisfied)
text_array.append(doc.Text)
df = pd.DataFrame({'text':text_array,'classa':innovativeness})
df_majority = df[df.classa==0]
df_minority = df[df.classa==1]
df_minority_upsampled = resample(df_minority,
replace=True, # sample with replacement
n_samples=160, # to match majority class
random_state=83293) # reproducible results
df_upsampled = pd.concat([df_majority, df_minority_upsampled])
# Display new class counts
print df_upsampled.classa.value_counts()
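# Added note: apart from the unused ``classes`` variable just below,
# ``df_upsampled`` is only referenced by the commented-out ML baseline at
# the bottom of this script; the rule-based classifiers that follow iterate
# over the raw ``doc_array`` built above.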
TP = 0
FP = 0
FN = 0
classes = df_upsampled.classa
i = 0
innovative_1 = 0
innovative_2 = 0
innovative_3 = 0
for sample in doc_array:
if "innovation" in sample[0] or "innovative" in sample[0] or "novelty" in sample[0]:
innovative_1 = innovative_1 + 1
if sample[4] == True:
TP = TP+1
if sample[4] == False:
FP = FP+1
else:
if sample[4]==True:
FN = FN + 1
i = i + 1
precision = float(TP)/float(TP+FP)
recall = float(TP)/float(TP+FN)
f_score = 2*precision*recall/(precision+recall)
print "Innovation rule classifier"
print "False positives:"+str(FP)
print "False negatives:"+str(FN)
print "True positive:"+str(TP)
print "Precision: "+str(precision)
print "Recall: "+str(recall)
print "F1-score: "+str(f_score)
TP = 0
FP = 0
FN = 0
i = 0
for sample in doc_array:
if ("new" in sample[0] or "novel" in sample[0] or "alternative" in sample[0] or "improved" in sample[0] or "cutting edge" in sample[0] or "better" in sample[0])\
and ("method" in sample[0] or "product" in sample[0] or "service" in sample[0] or "application" in sample[0] or "technology" in sample[0] or "practice" in sample[0]):
innovative_2 = innovative_2 +1
if sample[4] == True:
TP = TP+1
if sample[4] == False:
FP = FP+1
else:
if sample[4]==True:
FN = FN + 1
i = i + 1
precision = float(TP)/float(TP+FP)
recall = float(TP)/float(TP+FN)
f_score = 2*precision*recall/(precision+recall)
print "Other rule classifier"
print "False positives:"+str(FP)
print "False negatives:"+str(FN)
print "True positive:"+str(TP)
print "Precision: "+str(precision)
print "Recall: "+str(recall)
print "F1-score: "+str(f_score)
TP = 0
FP = 0
FN = 0
i = 0
for sample in doc_array:
isInnovative = False
if ("method" in sample[0] or "product" in sample[0] or "service" in sample[0] or "application" in sample[0] or "technology" in sample[0] or "practice" in sample[0]):
list_items = ["method","product","service","application","technology","practice"]
index_list = []
for item in list_items:
indexes = [m.start() for m in re.finditer(item, sample[0])]
index_list.extend(indexes)
for index in index_list:
end = len(sample[0])
start = 0
if index - 500>0:
start = index - 500
if index + 500<len(sample[0]):
end = index + 500
substr = sample[0][start:end]
if ("new" in substr or "novel" in substr or "alternative" in substr or "improved" in substr or "cutting edge" in substr or "better" in substr):
isInnovative = True
if isInnovative:
innovative_3 = innovative_3 + 1
if sample[4] == True:
TP = TP+1
if sample[4] == False:
FP = FP+1
else:
if sample[4]==True:
FN = FN + 1
precision = float(TP)/float(TP+FP)
recall = float(TP)/float(TP+FN)
f_score = 2*precision*recall/(precision+recall)
print "Third rule classifier"
print "False positives:"+str(FP)
print "False negatives:"+str(FN)
print "True positive:"+str(TP)
print "Precision: "+str(precision)
print "Recall: "+str(recall)
print "F1-score: "+str(f_score)
TP = 0
FP = 0
FN = 0
i = 0
innovative_4 = 0
for sample in doc_array:
isInnovative = False
if "innovation" in sample[0] or "innovative" in sample[0] or "novelty" in sample[0]:
isInnovative = True
if ("method" in sample[0] or "product" in sample[0] or "service" in sample[0] or "application" in sample[0] or "technology" in sample[0] or "practice" in sample[0]):
list_items = ["method","product","service","application","technology","practice"]
index_list = []
for item in list_items:
indexes = [m.start() for m in re.finditer(item, sample[0])]
index_list.extend(indexes)
for index in index_list:
end = len(sample[0])
start = 0
if index - 500>0:
start = index - 500
if index + 500<len(sample[0]):
end = index + 500
substr = sample[0][start:end]
if ("new" in substr or "novel" in substr or "alternative" in substr or "improved" in substr or "cutting edge" in substr or "better" in substr):
isInnovative = True
if isInnovative:
innovative_4 = innovative_4 + 1
if sample[4] == True:
TP = TP+1
if sample[4] == False:
FP = FP+1
else:
if sample[4]==True:
FN = FN + 1
print ""
print "Innovative 1:"+str(innovative_1)
print "Innovative 2:"+str(innovative_2)
print "Innovative 3:"+str(innovative_3)
print "Innovative 4 (1+3):"+str(innovative_4)
#scores = cross_val_score(text_clf, df_upsampled.text, df_upsampled.classa, cv=10,scoring='f1')
# train = text_array[0:int(0.8*len(text_array))]
# train_Y = innovativeness[0:int(0.8*len(actors))]
#
# test = text_array[int(0.8*len(text_array)):]
# test_Y = innovativeness[int(0.8*len(actors)):]
#
# #categories = ['non actor', 'actor']
#
# text_clf = Pipeline([('vect', CountVectorizer()),
# ('tfidf', TfidfTransformer()),
# ('clf', MultinomialNB()),
# ])
#
# scores = cross_val_score(text_clf, df_upsampled.text, df_upsampled.classa, cv=10,scoring='f1')
# final = 0
# for score in scores:
# final = final + score
# print scores
# print "Final:" + str(final/10)
# text_clf.fit(train,train_Y)
#
# TP = 0
# FP = 0
# FN = 0
# i = 0
# outcome = text_clf.predict(test)
# for i in range(0,len(test)):
# if test_Y[i] == True and outcome[i] == True:
# TP = TP+1
# if test_Y[i] == False and outcome[i]==True:
# FP = FP+1
# if test_Y[i]==True and outputs[i]==False:
# FN = FN + 1
# i = i + 1
# precision = float(TP)/float(TP+FP)
# recall = float(TP)/float(TP+FN)
# f_score = 2*precision*recall/(precision+recall)
# print "ML based rule classifier"
# print "False positives:"+str(FP)
# print "False negatives:"+str(FN)
# print "True positive:"+str(TP)
# print "Precision: "+str(precision)
# print "Recall: "+str(recall)
# print "F1-score: "+str(f_score)
| gpl-3.0 |
sivakasinathan/incubator | make_2DRs/generate_matrices.py | 1 | 5629 | from __future__ import division
from Bio import SeqIO
#import nwalign
#import pandas as pd
import itertools
import argparse
import glob
import os.path as op
import os
import numpy as np
import editdistance
__all__ = ['getBlockMatrix', 'getDotMatrix',
'processFolder']
# This is old -- requires a BED file of locations of repeats along
# long reads (derived from, e.g., HMMER profile searching)
def getBlockMatrix(seq, bedDf):
"""Creates alignment matrix from BED and FASTA file.
Parameters
----------
seqObj : str
Nucleotide sequence
bedDf : pd.DataFrame [features x 2]
Table of feature coordinates.
columns: [start, end]
Returns
-------
mat : np.ndarray [features x features]"""
seqL=[seq[row['start']:row['end']] for irow,row in bedDf.iterrows()]
mat=np.zeros((len(seqL),len(seqL)))
for i,j in itertools.product(range(len(seqL)),range(len(seqL))):
if i < j:
s1=seqL[i]
s2=seqL[j]
a1,a2=nwalign.global_align(str(s1),str(s2),gap_open=-2,gap_extend=-1)
mat[i,j] = np.array([m!=n and m != '-' and n != '-' for m,n in zip(a1,a2)]).sum()
mat[j,i] = mat[i,j]
return mat
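# Added note: getBlockMatrix above depends on the ``nwalign`` and ``pandas``
# imports that are commented out at the top of this file, and it is not
# called by processRun/processFolder below -- it is kept for reference only.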
def getDotMatrix(seq, window=170,step=17):
"""Creates alignment matrix (dotplot) from FASTA file.
Parameters
----------
seqObj : str
Nucleotide sequence
window : int
k-mer size for dotplot (default: 170)
step : int
step size for dotplot (default: 17)
Returns
-------
M : np.ndarray"""
seqDict = {}
seq=seq.upper()
idxs=range(0,len(seq)-window,step)
L = len(idxs)
for i in idxs:
section = seq[i:i+window]
try:
seqDict[section].append(i)
except KeyError:
seqDict[section] = [i]
M = np.zeros((L,L))
for s1,s2 in itertools.combinations(seqDict.keys(),2):
score = editdistance.eval(s1,s2)
for idx1,idx2 in itertools.product(seqDict[s1],seqDict[s2]):
i = idxs.index(idx1)
j = idxs.index(idx2)
M[i,j] = score
M[j,i] = score
return M
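# Added usage sketch (illustrative): for a read sequence ``seq``,
#     M = getDotMatrix(seq, window=170, step=17)
# gives M[i, j] = edit distance between the two ``window``-bp slices starting
# at positions i*step and j*step, so higher-order repeat structure shows up
# as low-distance off-diagonal bands in the matrix.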
def processRun(fastaFn, outFn=None, winSize=170, step=17, minlen=8000):
# def processRun(bedFn, fastaFn, outFn='',winSize=170,step=17):
"""Process BED and FASTA pair (if BED is provided)
Parameters
----------
fastaFn : str
FASTA file name (file contains nucleotide sequences)
outFn : str
Will output numpy NPZ file with this name if string provided (Default: None)
winSize : int
Width of alignment window in bp (default: 170)
step : int
Step size for alignment windows (default: 17)
minlen : int
Minimum read length (default: 8000)
Return
------
matD : dict
Dictionary of alignment matrices indexed/keyed on sequence names
"""
# Old code for running with BED file containing repeat monomer coordinates
# bedDf = pd.read_csv(bedFn, delimiter='\t', header=None)
# bedDf.columns = ['chrom','start','end','mono_class','score','strand','env_st','env_en','p_val']
matD = {}
ctr = 0
for seqObj in SeqIO.parse(fastaFn, format='fasta'):
if len(seqObj) >= minlen:
print ctr, '\t', seqObj.id
# matD[seqObj.id] = getAlnMatrix(seqObj.seq, bedDf.loc[bedDf.chrom == seqObj.id])
matD[seqObj.id] = getDotMatrix(str(seqObj.seq),winSize,step)
ctr += 1
if outFn:
np.savez_compressed(outFn, **matD)
return matD
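# Usage sketch for processRun (file names are assumptions): process one FASTA
# file and reload the saved matrices later without recomputing them.
#
#   matD = processRun('reads.fa', outFn='reads.npz', winSize=170, step=17)
#   loaded = np.load('reads.npz')    # NpzFile; keys are the sequence names
#   first = loaded[loaded.files[0]]  # one square dotplot matrix per read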
def processFolder(folder,winSize=170,step=17,minlen=8000):
"""Generate alignment matrices for BED:FASTA file pairs in a directory and write dictionary of matrices to compressed numpy NPZ file.
Parameters
----------
folder : str
Folder containing FASTA files of reads; only files with the *.fa extension are processed. (The older BED:FASTA pairing, with matching base names and *.bed / *.fa extensions, survives only in the commented-out code.)
winSize : int
Width of alignment window in bp (default: 170)
step : int
Step size for alignment windows (default: 17)
minlen : int
Minimum read length (default: 8000)
"""
fastaFiles = glob.glob(op.join(folder, '*.fa'))
# fastaFiles = [b[:-3] + 'fa' for b in bedFiles]
# bedFiles = [b[:-2] + 'bed' for b in fastaFiles]
folderName = op.join(folder, 'matrices_win_'+str(winSize)+'_step_'+str(step)+'_minL_'+str(minlen))
try:
os.makedirs(folderName)
except OSError:
pass
for faFn in fastaFiles:
print faFn
outFn = op.join(folderName,op.split(faFn[:-4])[1])+'.npz'
processRun(faFn,outFn,winSize,step,minlen)
return
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f','--folder', type=str, dest="folder", help='Name of the directory to be processed', required=True)
parser.add_argument('-w','--win', type=int, dest="win",help='Width of alignment window (bp; default:170)',default=170)
parser.add_argument('-s','--step', type=int, dest="step",help='Step size (bp; default:17)',default=17)
parser.add_argument('-m','--minL', type=int, dest="len",help='Minimum read length (bp; default:8000)',default=8000)
args = parser.parse_args()
print "Generate matrices from a directory containing FASTA (*.fa) files of PacBio reads."
print "Folder path: ", args.folder
print "Window length: ", args.win
print "Step size: ", args.step
print "Minimum read length: ", args.len
processFolder(args.folder,args.win,args.step,args.len)
| mit |
Myasuka/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 105 | 22788 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import _parallel_pairwise
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
# Test the pairwise_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Tests that precomputed metric returns pointer to, and not copy of, X.
S = np.dot(X, X.T)
S2 = pairwise_distances(S, metric="precomputed")
assert_true(S is S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
# Test that a ValueError is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
# pairwise_distances should allow a callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels():
# Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "sigmoid", "polynomial", "linear", "chi2",
"additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {}
kwds['gamma'] = 0.1
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
# Test the paired_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
# Check that the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
# Test that a ValueError is raised when the lengths of X and Y differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
# Test that the cosine kernel is equal to a linear kernel when the data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
XA = np.arange(45)
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.resize(np.arange(45), (5, 9))
XB = np.arange(32)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
asedunov/intellij-community | python/helpers/pydev/pydev_ipython/qt_for_kernel.py | 10 | 2497 | """ Import Qt in a manner suitable for an IPython kernel.
This is the import used for the `gui=qt` or `matplotlib=qt` initialization.
Import Priority:
if Qt4 has been imported anywhere else:
use that
if matplotlib has been imported and doesn't support v2 (<= 1.0.1):
use PyQt4 @v1
Next, ask ETS' QT_API env variable
if QT_API not set:
ask matplotlib via rcParams['backend.qt4']
if it said PyQt:
use PyQt4 @v1
elif it said PySide:
use PySide
else: (matplotlib said nothing)
# this is the default path - nobody told us anything
try:
PyQt @v1
except:
fallback on PySide
else:
use PyQt @v2 or PySide, depending on QT_API
because ETS doesn't work with PyQt @v1.
"""
import os
import sys
from pydev_ipython.version import check_version
from pydev_ipython.qt_loaders import (load_qt, QT_API_PYSIDE,
QT_API_PYQT, QT_API_PYQT_DEFAULT,
loaded_api, QT_API_PYQT5)
#Constraints placed on an imported matplotlib
def matplotlib_options(mpl):
if mpl is None:
return
mpqt = mpl.rcParams.get('backend.qt4', None)
if mpqt is None:
mpqt = mpl.rcParams.get('backend.qt5', None)
if mpqt is None:
return None
if mpqt.lower() == 'pyside':
return [QT_API_PYSIDE]
elif mpqt.lower() == 'pyqt4':
return [QT_API_PYQT_DEFAULT]
elif mpqt.lower() == 'pyqt5':
return [QT_API_PYQT5]
raise ImportError("unhandled value for qt backend from matplotlib: %r" %
mpqt)
def get_options():
"""Return a list of acceptable QT APIs, in decreasing order of
preference
"""
#already imported Qt somewhere. Use that
loaded = loaded_api()
if loaded is not None:
return [loaded]
mpl = sys.modules.get('matplotlib', None)
if mpl is not None and not check_version(mpl.__version__, '1.0.2'):
#1.0.1 only supports PyQt4 v1
return [QT_API_PYQT_DEFAULT]
if os.environ.get('QT_API', None) is None:
#no ETS variable. Ask matplotlib; otherwise fall back to the default order
return matplotlib_options(mpl) or [QT_API_PYQT_DEFAULT, QT_API_PYSIDE, QT_API_PYQT5]
#ETS variable present. Will fall back to pydev_ipython.qt below
return None
api_opts = get_options()
if api_opts is not None:
QtCore, QtGui, QtSvg, QT_API = load_qt(api_opts)
else: # use ETS variable
from pydev_ipython.qt import QtCore, QtGui, QtSvg, QT_API
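# Usage sketch (assumption: QT_API is set before this module is first
# imported; the binding names follow the ETS convention used above).
#
#   import os
#   os.environ['QT_API'] = 'pyside'   # or 'pyqt' / 'pyqt5'
#   from pydev_ipython.qt_for_kernel import QtCore, QtGui
#
# With QT_API unset, the priority documented in the module docstring applies:
# an already-imported binding wins, then matplotlib's rcParams, then the
# PyQt4 @v1 default with PySide and PyQt5 as fallbacks.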
| apache-2.0 |
luo66/scikit-learn | sklearn/metrics/regression.py | 175 | 16953 | """Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Manoj Kumar <[email protected]>
# Michael Eickenberg <[email protected]>
# Konstantin Shmelkov <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
import warnings
__all__ = [
"mean_absolute_error",
"mean_squared_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
multioutput : array-like or string in ['raw_values', 'uniform_average',
'variance_weighted'] or None
None is accepted due to backward compatibility of r2_score().
Returns
-------
type_true : one of {'continuous', 'continuous-multioutput'}
The type of the true target data, as output by
'utils.multiclass.type_of_target'
y_true : array-like of shape = (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples, n_outputs)
Estimated target values.
multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
'uniform_average', 'variance_weighted'] or None
Custom output weights if ``multioutput`` is array-like or
just the corresponding argument if ``multioutput`` is a
correct keyword.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different number of output "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
multioutput_options = (None, 'raw_values', 'uniform_average',
'variance_weighted')
if multioutput not in multioutput_options:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in "
"multi-output cases.")
elif n_outputs != len(multioutput):
raise ValueError(("There must be equally many custom weights "
"(%d) as outputs (%d).") %
(len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred, multioutput
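# Illustrative sketch (inputs are made up, not taken from the test suite):
# 1d targets are promoted to column vectors and the task type is inferred.
#
#   y_type, yt, yp, mo = _check_reg_targets([3, -0.5, 2], [2.5, 0.0, 2],
#                                           'uniform_average')
#   # y_type == 'continuous'; yt.shape == yp.shape == (3, 1); mo is unchanged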
def mean_absolute_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute error regression loss
Read more in the :ref:`User Guide <mean_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([ 0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.849...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average(np.abs(y_pred - y_true),
weights=sample_weight, axis=0)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared error regression loss
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.416..., 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.824...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average((y_true - y_pred) ** 2, axis=0,
weights=sample_weight)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Read more in the :ref:`User Guide <median_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples)
Estimated target values.
Returns
-------
loss : float
A non-negative floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
'uniform_average')
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
def explained_variance_score(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Read more in the :ref:`User Guide <explained_variance_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
score : float or ndarray of floats
The explained variance or ndarray if 'multioutput' is 'raw_values'.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
... # doctest: +ELLIPSIS
0.983...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
numerator = np.average((y_true - y_pred - y_diff_avg) ** 2,
weights=sample_weight, axis=0)
y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
denominator = np.average((y_true - y_true_avg) ** 2,
weights=sample_weight, axis=0)
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
# passing None as weights to np.average() results in the uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
def r2_score(y_true, y_pred,
sample_weight=None,
multioutput=None):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Read more in the :ref:`User Guide <r2_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average',
'variance_weighted'] or None or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
Default value corresponds to 'variance_weighted', but
will be changed to 'uniform_average' in future versions.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
z : float or ndarray of floats
The R^2 score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred, multioutput='variance_weighted') # doctest: +ELLIPSIS
0.938...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
dtype=np.float64)
nonzero_denominator = denominator != 0
nonzero_numerator = numerator != 0
valid_score = nonzero_denominator & nonzero_numerator
output_scores = np.ones([y_true.shape[1]])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
# arbitrarily set to zero to avoid -inf scores; a constant
# y_true is not interesting for scoring a regression anyway
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput is None and y_true.shape[1] != 1:
# @FIXME change in 0.18
warnings.warn("Default 'multioutput' behavior now corresponds to "
"'variance_weighted' value, it will be changed "
"to 'uniform_average' in 0.18.",
DeprecationWarning)
multioutput = 'variance_weighted'
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
# passing None as weights to np.average() results in the uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
# avoid fail on constant y or one-element arrays
if not np.any(nonzero_denominator):
if not np.any(nonzero_numerator):
return 1.0
else:
return 0.0
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
| bsd-3-clause |
brkrishna/freelance | amazon/process/spiders/reviews_spider.py | 1 | 1490 | # -*- coding: utf-8 -*-
import scrapy, os
from scrapy.selector import HtmlXPathSelector
from process.items import RvwUrl
import pandas as pd
#Constants
BASE_URL = 'http://www.amazon.cn'
class RvwUrlSpider(scrapy.Spider):
name = "review_urls"
allowed_domains = ["amazon.cn"]
start_urls = [BASE_URL]
def parse(self, response):
review_urls = set(open('categories').readlines())
if os.path.isfile('categories_done'):
finished_review_urls = set(open('categories_done').readlines())
review_urls -= finished_review_urls
for url in review_urls:
yield scrapy.Request(url.strip(), callback=self.get_review_urls)
def get_review_urls(self, response):
urls = response.meta.get('redirect_urls', [response.url])
nodes = response.xpath('//*[@data-asin]/@data-asin').extract()
with open('review_urls', 'a') as sink:
for node in nodes:
sink.write("%s/gp/product-reviews/%s\n" % (BASE_URL, node))
more_buttons = response.xpath('//a[contains(@class, "dv-view-all")]/@href').extract()
if more_buttons:
for button in more_buttons:
yield scrapy.Request(BASE_URL + button, self.get_review_urls)
next_links = response.xpath('//a[@id="pagnNextLink"]/@href').extract()
if next_links:
for link in next_links:
yield scrapy.Request(BASE_URL + link, self.get_review_urls)
#Push the url to done queue
df_catg_done = pd.DataFrame(data = urls, columns=['link'])
df_catg_done.to_csv('categories_done', mode='a', header=False, index=False)
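# Usage sketch (assumes this spider lives inside a Scrapy project and that a
# 'categories' file with one category URL per line exists in the working
# directory):
#
#   scrapy crawl review_urls
#
# Harvested product-review URLs are appended to 'review_urls', and finished
# category URLs are appended to 'categories_done' so interrupted runs can
# resume where they left off.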
| gpl-2.0 |
xyguo/scikit-learn | sklearn/tests/test_cross_validation.py | 24 | 47465 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be 3d or more when allow_nd is False')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
# avoid StratifiedKFold's Warning about least populated class in y
y = np.arange(10) % 3
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
# Check that the union of train an test split cover all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
# Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 3]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
# though all the classes are not necessarily represented on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Check that errors are raised if all n_labels for individual
# classes are less than n_folds.
y = [3, 3, -1, -1, 2]
assert_raises(ValueError, cval.StratifiedKFold, y, 3)
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
error_string = ("k-fold cross validation requires at least one"
" train / test split")
assert_raise_message(ValueError, error_string,
cval.StratifiedKFold, y, 0)
assert_raise_message(ValueError, error_string,
cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
# Check that KFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
assert_true(np.any(np.arange(100) != ind[test]))
assert_true(np.any(np.arange(100, 200) != ind[test]))
assert_true(np.any(np.arange(200, 300) != ind[test]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups' segment locations
# for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
# estimates a much higher accuracy (around 0.96) than the
# non-shuffling variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_label_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_labels = 15
n_samples = 1000
n_folds = 5
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
labels = rng.randint(0, n_labels, n_samples)
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
labels = np.asarray(labels, dtype=object)
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Construct the test data
labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis',
'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']
labels = np.asarray(labels, dtype=object)
n_labels = len(np.unique(labels))
n_samples = len(labels)
n_folds = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Should fail if there are more folds than labels
labels = np.array([1, 1, 1, 2, 2])
assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test that StratifiedShuffleSplit draws indices with equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_stratified_shuffle_split_overlap_train_test_bug():
# See https://github.com/scikit-learn/scikit-learn/issues/6121 for
# the original bug report
labels = [0, 1, 2, 3] * 3 + [4, 5] * 5
splits = cval.StratifiedShuffleSplit(labels, n_iter=1,
test_size=0.5, random_state=0)
train, test = next(iter(splits))
assert_array_equal(np.intersect1d(train, test), [])
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by Kfold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
]
for y in ys:
n_iter = 6
test_size = 1. / 3
slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(len(slo), n_iter)
y_unique = np.unique(y)
for train, test in slo:
# First test: no train label is in the test set and vice versa
y_train_unique = np.unique(y[train])
y_test_unique = np.unique(y[test])
assert_false(np.any(np.in1d(y[train], y_test_unique)))
assert_false(np.any(np.in1d(y[test], y_train_unique)))
# Second test: train and test add up to all the data
assert_equal(y[train].size + y[test].size, y.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test: # unique train and test labels are correct,
# +- 1 for rounding error
assert_true(abs(len(y_test_unique) -
round(test_size * len(y_unique))) <= 1)
assert_true(abs(len(y_train_unique) -
round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
    # test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
        cv_masks.append((mask_train, mask_test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
    # check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced, so the f1_score should equal the
    # zero/one score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
            assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
            assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    # check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cval.cross_val_predict(classif, X, y, cv=10)
preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
| bsd-3-clause |
boland1992/seissuite_iran | seissuite/sort_later/pointshape.py | 2 | 2014 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 20 12:28:32 2015
@author: boland
"""
import sys
sys.path.append("/home/boland/Anaconda/lib/python2.7/site-packages")
import fiona
import shapefile
from shapely import geometry
import numpy as np
import matplotlib.pyplot as plt
import pyproj
import datetime
from matplotlib.path import Path
#---------------------------------------------
#DEFINE INPUT PARAMETERS
#---------------------------------------------
#enter shapefile absolute or relative path name as string if optimal = True
#shape_path = "/home/boland/Dropbox/University/UniMelb/AGOS/PROGRAMS/ANT/Versions/26.04.2015/shapefiles/aus.shp"
#N = 130
def shape_(input_shape):
with fiona.open(input_shape) as fiona_collection:
# In this case, we'll assume the shapefile only has one record/layer (e.g., the shapefile
# is just for the borders of a single country, etc.).
shapefile_record = fiona_collection.next()
# Use Shapely to create the polygon
shape = geometry.asShape( shapefile_record['geometry'] )
return shape
def points_in_shape(shape_path, N):
shape = shape_(shape_path)
minx, miny, maxx, maxy = shape.bounds
#print minx; print miny; print maxx; print maxy
#bounding_box = geometry.box(minx, miny, maxx, maxy)
#generate random points within bounding box!
N_total = 130**2
sf = shapefile.Reader(shape_path)
shape = sf.shapes()[0]
#find polygon nodes lat lons
    vertices = shape.points
    #convert to a matplotlib path class!
    polygon = Path(vertices)
#points_in_shape = polygon.contains_points(coords)
#coords = coords[points_in_shape == True][0:N-1]
X = abs(maxx - minx) * np.random.rand(N_total,1) + minx
Y = abs(maxy - miny) * np.random.rand(N_total,1) + miny
coords = np.column_stack((X,Y))
points_in_shape = polygon.contains_points(coords)
coords = coords[points_in_shape == True][0:N]
return coords
| gpl-3.0 |
evanbiederstedt/RRBSfun | epiphen/cll_tests/total_CLL_chr11.py | 1 | 8306 | import glob
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50)  # show up to 50 columns when printing
import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/correct_phylo_files")
cw154 = glob.glob("binary_position_RRBS_cw154*")
trito = glob.glob("binary_position_RRBS_trito_pool*")
print(len(cw154))
print(len(trito))
totalfiles = cw154 + trito
print(len(totalfiles))
df_list = []
for file in totalfiles:
df = pd.read_csv(file)
df = df.drop("Unnamed: 0", axis=1)
df["chromosome"] = df["position"].map(lambda x: str(x)[:5])
df = df[df["chromosome"] == "chr11"]
df = df.drop("chromosome", axis=1)
df_list.append(df)
print(len(df_list))
total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object)
total_matrix = total_matrix.drop("index", axis=1)
len(total_matrix.columns)
total_matrix.columns = ["RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACAACC",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACCGCG",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACGTGG",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACTCAC",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.AGGATG",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATAGCG",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATCGAC",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CAAGAG",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CATGAC",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CCTTCG",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CGGTAG",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CTCAGC",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GACACG",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCATTC",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCTGCC",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GGCATC",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GTGAGG",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TAGCGG",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TATCTC",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TCTCTG",
"RRBS_cw154_Tris_protease_CTCTCTAC.ACAACC",
"RRBS_cw154_Tris_protease_CTCTCTAC.ACCGCG",
"RRBS_cw154_Tris_protease_CTCTCTAC.ACGTGG",
"RRBS_cw154_Tris_protease_CTCTCTAC.ACTCAC",
"RRBS_cw154_Tris_protease_CTCTCTAC.AGGATG",
"RRBS_cw154_Tris_protease_CTCTCTAC.ATAGCG",
"RRBS_cw154_Tris_protease_CTCTCTAC.ATCGAC",
"RRBS_cw154_Tris_protease_CTCTCTAC.CATGAC",
"RRBS_cw154_Tris_protease_CTCTCTAC.CCTTCG",
"RRBS_cw154_Tris_protease_CTCTCTAC.CGGTAG",
"RRBS_cw154_Tris_protease_CTCTCTAC.CTATTG",
"RRBS_cw154_Tris_protease_CTCTCTAC.CTCAGC",
"RRBS_cw154_Tris_protease_CTCTCTAC.GACACG",
"RRBS_cw154_Tris_protease_CTCTCTAC.GCATTC",
"RRBS_cw154_Tris_protease_CTCTCTAC.GCTGCC",
"RRBS_cw154_Tris_protease_CTCTCTAC.GGCATC",
"RRBS_cw154_Tris_protease_CTCTCTAC.GTGAGG",
"RRBS_cw154_Tris_protease_CTCTCTAC.GTTGAG",
"RRBS_cw154_Tris_protease_CTCTCTAC.TAGCGG",
"RRBS_cw154_Tris_protease_CTCTCTAC.TATCTC",
"RRBS_cw154_Tris_protease_CTCTCTAC.TCTCTG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACAACC",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACCGCG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACGTGG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACTCAC",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.AGGATG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATAGCG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATCGAC",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.CATGAC",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.CCTTCG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.CGGTAG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTATTG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTCAGC",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.GACACG",
"RBS_cw154_Tris_protease_GR_CAGAGAGG.GCATTC",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCTGCC",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.GGCATC",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTGAGG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTTGAG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.TAGCGG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.TATCTC",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.TCTCTG",
"RRBS_trito_pool_1_TAAGGCGA.ACAACC",
"RRBS_trito_pool_1_TAAGGCGA.ACGTGG",
"RRBS_trito_pool_1_TAAGGCGA.ACTCAC",
"RRBS_trito_pool_1_TAAGGCGA.ATAGCG",
"RRBS_trito_pool_1_TAAGGCGA.ATCGAC",
"RRBS_trito_pool_1_TAAGGCGA.CAAGAG",
"RRBS_trito_pool_1_TAAGGCGA.CATGAC",
"RRBS_trito_pool_1_TAAGGCGA.CCTTCG",
"RRBS_trito_pool_1_TAAGGCGA.CGGTAG",
"RRBS_trito_pool_1_TAAGGCGA.CTATTG",
"RRBS_trito_pool_1_TAAGGCGA.GACACG",
"RRBS_trito_pool_1_TAAGGCGA.GCATTC",
"RRBS_trito_pool_1_TAAGGCGA.GCTGCC",
"RRBS_trito_pool_1_TAAGGCGA.GGCATC",
"RRBS_trito_pool_1_TAAGGCGA.GTGAGG",
"RRBS_trito_pool_1_TAAGGCGA.GTTGAG",
"RRBS_trito_pool_1_TAAGGCGA.TAGCGG",
"RRBS_trito_pool_1_TAAGGCGA.TATCTC",
"RRBS_trito_pool_1_TAAGGCGA.TCTCTG",
"RRBS_trito_pool_1_TAAGGCGA.TGACAG",
"RRBS_trito_pool_1_TAAGGCGA.TGCTGC",
"RRBS_trito_pool_2_CGTACTAG.ACAACC",
"RRBS_trito_pool_2_CGTACTAG.ACGTGG",
"RRBS_trito_pool_2_CGTACTAG.ACTCAC",
"RRBS_trito_pool_2_CGTACTAG.AGGATG",
"RRBS_trito_pool_2_CGTACTAG.ATAGCG",
"RRBS_trito_pool_2_CGTACTAG.ATCGAC",
"RRBS_trito_pool_2_CGTACTAG.CAAGAG",
"RRBS_trito_pool_2_CGTACTAG.CATGAC",
"RRBS_trito_pool_2_CGTACTAG.CCTTCG",
"RRBS_trito_pool_2_CGTACTAG.CGGTAG",
"RRBS_trito_pool_2_CGTACTAG.CTATTG",
"RRBS_trito_pool_2_CGTACTAG.GACACG",
"RRBS_trito_pool_2_CGTACTAG.GCATTC",
"RRBS_trito_pool_2_CGTACTAG.GCTGCC",
"RRBS_trito_pool_2_CGTACTAG.GGCATC",
"RRBS_trito_pool_2_CGTACTAG.GTGAGG",
"RRBS_trito_pool_2_CGTACTAG.GTTGAG",
"RRBS_trito_pool_2_CGTACTAG.TAGCGG",
"RRBS_trito_pool_2_CGTACTAG.TATCTC",
"RRBS_trito_pool_2_CGTACTAG.TCTCTG",
"RRBS_trito_pool_2_CGTACTAG.TGACAG"]
print(total_matrix.shape)
total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else str("?"))
total_matrix = total_matrix.astype(str).apply(''.join)
tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str),' '))
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/CLL_tests")
tott.to_csv("total_CLL_chrom11.phy", header=None, index=None)
print(tott.shape)
| mit |
alexvicegrab/tango_master | foreign/itunes_applescript.py | 1 | 5712 | from functools import partial
import subprocess
from organiser.tm_utils import KeyMapper
FILE_UPDATE = """osascript -e '
tell application "iTunes"
set result to (first file track of playlist "Library" whose {})
set result'\\''s {} to {}
end tell'"""
CREATE_PLAYLIST = """osascript -e '
tell application "iTunes"
set NewPlaylist to (make new user playlist with properties {{name:"{}"}})
end tell'"""
PLAY_SONG = """osascript -e '
tell application "iTunes"
set result to (first file track of playlist "Library" whose {})
play result
delay {}
end tell
tell app "iTunes" to stop'"""
QUEUE_SONG = """osascript -e '
tell application "iTunes"
set NewPlaylist to (first playlist whose id is "{}")
set result to (first file track of playlist "Library" whose {})
copy result to the end of NewPlaylist
end tell'"""
REFRESH_SONG = """osascript -e '
tell application "iTunes"
set result to (first file track of playlist "Library" whose {})
refresh result
end tell'"""
_key_mapper = KeyMapper()
_key_mapper["Name"] = "Title"
_key_mapper["Comment"] = "Comments"
def _escape(value):
if isinstance(value, (str, bytes)):
return '"' + value.replace('"', '\\"').replace("'", "'\\''") + '"'
else:
return str(value)
def _selection(record):
# Get relevant fields according to whether we are looking at iTunes or Pandas
if "Artist" in record:
artist = record["Artist"]
else:
artist = record["Director"]
if "Singers" in record and type(record["Singers"]) != float:
artist += f" - {record['Singers']}"
if "Name" in record:
title = record["Name"]
else:
title = record["Title"]
album = record["Album"]
select = f"name is {_escape(title)} and artist contains {_escape(artist)} and album is {_escape(album)}"
if "Track ID" in record:
select += f" and Database ID is {record['Track ID']}"
return select
# Playlist functions
def create_playlist(playlist):
command = CREATE_PLAYLIST.format(_escape(playlist))
return subprocess.check_output(command, shell=True)
def play_song(record, duration):
command = PLAY_SONG.format(_selection(record), duration)
subprocess.call(command, shell=True)
def queue_song(playlist, record):
command = QUEUE_SONG.format(_escape(playlist), _selection(record))
subprocess.check_output(command, shell=True)
def refresh_song(record):
command = REFRESH_SONG.format(_selection(record))
subprocess.call(command, shell=True)
# Value functions
def set_key_value(record, value, key, prepend=""):
if record[_key_mapper[key]] != value:
# We only run this if record is incorrect
command = FILE_UPDATE.format(
_selection(record), key.lower(), prepend + _escape(value)
)
subprocess.call(command, shell=True)
record[_key_mapper[key]] = value
return True
return False
# TODO: We may be able to do this more cleanly
def set_artist(record, value):
try:
director, singers = value.split(" - ")
except:
director = record["Director"]
singers = ""
if record["Director"] != director or record["Singers"] != singers:
# We only run this if record is incorrect
command = FILE_UPDATE.format(
_selection(record), "Artist".lower(), _escape(value)
)
subprocess.call(command, shell=True)
record["Director"] = director
record["Singers"] = singers
return True
return False
def set_releasedate(record, value):
if value:
return set_key_value(record, value, "Release Date", prepend="date ")
else:
return set_key_value(record, "", "Release Date")
# TODO: Is there a more elegant way?
def set_love(record, value):
if record["Loved"] != value:
# We only run this if record is incorrect
if value == 1:
command = FILE_UPDATE.format(
_selection(record), "Loved".lower(), _escape("true")
)
if value == -1:
command = FILE_UPDATE.format(
_selection(record), "Disliked".lower(), _escape("true")
)
elif value == 0 and record["Loved"] == -1:
command = FILE_UPDATE.format(
_selection(record), "Disliked".lower(), _escape("false")
)
elif value == 0 and record["Loved"] == 1:
command = FILE_UPDATE.format(
_selection(record), "Loved".lower(), _escape("false")
)
subprocess.call(command, shell=True)
record["Loved"] = value
return True
return False
# Time functions
# TODO: Update later to use the pandas dataframe instead
def set_start(record, value):
command = FILE_UPDATE.format(_selection(record), "Start".lower(), round(value, 3))
subprocess.call(command, shell=True)
return True
# TODO: Update later to use the pandas dataframe instead
def set_finish(record, value):
command = FILE_UPDATE.format(_selection(record), "Finish".lower(), round(value, 3))
subprocess.call(command, shell=True)
return True
# TODO: Do we need these functions, or can we directly use the set_key_value where needed?
set_bpm = partial(set_key_value, key="BPM")
set_comment = partial(set_key_value, key="Comment")
set_composer = partial(set_key_value, key="Composer")
set_genre = partial(set_key_value, key="Genre")
set_grouping = partial(set_key_value, key="Grouping")
set_rating = partial(set_key_value, key="Rating")
set_title = partial(set_key_value, key="Name")
set_year = partial(set_key_value, key="Year")
| gpl-3.0 |
vortex-ape/scikit-learn | sklearn/cluster/tests/test_feature_agglomeration.py | 7 | 1783 | """
Tests for sklearn.cluster._feature_agglomeration
"""
# Authors: Sergul Aydore 2017
import numpy as np
from sklearn.cluster import FeatureAgglomeration
from sklearn.utils.testing import assert_true, assert_no_warnings
from sklearn.utils.testing import assert_array_almost_equal
def test_feature_agglomeration():
n_clusters = 1
X = np.array([0, 0, 1]).reshape(1, 3) # (n_samples, n_features)
agglo_mean = FeatureAgglomeration(n_clusters=n_clusters,
pooling_func=np.mean)
agglo_median = FeatureAgglomeration(n_clusters=n_clusters,
pooling_func=np.median)
assert_no_warnings(agglo_mean.fit, X)
assert_no_warnings(agglo_median.fit, X)
assert_true(np.size(np.unique(agglo_mean.labels_)) == n_clusters)
assert_true(np.size(np.unique(agglo_median.labels_)) == n_clusters)
assert_true(np.size(agglo_mean.labels_) == X.shape[1])
assert_true(np.size(agglo_median.labels_) == X.shape[1])
# Test transform
Xt_mean = agglo_mean.transform(X)
Xt_median = agglo_median.transform(X)
assert_true(Xt_mean.shape[1] == n_clusters)
assert_true(Xt_median.shape[1] == n_clusters)
assert_true(Xt_mean == np.array([1 / 3.]))
assert_true(Xt_median == np.array([0.]))
# Test inverse transform
X_full_mean = agglo_mean.inverse_transform(Xt_mean)
X_full_median = agglo_median.inverse_transform(Xt_median)
assert_true(np.unique(X_full_mean[0]).size == n_clusters)
assert_true(np.unique(X_full_median[0]).size == n_clusters)
assert_array_almost_equal(agglo_mean.transform(X_full_mean),
Xt_mean)
assert_array_almost_equal(agglo_median.transform(X_full_median),
Xt_median)
| bsd-3-clause |
haramoz/RND-ss14 | DTreeRegression.py | 1 | 5828 | from sklearn import tree
from sklearn import cross_validation
import numpy as np
import matplotlib.pyplot as plt
import glob
import os
from sklearn.metrics import mean_squared_error
from math import sqrt
import time
from pylab import *
class regressionUsingDtree:
def __init__(self):
pass
def normalizeColumnwiseData(self):
trainingData,desiredLabel = self.loadExperimentData()
#Step 1: Check how many columns are there
noOfColumns = len(trainingData[0])
trainDataArray = np.asarray(trainingData)
print trainingData.shape , noOfColumns
normalizedData = np.zeros(trainingData.shape)
for col in range(noOfColumns):
columnVal = np.asarray(trainingData[:,col])
#print len(columnVal) , len(trainingData)
#Step 2: For all the rows and specific column do the normalization
meanSubstracted = columnVal - np.mean(columnVal)
normalizedColumn = meanSubstracted/np.std(columnVal)
#print "alles gut"
#Step 3: Stack them vertically one by one
normalizedData[:,col] =normalizedColumn
#print normalizedData
return normalizedData,desiredLabel
def loadExperimentData(self):
path = "./tuft_real_data/22June/extractedFeatures/"
list_of_data_files = glob.glob(path+'data/*.csv')
list_of_data_files = sorted(list_of_data_files)
flagInitial = True
for file_name in list_of_data_files:
featureFileName = os.path.splitext(file_name)[0].split("/")[-1]
#print featureFileName
data = np.loadtxt(fname=file_name,delimiter=',')
if flagInitial:
flagInitial = False
trainData = data
else:
trainData = np.vstack((trainData,data))
#For reading the labels
list_of_label_files = glob.glob(path+'labels/*.csv')
list_of_label_files = sorted(list_of_label_files)
flagInitial = True
for file_name in list_of_label_files:
featureFileName = os.path.splitext(file_name)[0].split("/")[-1]
#print featureFileName
labels = np.loadtxt(fname=file_name,delimiter=',')
if flagInitial:
flagInitial = False
trainLabel = labels
else:
trainLabel = np.concatenate((trainLabel,labels),axis=0)
return trainData,trainLabel
def dtreeRegressor(self):
#trainingData,desiredLabel = self.loadExperimentData()
trainingData,desiredLabel = self.normalizeColumnwiseData()
coordinates_train, coordinates_test, windspeed_train, windspeed_test = cross_validation.train_test_split(trainingData,desiredLabel,test_size=0.30)
_, coordinates_predict, _, windspeed_predict = cross_validation.train_test_split(coordinates_test, windspeed_test,test_size=0.04)
curveFit = tree.DecisionTreeRegressor()
#curveFit = tree.DecisionTreeClassifier(max_leaf_nodes=6)
curveFit = curveFit.fit(coordinates_train, windspeed_train)
predicted_speed = curveFit.predict(coordinates_predict)
print "Prediction Score :", curveFit.score(coordinates_test, windspeed_test)
print curveFit.predict([.8,.4,.4,.5,.3,.2])
#predicted_speed = curveFit.predict(coordinates_predict)
mse = mean_squared_error(windspeed_test, curveFit.predict(coordinates_test))
rms = sqrt(mse)
print "mse on the test data: ",mse
errorbarValues = []
#errorbins = [-4,-3,-2,-1,0,1,2,3,4,5]
errorbins = np.arange(-30,30,1)
print "errorbins:", errorbins
for threshold in errorbins:
correct_estimation = 0
for i in range(len(predicted_speed)):
if (windspeed_predict[i] - predicted_speed[i] <= threshold) and (windspeed_predict[i] - predicted_speed[i] > threshold-1):
correct_estimation += 1
print "for threshold between: ", threshold ," and ",threshold-1," estimation: ", correct_estimation, " out of : ", len(windspeed_predict)
errorbarValues.append(correct_estimation)
###############################################################################
# look at the results
#plt.plot(np.arange(0,len(predicted_speed),1),predicted_speed, c='g')
fig = plt.figure()
ax = fig.add_subplot(111)
width = 0.4
ax.bar([i - width for i in errorbins],errorbarValues,width,color="y",alpha=0.7)
#ax.bar(errorbins,errorbarValues,width,color="y",alpha=0.7)
plt.xlabel("Estimation error(kmph)")
plt.ylabel("Number of observation")
plt.title("Error histogram DTR")
ax.set_xlim(-25,25)
plt.grid()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(np.arange(0,len(predicted_speed),1),predicted_speed, c='g',marker='+', label='predicted speed')
#plt.hold('on')
ax.scatter(np.arange(0,len(windspeed_predict),1),windspeed_predict, c='r',marker='x', label='Actual data')
ax.set_xlim(-2,len(windspeed_predict))
ax.set_ylim(8,45)
plt.xlabel('Number of test cases')
plt.ylabel('wind speed')
plt.title('Decision tree Regression')
ax.legend()
for i in range(len(predicted_speed)):
ax.annotate('', xy=(i, windspeed_predict[i]), xytext=(i, predicted_speed[i]),
arrowprops=dict(facecolor='b',alpha=0.5, shrink=0.03,headwidth=4.5,width=1.5,frac=0.4),
)
plt.show()
if __name__ == "__main__":
regressor = regressionUsingDtree()
start_time = time.time()
regressor.dtreeRegressor()
print("--- %s seconds ---" % (time.time() - start_time)) | gpl-2.0 |
exa-analytics/exatomic | exatomic/nwchem/tests/test_output.py | 2 | 2589 | # -*- coding: utf-8 -*-
## Copyright (c) 2015-2020, Exa Analytics Development Team
## Distributed under the terms of the Apache License 2.0
"""
Tests for :mod:`~exatomic.nwchem.output`
#############################################
"""
#import numpy as np
#import pandas as pd
from unittest import TestCase
from exatomic.base import resource
from exatomic.nwchem.output import Output
class TestNWChemOutput(TestCase):
def setUp(self):
self.mam1 = Output(resource('nw-ch3nh2-631g.out'))
self.mam2 = Output(resource('nw-ch3nh2-augccpvdz.out'))
self.nap_roa = Output(resource('nw-naproxen-def2tzvp-roa.out'))
self.meth_roa = Output(resource('nw-methyloxirane-def2tzvp-roa.out'))
def test_parse_atom(self):
self.mam1.parse_atom()
self.mam2.parse_atom()
self.assertEqual(self.mam1.atom.shape[0], 7)
self.assertEqual(self.mam2.atom.shape[0], 7)
def test_parse_orbital(self):
self.mam1.parse_orbital()
self.mam2.parse_orbital()
self.assertEqual(self.mam1.orbital.shape[0], 28)
self.assertEqual(self.mam2.orbital.shape[0], 91)
def test_parse_basis_set(self):
self.mam1.parse_basis_set()
self.mam2.parse_basis_set()
self.assertEqual(self.mam1.basis_set.shape[0], 32)
self.assertEqual(self.mam2.basis_set.shape[0], 57)
def test_parse_basis_set_order(self):
self.mam1.parse_basis_set_order()
self.mam2.parse_basis_set_order()
self.assertEqual(self.mam1.basis_set_order.shape[0], 28)
self.assertEqual(self.mam2.basis_set_order.shape[0], 91)
def test_parse_frame(self):
self.mam1.parse_frame()
self.mam2.parse_frame()
self.assertEqual(self.mam1.frame.shape[0], 1)
self.assertEqual(self.mam2.frame.shape[0], 1)
def test_parse_momatrix(self):
self.mam1.parse_momatrix()
self.mam2.parse_momatrix()
self.assertEqual(self.mam1.momatrix.shape[0], 784)
self.assertEqual(self.mam2.momatrix.shape[0], 8281)
def test_parse_roa(self):
self.nap_roa.parse_roa()
self.assertEqual(self.nap_roa.roa.shape[0], 10)
self.meth_roa.parse_roa()
self.assertEqual(self.meth_roa.roa.shape[0], 10)
def test_parse_gradient(self):
self.nap_roa.parse_gradient()
self.assertEqual(self.nap_roa.gradient.shape[0], 31)
self.meth_roa.parse_gradient()
self.assertEqual(self.meth_roa.gradient.shape[0], 10)
def test_to_universe(self):
self.mam1.to_universe()
self.mam2.to_universe()
| apache-2.0 |
ratnania/pigasus | tests/test_monge_ampere_mesh_picard.py | 1 | 8410 | # -*- coding: UTF-8 -*-
#! /usr/bin/python
from pigasus.utils.manager import context
# ...
try:
from matplotlib import pyplot as plt
PLOT=True
except ImportError:
PLOT=False
# ...
from caid.cad_geometry import square
from caid.cad_geometry import circle
from caid.cad_geometry import quart_circle
from caid.cad_geometry import annulus
import numpy as np
from time import time
import sys
import inspect
filename = inspect.getfile(inspect.currentframe()) # script filename (usually with path)
sys.stdout = open(filename.split('.py')[0]+'.txt', 'w')
# ... import picard from monge_ampere module
from pigasus.utils.load import load
monge_ampere = load("monge_ampere")
picard = monge_ampere.picard
# ...
abs = np.abs; sin = np.sin ; cos = np.cos ; exp = np.exp ; sqrt = np.sqrt
pi = np.pi; atan = np.arctan2 ; cosh = np.cosh
sech = lambda x: 1./cosh(x)
#-----------------------------------
try:
nx = int(sys.argv[1])
except:
nx = 15
try:
ny = int(sys.argv[2])
except:
ny = 15
try:
px = int(sys.argv[3])
except:
px = 2
try:
py = int(sys.argv[4])
except:
py = 2
geo = square(n=[nx,ny], p=[px,py])
#geo = circle(radius=1.,n=[nx,ny], p=[px,py])
#geo = quart_circle(n=[nx,ny], p=[px,py])
#geo = annulus(n=[nx,ny], p=[px,py])
#from caid.cad_geometry import cad_geometry as domain
#geo = domain("input/iter_inner.xml")
#-----------------------------------
#-----------------------------------
verbose = False
withTwoGrids = True
# p_H = [ 5, 5]
# p_h = [ 5, 5]
p_H = [ 3, 3]
p_h = [ 3, 3]
# p_H = [ 2, 2]
# p_h = [ 2, 2]
# TEST 1
# # p = 5
# rtol_H = 1.e-4
# rtol_h = 1.e-8
# p = 3
# rtol_H = 1.e-4
# rtol_h = 1.e-6
# p = 2
# rtol_H = 1.e-4
# rtol_h = 1.e-4
# TEST 3
# p = 3, 5
rtol_H = 1.e-3
rtol_h = 1.e-3
# # p = 2
## rtol_H = 1.e-2
## rtol_h = 1.e-2
rtol2_H = 1.e-6
# rtol2_h = 1.e-6
rtol2_h = 1.e-9
maxiter_H = 40
maxiter_h = 40
n_H = [7,7]
nstage = 1
#-----------------------------------
#-----------------------------------
# ...
# exact solution
# ...
C0 = 1.0
# ... test 1
rho0 = lambda x,y : 1.
C1 = 0.616805883732
t = 0.5
rho1 = lambda x,y : (1. + 5*exp(-50*abs((x-0.5-t)**2+(y-0.5)**2-0.09)))
# ...
# ... test 2
#rho0 = lambda x,y : 1.
#C1 = 1.75484181939
#rho1 = lambda x,y : ( 1. / (2. + cos(8*pi*sqrt((x-0.5)**2+(y-0.5)**2))))
# ...
# ... test 3
#rho0 = lambda x,y : 1.
#C1 = 0.285547502263
#rho1 = lambda x,y : (1. + 10*exp(-50*(y-0.5-0.25*sin(2*pi*x))**2))
# ...
# ...
#rho0 = lambda x,y : 1.
#t = 0.25
#C1 = 0.702563292151
#rho1 = lambda x,y : (1. + 5*exp(-50*abs((x-0.5-0.25*cos(2*pi*t))**2 \
# - (y-0.5-0.5 *sin(2*pi*t))**2 \
# - 0.01) ))
# ...
# ...
#rho0 = lambda x,y : 1.
#t = 1./3
#C1 = 0.831806957866
#rho1 = lambda x,y : ( 1. + 5*exp(-50*abs(y-0.5-0.25*sin(2*pi*x)*sin(2*pi*t))))
# ...
# ...
#rho0 = lambda x,y : 1.
#gamma = 5.
#lamb = 100.
#t = 0.75
#C1 = 0.832943327557
#x0 = t ; y0 = 0.2 + 0.5 * t ; x1 = 1. - t ; y1 = 0.8 - 0.5 * t
#u0 = lambda x,y : gamma * sech(lamb * ( x - x0 + y - y0 ))
#u1 = lambda x,y : gamma * sech(lamb * ( x - x1 + y - y1 ))
#rho1 = lambda x,y : ( 1. + u0(x,y) + u1(x,y))
# ...
# ... test7
#xc = 0.7 ; yc = 0.5
#C1 = 0.281648379406
#
#rho0 = lambda x,y : 1.
#r = lambda s,t : sqrt( (s-xc)**2 + (t-yc)**2 )
#theta = lambda s,t : atan(t-yc,s-xc)
#def rho1(s,t):
# r_ = r(s,t) ; t_ = theta(s,t)
# val = C1 * (1. + 9./(1. + (10*r_*cos(t_-20*r_**2))**2) )
# return val
# ...
## ...
# circle
#rho0 = lambda x,y : 1./pi
#C1 = 0.227475185932
#rho1 = lambda x,y : C1 * (1. + 5*exp(-25*abs((x-0.)**2+(y-0.)**2-0.4)))
## ...
# ...
# quart_circle
#rho0 = lambda x,y : 4./pi
#C1 = 2.91639889933
#rho1 = lambda x,y : C1 * ( 1. / (2. + cos(8*pi*sqrt((x-0.5)**2+(y-0.5)**2))))
# ...
# ... annulus
#C0 = 0.424413181542
#rho0 = lambda x,y : C0 * 1.
#C1 = 0.733393862165
#rho1 = lambda x,y : C1 * ( 1. / (2. + cos(8*pi*sqrt((x-0.5)**2+(y-0.5)**2))))
# ...
# ...
#list_r = np.genfromtxt('input/r.txt')
#list_ix = np.genfromtxt('input/ix.txt')
#list_iy = np.genfromtxt('input/iy.txt')
#
#list_kx = 2 * pi * np.asarray(list_ix)
#list_ky = 2 * pi * np.asarray(list_iy)
# ...
# ...
#C0 = 0.424413181542
#rho0 = lambda x,y : C0 * 1.
##C1 = 0.144432578196 # annulus, test 2
#C1 = 0.103412631611 # annulus, test 3
#def rho1(x,y):
# window = exp(-10*abs((x-0.)**2+(y-0.)**2-0.8**2))
# res = 0.
# for (kx,ky,r) in zip(list_kx,list_ky,list_r):
## res += r * (1+sin(kx*x)) * (1+sin(ky*y)) # test 2
# res += (1+sin(x)) * (1+sin(ky*y)) # test 3
# res *= window
# res += 1.5
# return res
# ...
# ...
#C0 = 0.0471623135665
#rho0 = lambda x,y : C0 * 1.
#C1 = 0.0223721108636 #ITER, test 2
##C1 = 0.0195621124256 #ITER, test 3
#def rho1(x,y):
# window = exp(-10*abs(0.5*(x-6.)**2+0.15*(y-0.5)**2-1.0**2))
# res = 0.
# for (kx,ky,r) in zip(list_kx,list_ky,list_r):
# res += r * (1+sin(kx*x)) * (1+sin(ky*y)) # test 2
## res += (1+sin(x)) * (1+sin(ky*y)) # test 3
# res *= window
# res += 1.5
# return res
# ...
# ...
n_h = []
for axis in range(0,2):
n = n_H[axis]
for i in range(0, nstage):
n = 2*n+1
n_h.append(n)
if withTwoGrids:
print(">>>> coarse grid ", n_H, " with splines of degree ", p_H)
print(">>>> fine grid ", n_h, " with splines of degree ", p_h)
if withTwoGrids:
geo_H = square(n=n_H, p=p_H)
geo_h = square(n=n_h, p=p_h)
# ...
# ...
# values of gradu.n at the boundary
# ...
def func_g(x,y):
return [x,y]
# ...
#-----------------------------------
# ...
# values of u at the boundary
# ...
bc_neumann={}
for data in geo_h.external_faces:
patch_id = int(data[0]) ; face_id = int(data[1])
bc_neumann[patch_id,face_id] = func_g
# ...
#-----------------------------------
# ...
tc = {}
tc['A'] = lambda x,y : [1., 0., 0., 1.]
tc['b'] = lambda x,y : [1.e-3]
tc['u'] = lambda x,y : [0.]
tc['f'] = lambda x,y : [0.]
tc['bc_neumann'] = bc_neumann
# ...
with context():
# PDE_H = picard(geometry=geo_H, testcase=tc)
# PDE_h = picard(geometry=geo_h, testcase=tc)
if withTwoGrids:
PDE_H = picard(geometry=geo_H, bc_neumann=bc_neumann)
PDE_h = picard(geometry=geo_h, bc_neumann=bc_neumann)
# ...
print(">>> Solving using Picard <<<")
# ...
if withTwoGrids:
if PDE_H.Dirichlet:
U_H = PDE_H.unknown_dirichlet
else:
U_H = PDE_H.unknown
if PDE_h.Dirichlet:
U_h = PDE_h.unknown_dirichlet
else:
U_h = PDE_h.unknown
# ...
# ...
c_rho = C0/C1
# ...
# ...
if withTwoGrids:
print("*****************************")
tb = time()
Errors_H, ErrorsH1_H = PDE_H.solve( rho0, rho1, c_rho=None, u0=None \
, maxiter=maxiter_H, rtol=rtol_H, rtol2=rtol2_h, verbose=verbose)
te = time()
print("Coarse solver converges after ", len(Errors_H) \
, " with final error ", Errors_H[-1] \
, " with final H1-error ", ErrorsH1_H[-1])
print("Elapsed time ", te-tb)
print("*****************************")
PDE_H.transferSolution(geo_H, U_H, geo_h, U_h)
u0 = U_h.get()
else:
u0 = np.zeros(PDE_h.size)
print("*****************************")
tb = time()
Errors_h, ErrorsH1_h = PDE_h.solve( rho0, rho1, c_rho=None, u0=u0 \
, maxiter=maxiter_h, rtol=rtol_h, rtol2=rtol2_h, verbose=verbose)
te = time()
print("Monge-Ampere eq. converges after ", len(Errors_h) \
, " with final error ", Errors_h[-1] \
, " with final H1-error ", ErrorsH1_h[-1])
print("Elapsed time ", te-tb)
print("*****************************")
if withTwoGrids:
uH = U_H.get()
uh = U_h.get()
if withTwoGrids:
print("Error-coarse ", np.abs(1.-PDE_H.norm(exact=PDE_H.Err_func)))
print("Error-fine ", np.abs(1.-PDE_h.norm(exact=PDE_h.Err_func)))
if withTwoGrids:
U_H.set(uH)
U_h.set(uh)
# ...
# ...
# PDE_H.plotMesh(ntx=60, nty=60)
# ...
if PLOT:
PDE_h.plotMesh(ntx=60, nty=60)
plt.savefig(filename.split('.py')[0]+'.png', format='png')
plt.clf()
# ...
np.savetxt("Errors.txt", np.asarray(Errors_h))
if withTwoGrids:
PDE_H.free()
PDE_h.free()
| mit |
ZenDevelopmentSystems/scikit-learn | sklearn/ensemble/weight_boosting.py | 71 | 40664 | """Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseWeightBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <[email protected]>
# Gilles Louppe <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# Arnaud Joly <[email protected]>
#
# Licence: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.core.umath_tests import inner1d
from .base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin
from ..externals import six
from ..externals.six.moves import zip
from ..externals.six.moves import xrange as range
from .forest import BaseForest
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..tree.tree import BaseDecisionTree
from ..tree._tree import DTYPE
from ..utils import check_array, check_X_y, check_random_state
from ..metrics import accuracy_score, r2_score
from sklearn.utils.validation import has_fit_parameter, check_is_fitted
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for AdaBoost estimators.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator=None,
n_estimators=50,
estimator_params=tuple(),
learning_rate=1.,
random_state=None):
super(BaseWeightBoosting, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.learning_rate = learning_rate
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is
forced to DTYPE from tree._tree if the base classifier of this
ensemble weighted boosting classifier is a tree or forest.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check parameters
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
if (self.base_estimator is None or
isinstance(self.base_estimator, (BaseDecisionTree,
BaseForest))):
dtype = DTYPE
accept_sparse = 'csc'
else:
dtype = None
accept_sparse = ['csr', 'csc']
X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype)
if sample_weight is None:
# Initialize weights to 1 / n_samples
sample_weight = np.empty(X.shape[0], dtype=np.float)
sample_weight[:] = 1. / X.shape[0]
else:
# Normalize existing weights
sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)
# Check that the sample weights sum is positive
if sample_weight.sum() <= 0:
raise ValueError(
"Attempting to fit with a non-positive "
"weighted number of samples.")
# Check parameters
self._validate_estimator()
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float)
self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float)
for iboost in range(self.n_estimators):
# Boosting step
sample_weight, estimator_weight, estimator_error = self._boost(
iboost,
X, y,
sample_weight)
# Early termination
if sample_weight is None:
break
self.estimator_weights_[iboost] = estimator_weight
self.estimator_errors_[iboost] = estimator_error
# Stop if error is zero
if estimator_error == 0:
break
sample_weight_sum = np.sum(sample_weight)
# Stop if the sum of sample weights has become non-positive
if sample_weight_sum <= 0:
break
if iboost < self.n_estimators - 1:
# Normalize
sample_weight /= sample_weight_sum
return self
@abstractmethod
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
        Warning: This method needs to be overridden by subclasses.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
pass
def staged_score(self, X, y, sample_weight=None):
"""Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like, shape = [n_samples]
Labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
z : float
"""
for y_pred in self.staged_predict(X):
if isinstance(self, ClassifierMixin):
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
else:
yield r2_score(y, y_pred, sample_weight=sample_weight)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
try:
norm = self.estimator_weights_.sum()
return (sum(weight * clf.feature_importances_ for weight, clf
in zip(self.estimator_weights_, self.estimators_))
/ norm)
except AttributeError:
raise AttributeError(
"Unable to compute feature importances "
"since base_estimator does not have a "
"feature_importances_ attribute")
def _validate_X_predict(self, X):
"""Ensure that X is in the proper format"""
if (self.base_estimator is None or
isinstance(self.base_estimator,
(BaseDecisionTree, BaseForest))):
X = check_array(X, accept_sparse='csr', dtype=DTYPE)
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return X
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
log_proba = np.log(proba)
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
"""An AdaBoost classifier.
An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm known as AdaBoost-SAMME [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeClassifier)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper `classes_`
and `n_classes_` attributes.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes]
The classes labels.
n_classes_ : int
The number of classes.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
algorithm='SAMME.R',
random_state=None):
super(AdaBoostClassifier, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.algorithm = algorithm
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
Returns self.
"""
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported" % self.algorithm)
# Fit
return super(AdaBoostClassifier, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostClassifier, self)._validate_estimator(
default=DecisionTreeClassifier(max_depth=1))
# SAMME-R requires predict_proba-enabled base estimators
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator_, 'predict_proba'):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead.")
if not has_fit_parameter(self.base_estimator_, "sample_weight"):
raise ValueError("%s doesn't support sample_weight."
% self.base_estimator_.__class__.__name__)
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == 'SAMME.R':
return self._boost_real(iboost, X, y, sample_weight)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight)
def _boost_real(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME.R real algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict_proba = estimator.predict_proba(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
axis=0)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
# Construct y coding as described in Zhu et al [2]:
#
# y_k = 1 if c == k else -1 / (K - 1)
#
# where K == n_classes_ and c, k in [0, K) are indices along the second
# axis of the y coding with c being the index corresponding to the true
# class label.
n_classes = self.n_classes_
classes = self.classes_
y_codes = np.array([-1. / (n_classes - 1), 1.])
y_coding = y_codes.take(classes == y[:, np.newaxis])
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba = y_predict_proba # alias for readability
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
# Boost weight using multi-class AdaBoost SAMME.R alg
estimator_weight = (-1. * self.learning_rate
* (((n_classes - 1.) / n_classes) *
inner1d(y_coding, np.log(y_predict_proba))))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, 1., estimator_error
def _boost_discrete(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict = estimator.predict(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
n_classes = self.n_classes_
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1. - (1. / n_classes):
self.estimators_.pop(-1)
if len(self.estimators_) == 0:
raise ValueError('BaseClassifier in AdaBoostClassifier '
'ensemble is worse than random, ensemble '
'can not be fit.')
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
estimator_weight = self.learning_rate * (
np.log((1. - estimator_error) / estimator_error) +
np.log(n_classes - 1.))
        # Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight * incorrect *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, estimator_weight, estimator_error
def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted classes.
"""
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(
np.argmax(pred, axis=1), axis=0))
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : array, shape = [n_samples, k]
The decision function of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
pred = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
pred = sum((estimator.predict(X) == classes).T * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
pred /= self.estimator_weights_.sum()
if n_classes == 2:
pred[:, 0] *= -1
return pred.sum(axis=1)
return pred
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each boosting iteration.
        This method allows monitoring (i.e. determining the test error)
after each boosting iteration.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_pred = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_pred = estimator.predict(X)
current_pred = (current_pred == classes).T * weight
if pred is None:
pred = current_pred
else:
pred += current_pred
if n_classes == 2:
tmp_pred = np.copy(pred)
tmp_pred[:, 0] *= -1
yield (tmp_pred / norm).sum(axis=1)
else:
yield pred / norm
def predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
check_is_fitted(self, "n_classes_")
n_classes = self.n_classes_
X = self._validate_X_predict(X)
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
proba = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
proba = sum(estimator.predict_proba(X) * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
proba /= self.estimator_weights_.sum()
proba = np.exp((1. / (n_classes - 1)) * proba)
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : generator of array, shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
X = self._validate_X_predict(X)
n_classes = self.n_classes_
proba = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_proba = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_proba = estimator.predict_proba(X) * weight
if proba is None:
proba = current_proba
else:
proba += current_proba
real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
normalizer = real_proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
real_proba /= normalizer
yield real_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample are computed as
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
            The class log-probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
return np.log(self.predict_proba(X))
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeRegressor)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each regressor by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
loss : {'linear', 'square', 'exponential'}, optional (default='linear')
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
    estimators_ : list of regressors
The collection of fitted sub-estimators.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
loss='linear',
random_state=None):
super(AdaBoostRegressor, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.loss = loss
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (real numbers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check loss
if self.loss not in ('linear', 'square', 'exponential'):
raise ValueError(
"loss must be 'linear', 'square', or 'exponential'")
# Fit
return super(AdaBoostRegressor, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostRegressor, self)._validate_estimator(
default=DecisionTreeRegressor(max_depth=3))
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
generator = check_random_state(self.random_state)
# Weighted sampling of the training set with replacement
# For NumPy >= 1.7.0 use np.random.choice
cdf = sample_weight.cumsum()
cdf /= cdf[-1]
uniform_samples = generator.random_sample(X.shape[0])
bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
bootstrap_idx = np.array(bootstrap_idx, copy=False)
# Fit on the bootstrapped sample and obtain a prediction
# for all samples in the training set
estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
y_predict = estimator.predict(X)
error_vect = np.abs(y_predict - y)
error_max = error_vect.max()
if error_max != 0.:
error_vect /= error_max
if self.loss == 'square':
error_vect **= 2
elif self.loss == 'exponential':
error_vect = 1. - np.exp(- error_vect)
# Calculate the average loss
estimator_error = (sample_weight * error_vect).sum()
if estimator_error <= 0:
# Stop if fit is perfect
return sample_weight, 1., 0.
elif estimator_error >= 0.5:
# Discard current estimator only if it isn't the only one
if len(self.estimators_) > 1:
self.estimators_.pop(-1)
return None, None, None
beta = estimator_error / (1. - estimator_error)
# Boost weight using AdaBoost.R2 alg
estimator_weight = self.learning_rate * np.log(1. / beta)
if not iboost == self.n_estimators - 1:
sample_weight *= np.power(
beta,
(1. - error_vect) * self.learning_rate)
return sample_weight, estimator_weight, estimator_error
def _get_median_predict(self, X, limit):
# Evaluate predictions of all estimators
predictions = np.array([
est.predict(X) for est in self.estimators_[:limit]]).T
# Sort the predictions
sorted_idx = np.argsort(predictions, axis=1)
# Find index of median prediction for each sample
weight_cdf = self.estimator_weights_[sorted_idx].cumsum(axis=1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
median_idx = median_or_above.argmax(axis=1)
median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx]
# Return median predictions
return predictions[np.arange(X.shape[0]), median_estimators]
def predict(self, X):
"""Predict regression value for X.
The predicted regression value of an input sample is computed
        as the weighted median prediction of the regressors in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
return self._get_median_predict(X, len(self.estimators_))
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted regression value of an input sample is computed
        as the weighted median prediction of the regressors in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
for i, _ in enumerate(self.estimators_, 1):
yield self._get_median_predict(X, limit=i)
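# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the library API). It assumes a
# regular scikit-learn installation is importable; the iris dataset and the
# synthetic sine-regression problem below are only examples. The block is
# guarded by __main__ so importing this module is unaffected.
if __name__ == "__main__":
    import numpy as np
    from sklearn.datasets import load_iris
    from sklearn.ensemble import AdaBoostClassifier, AdaBoostRegressor

    # AdaBoost-SAMME.R with the default depth-1 decision stump.
    iris = load_iris()
    clf = AdaBoostClassifier(n_estimators=100, algorithm="SAMME.R")
    clf.fit(iris.data, iris.target)
    print("classification accuracy:", clf.score(iris.data, iris.target))

    # AdaBoost.R2 with the default depth-3 regression tree on noisy sine data.
    rng = np.random.RandomState(0)
    X = np.sort(5 * rng.rand(200, 1), axis=0)
    y = np.sin(X).ravel() + 0.1 * rng.randn(200)
    reg = AdaBoostRegressor(n_estimators=100, loss="linear")
    reg.fit(X, y)
    print("regression R^2:", reg.score(X, y))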
| bsd-3-clause |
MichalKononenko/T1erminator | learn_T1_NMR_2.py | 1 | 6800 | """
Created on Wed Jun 24 11:04:10 2015
Learn T1 NMR experiment run on TOPSPIN
T1 inversion recovery model defined in the T1Model class (t1_model module)
includes calls to run TOPSPIN commands - NMR experiment
@author: Kissan Mistry
"""
# imports and initializations
from __future__ import division
from t1_model import T1Model
from qinfer.distributions import UniformDistribution
#from qinfer.distributions import NormalDistribution
from qinfer.smc import SMCUpdater
from qinfer.resamplers import LiuWestResampler
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import time
import Lorentzian_fit as LF
from qinfer.expdesign import ExperimentDesigner
import logging
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
model = T1Model()
prior = UniformDistribution(np.array([0, 100]))
N_particles=100000
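# Sequential Monte Carlo updater over the T1 prior; the Liu-West resampler
# (a=0.98) combats particle impoverishment, and zero_weight_policy='reset'
# re-initializes the weights if they all collapse to zero.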
updater = SMCUpdater(model, N_particles, prior, resampler=LiuWestResampler(0.98),zero_weight_policy='reset')
designer=ExperimentDesigner(updater,opt_algo=1)
#Set the value of T1 to Learn, pick 1 value from prior
#true_model=prior.sample()
true_model=np.array([6.77], dtype=model.expparams_dtype)
performance_dtype = [
('expparams', 'float'),
('sim_outcome', 'float'),
('est_mean', 'float'),
]
#NMR EXPERIMENT Initialization*******************************
# going to normalize Mo to a maximum of 1.
#model.Mo=float(raw_input('Please enter Mo: '))
#dummy=float(raw_input('Waiting for Mo: '))
#Mo_norm=LF.lorentzfit('1_spectrum.txt')
#model.Mo=(Mo_norm/Mo_norm)
#
#to save output data
timestr = time.strftime("%Y%m%d-%H%M%S")
Saver = PdfPages(timestr+'.pdf')
save_exp=open(timestr+'_exp.txt','w')
save_out=open(timestr+'_out.txt','w')
save_mean=open(timestr+'_mean.txt','w')
#iterative process to find T1
trials=20
data = np.zeros((trials, 1), dtype=performance_dtype)
for idx_trials in xrange(trials):
log.info('trial: ' + str(idx_trials))
#CHOOSE EXPERIMENTAL PARAMETER****************************
guess_iter=50
guess_vec=np.zeros((guess_iter,1))
risk_vec=np.zeros((guess_iter,1))
designer.new_exp()
store_risk=100000000
for idx_guess in xrange(guess_iter):
# print 'guess iteration: '+ str(idx_guess)
# guess=np.array([[[0.1+(0.1*idx_guess)]]],dtype=model.expparams_dtype) #sweep guess/incremental increase
guess=np.array([model.particle_guess_heuristic(updater, 10000)], dtype=model.expparams_dtype) #generate guess from PGH
# print 'Your Guess is: '+ str(guess)
#evaluate bayes risk for the guess
current_risk=updater.bayes_risk(guess)
# print 'bayes_risk: ' + str(current_risk)
if current_risk<store_risk:
store_risk=current_risk
expparams=guess
risk_vec[idx_guess]=current_risk
guess_vec[idx_guess]=guess
log.debug('Your Tau is: ' + str(expparams))
#optimize that guess
# expparams=designer.design_expparams_field(guess,0,cost_scale_k=1,disp=False,maxiter=10000,maxfun=10000,store_guess=True,grad_h=1,)
# print 'Your Tau is: ' + str(expparams)
fig = plt.figure()
plt.scatter(guess_vec,risk_vec,s=1)
plt.title('Bayes Risk of Guesses, Best Guess= '+str(expparams))
plt.ylabel('Bayes Risk')
plt.xlabel(r'$\tau$'+' Guess')
Saver.savefig()
#THIS MANUALLY COMPARES THE BAYES RISK OF THE GUESS VALUE AND THE OPTIMIZED VALUE AND PLOTS IT FOR SHOW,
#TO SEE HOW IT IS CHOOSING THE BEST VALUE.
# guess_iter=100
# guess_vec=np.zeros((guess_iter,1))
# grisk_vec=np.zeros((guess_iter,1))
# tau_vec=np.zeros((guess_iter,1))
# trisk_vec=np.zeros((guess_iter,1))
# designer.new_exp()
# for idx_guess in xrange(guess_iter):
# print 'guess iteration: '+ str(idx_guess)
# guess=np.array([model.particle_guess_heuristic(updater,10000)],dtype=model.expparams_dtype )
# guess_risk=updater.bayes_risk(guess)
# print 'Your Guess is: '+ str(guess)
# guess_vec[idx_guess]=guess
# grisk_vec[idx_guess]=guess_risk
# expparams=designer.design_expparams_field(guess,0,cost_scale_k=10,disp=False,maxiter=10000,maxfun=10000,store_guess=False,grad_h=1,)
# tau_risk=updater.bayes_risk(expparams)
# print 'Your Tau is: ' + str(expparams)
# tau_vec[idx_guess]=expparams
# trisk_vec[idx_guess]=tau_risk
# fig1=plt.figure()
# plt.scatter(guess_vec,grisk_vec)
# fig2=plt.figure()
# plt.scatter(tau_vec,trisk_vec)
# expparams=np.array([guess_vec[np.argmin(grisk_vec)]],dtype=model.expparams_dtype)
#Try getting quantity for Fisher Information and Score
# score=model.score()
## expparams=np.array([np.linspace(1, 10, 1000)])
# expparams=model.pgh(updater,10000) #generate guess from PGH
#
# fisher=model.fisher_information(true_model,expparams)
#
#SIMULATE*******************************************************
    # simulate outcomes - based on the true T1 and the chosen initial value
#will be replaced by actual data collection from NMR for Mz values
sim_outcome=model.simulate_experiment(true_model,expparams)
outcome=sim_outcome
#NMR EXPERIMENT*************************************************
    # USE this instead of simulate when doing experiments in NMR
# outcome=np.array([[[float(raw_input('Enter obtained Mz: '))]]])
# dummy=float(raw_input('waiting for Mz'))
# Mz_value=LF.lorentzfit(str(idx_trials+2)+'_spectrum.txt')
# outcome=np.array([[[Mz_value/abs(Mo_norm)]]])
#Run SMC and update the posterior distribution
updater.update(outcome,expparams,check_for_resample=True)
#STORE DATA******************************************
data[idx_trials]['est_mean'] = updater.est_mean()
data[idx_trials]['sim_outcome'] = outcome
data[idx_trials]['expparams'] = expparams
save_exp.writelines(str(expparams)+'\n')
save_mean.write(str(updater.est_mean())+'\n')
save_out.write(str(outcome)+'\n')
# PLOT *******************************************
#plotting particles and weights
particles = updater.particle_locations
weights = updater.particle_weights
if idx_trials==0:
maxw=max(weights)
weights=weights/maxw #normalize the posterior
fig1 = plt.figure()
plt.axvline(updater.est_mean(), linestyle = '--', c = 'blue', linewidth =2,label='Est. Mean')
plt.axvline(true_model, linestyle = '--', c = 'red', linewidth = 2,label='True Model')
plt.scatter(particles,weights,s=0.1)
plt.title('Posterior Distribution T1= '+str(updater.est_mean()))
plt.ylabel('Normalized Weight')
plt.xlabel('Particles')
plt.legend()
Saver.savefig()
#END LOOP***************************************************
Saver.close()
save_exp.close()
save_mean.close()
save_out.close() | mit |
IndraVikas/scikit-learn | benchmarks/bench_plot_fastkmeans.py | 294 | 4676 | from __future__ import print_function
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
chunk = 100
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
data = nr.random_integers(-50, 50, (n_samples, n_features))
print('K-Means')
tstart = time()
kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.5f" % kmeans.inertia_)
print()
results['kmeans_speed'].append(delta)
results['kmeans_quality'].append(kmeans.inertia_)
print('Fast K-Means')
# let's prepare the data in small chunks
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=10,
batch_size=chunk)
tstart = time()
mbkmeans.fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %f" % mbkmeans.inertia_)
print()
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
def compute_bench_2(chunks):
results = defaultdict(lambda: [])
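    # Note: despite its name, n_features below is the number of points drawn
    # around each of the 8 cluster centres; the data itself is 2-dimensional.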
n_features = 50000
means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
[0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
X = np.empty((0, 2))
for i in range(8):
X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
max_it = len(chunks)
it = 0
for chunk in chunks:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
print('Fast K-Means')
tstart = time()
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=8,
batch_size=chunk)
mbkmeans.fit(X)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.3fs" % mbkmeans.inertia_)
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 150, 5).astype(np.int)
features_range = np.linspace(150, 50000, 5).astype(np.int)
chunks = np.linspace(500, 10000, 15).astype(np.int)
results = compute_bench(samples_range, features_range)
results_2 = compute_bench_2(chunks)
max_time = max([max(i) for i in [t for (label, t) in results.iteritems()
if "speed" in label]])
max_inertia = max([max(i) for i in [
t for (label, t) in results.iteritems()
if "speed" not in label]])
fig = plt.figure('scikit-learn K-Means benchmark results')
for c, (label, timings) in zip('brcy',
sorted(results.iteritems())):
if 'speed' in label:
ax = fig.add_subplot(2, 2, 1, projection='3d')
ax.set_zlim3d(0.0, max_time * 1.1)
else:
ax = fig.add_subplot(2, 2, 2, projection='3d')
ax.set_zlim3d(0.0, max_inertia * 1.1)
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
i = 0
for c, (label, timings) in zip('br',
sorted(results_2.iteritems())):
i += 1
ax = fig.add_subplot(2, 2, i + 2)
y = np.asarray(timings)
ax.plot(chunks, y, color=c, alpha=0.8)
ax.set_xlabel('Chunks')
ax.set_ylabel(label)
plt.show()
| bsd-3-clause |
ApolloAuto/apollo | modules/tools/plot_planning/plot_planning_speed.py | 3 | 4356 | #!/usr/bin/env python3
###############################################################################
# Copyright 2019 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import sys
import threading
import gflags
import matplotlib.animation as animation
import matplotlib.pyplot as plt
from cyber.python.cyber_py3 import cyber
from modules.control.proto import control_cmd_pb2
from modules.planning.proto import planning_pb2
LAST_TRAJ_DATA = []
LAST_TRAJ_T_DATA = []
CURRENT_TRAJ_DATA = []
CURRENT_TRAJ_T_DATA = []
INIT_V_DATA = []
INIT_T_DATA = []
begin_t = None
last_t = None
lock = threading.Lock()
FLAGS = gflags.FLAGS
gflags.DEFINE_integer("data_length", 500, "Planning plot data length")
def callback(planning_pb):
global INIT_V_DATA, INIT_T_DATA
global CURRENT_TRAJ_DATA, LAST_TRAJ_DATA
global CURRENT_TRAJ_T_DATA, LAST_TRAJ_T_DATA
global begin_t, last_t
lock.acquire()
if begin_t is None:
begin_t = planning_pb.header.timestamp_sec
current_t = planning_pb.header.timestamp_sec
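    # Treat a gap of more than one second between planning messages as a
    # restart (e.g. a new replay) and clear all buffered history.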
if last_t is not None and abs(current_t - last_t) > 1:
begin_t = planning_pb.header.timestamp_sec
LAST_TRAJ_DATA = []
LAST_TRAJ_T_DATA = []
CURRENT_TRAJ_DATA = []
CURRENT_TRAJ_T_DATA = []
INIT_V_DATA = []
INIT_T_DATA = []
INIT_T_DATA.append(current_t - begin_t)
INIT_V_DATA.append(planning_pb.debug.planning_data.init_point.v)
LAST_TRAJ_DATA = []
for v in CURRENT_TRAJ_DATA:
LAST_TRAJ_DATA.append(v)
LAST_TRAJ_T_DATA = []
for t in CURRENT_TRAJ_T_DATA:
LAST_TRAJ_T_DATA.append(t)
CURRENT_TRAJ_DATA = []
CURRENT_TRAJ_T_DATA = []
for traj_point in planning_pb.trajectory_point:
CURRENT_TRAJ_DATA.append(traj_point.v)
CURRENT_TRAJ_T_DATA.append(current_t - begin_t + traj_point.relative_time)
lock.release()
last_t = current_t
def listener():
cyber.init()
test_node = cyber.Node("planning_listener")
test_node.create_reader("/apollo/planning",
planning_pb2.ADCTrajectory, callback)
def compensate(data_list):
comp_data = [0] * FLAGS.data_length
comp_data.extend(data_list)
if len(comp_data) > FLAGS.data_length:
comp_data = comp_data[-FLAGS.data_length:]
return comp_data
def update(frame_number):
lock.acquire()
last_traj.set_xdata(LAST_TRAJ_T_DATA)
last_traj.set_ydata(LAST_TRAJ_DATA)
current_traj.set_xdata(CURRENT_TRAJ_T_DATA)
current_traj.set_ydata(CURRENT_TRAJ_DATA)
init_data_line.set_xdata(INIT_T_DATA)
init_data_line.set_ydata(INIT_V_DATA)
lock.release()
#brake_text.set_text('brake = %.1f' % brake_data[-1])
#throttle_text.set_text('throttle = %.1f' % throttle_data[-1])
if len(INIT_V_DATA) > 0:
init_data_text.set_text('init point v = %.1f' % INIT_V_DATA[-1])
if __name__ == '__main__':
argv = FLAGS(sys.argv)
listener()
fig, ax = plt.subplots()
X = range(FLAGS.data_length)
Xs = [i * -1 for i in X]
Xs.sort()
init_data_line, = ax.plot(
INIT_T_DATA, INIT_V_DATA, 'b', lw=2, alpha=0.7, label='init_point_v')
current_traj, = ax.plot(
CURRENT_TRAJ_T_DATA, CURRENT_TRAJ_DATA, 'r', lw=1, alpha=0.5, label='current_traj')
last_traj, = ax.plot(
LAST_TRAJ_T_DATA, LAST_TRAJ_DATA, 'g', lw=1, alpha=0.5, label='last_traj')
#brake_text = ax.text(0.75, 0.85, '', transform=ax.transAxes)
#throttle_text = ax.text(0.75, 0.90, '', transform=ax.transAxes)
init_data_text = ax.text(0.75, 0.95, '', transform=ax.transAxes)
ani = animation.FuncAnimation(fig, update, interval=100)
ax.set_ylim(-1, 30)
ax.set_xlim(-1, 60)
ax.legend(loc="upper left")
plt.show()
| apache-2.0 |
bryndin/tornado-flickr-api | tornado_flickrapi/objects.py | 1 | 70898 | # -*- encoding: utf8 -*-
"""
Object Oriented implementation of Flickr API.
Important notes:
- For consistency, the naming of methods might differ from the name
in the official API. Please check the method "docstring" to know
what is the implemented method.
- For methods which expect an object "id", either the 'id' string
or the object itself can be used as argument. Similar consideration
holds for lists of id's.
For instance if "photo_id" is expected you can give call the function
with named argument "photo=PhotoObject" or with the id string
"photo_id=id_string".
Author: Alexis Mignon (c)
email: alexis.mignon_at_gmail.com
Date: 05/08/2011
"""
import urllib2
from tornado.gen import coroutine, Return
import method_call
from flickrerrors import FlickrError
from reflection import caller, static_caller, FlickrAutoDoc
from UserList import UserList
import auth
try:
import Image
import cStringIO
except ImportError:
pass
def dict_converter(keys, func):
def convert(dict_):
for k in keys:
try:
dict_[k] = func(dict_[k])
except KeyError:
pass
return convert
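# Note on the pattern used throughout this module: methods decorated with
# @caller/@static_caller do not perform the HTTP request themselves. They
# return a pair (args, format_result); the decorator (see reflection.py)
# issues the actual Flickr API call and passes the raw response through
# format_result.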
class FlickrObject(object):
"""
Base Object for Flickr API Objects.
Flickr Objects are dynamically created from the
named arguments given to the constructor.
"""
__converters__ = [] # some functions used to convert some result field
__display__ = [] # The attribute to display when the object is converted
# to a string
__metaclass__ = FlickrAutoDoc
def __init__(self, **params):
params["loaded"] = False
self._set_properties(**params)
def _set_properties(self, **params):
for c in self.__class__.__converters__:
c(params)
self.__dict__.update(params)
def setToken(self, filename=None, token=None, token_key=None,
token_secret=None):
"""
Set the authentication token to use with the object.
"""
if token is None:
token = auth.token_factory(filename=filename, token_key=token_key,
token_secret=token_secret)
self.__dict__["token"] = token
def getToken(self):
"""
        Get the authentication token, if any.
"""
return self.__dict__.get("token", None)
def __getattr__(self, name):
if name == 'id' and name not in self.__dict__:
raise AttributeError(
"'%s' object has no attribute '%s'" % (
self.__class__.__name__, name)
)
if name not in self.__dict__:
if not self.loaded:
self.load()
try:
return self.__dict__[name]
except KeyError:
raise AttributeError(
"'%s' object has no attribute '%s'" % (
self.__class__.__name__, name
)
)
def __setattr__(self, name, values):
raise FlickrError("Readonly attribute")
def get(self, key, *args, **kwargs):
return self.__dict__.get(key, *args, **kwargs)
def __getitem__(self, key):
return self.__dict__[key]
def __setitem__(self, key, value):
raise FlickrError("Read-only attribute")
def __str__(self):
vals = []
for k in self.__class__.__display__:
val_found = False
try:
value = self.__dict__[k]
val_found = True
except KeyError:
self.load()
try:
value = self.__dict__[k]
val_found = True
except KeyError:
pass
if not val_found:
continue
if isinstance(value, unicode):
value = value.encode("utf8")
if isinstance(value, str):
value = "'%s'" % value
else:
value = str(value)
if len(value) > 20:
value = value[:20] + "..."
vals.append("%s=%s" % (k, value))
return "%s(%s)" % (self.__class__.__name__, ", ".join(vals))
def __repr__(self):
return str(self)
def getInfo(self):
"""
        Returns object information as a dictionary.
        Should be overridden.
"""
return {}
def load(self):
props = self.getInfo()
self.__dict__["loaded"] = True
self._set_properties(**props)
class FlickrList(UserList):
def __init__(self, data=[], info=None):
UserList.__init__(self, data)
self.info = info
def __str__(self):
return '%s;%s' % (str(self.data), str(self.info))
def __repr__(self):
return '%s;%s' % (repr(self.data), repr(self.info))
class Activity(FlickrObject):
@static_caller("flickr.activity.userPhotos")
def userPhotos(**args):
return args, _extract_activity_list
@static_caller("flickr.activity.userComments")
def userComments(**args):
return args, _extract_activity_list
class Blog(FlickrObject):
__display__ = ["id", "name"]
__converters__ = [
dict_converter(["needspassword"], bool),
]
__self_name__ = "blog_id"
@caller("flickr.blogs.postPhoto")
def postPhoto(self, **args):
return _format_id("photo", args), lambda r: None
class BlogService(FlickrObject):
__display__ = ["id", "text"]
__self_name__ = "service"
@caller("flickr.blogs.getList")
def getList(self, **args):
try:
args["service"] = args["service"].id
except (KeyError, AttributeError):
pass
def format_result(r, token=None):
return [Blog(token=token, **b)
for b in _check_list(r["blogs"]["blog"])]
return args, format_result
@caller("flickr.blogs.postPhoto")
def postPhoto(self, **args):
        return _format_id("photo", args), _none
@static_caller("flickr.blogs.getServices")
def getServices():
return ({},
lambda r: [BlogService(**s)
for s in _check_list(r["services"]["service"])]
)
class Camera(FlickrObject):
__display__ = ["name"]
__self_name__ = "camera"
class Brand(FlickrObject):
__display__ = ["name"]
__self_name__ = "brand"
@static_caller("flickr.cameras.getBrands")
def getList():
return ({},
lambda r: [Camera.Brand(**b) for b in r["brands"]["brand"]]
)
@caller("flickr.cameras.getBrandModels")
def getModels(self):
return ({},
lambda r: [Camera(**m) for m in r["cameras"]["camera"]]
)
class Collection(FlickrObject):
__display__ = ["id", "title"]
__self_name__ = "collection_id"
@caller("flickr.collections.getInfo")
def getInfo(self, **args):
def format_result(r):
collection = r["collection"]
icon_photos = _check_list(collection["iconphotos"]["photo"])
photos = []
            for p in icon_photos:
                p["owner"] = Person(id=p["owner"])
photos.append(Photo(**p))
collection["iconphotos"] = photos
return collection
return args, format_result
@caller("flickr.stats.getCollectionStats")
def getStats(self, date, **args):
args["date"] = date
return args, lambda r: int(r["stats"]["views"])
@caller("flickr.collections.getTree")
def getTree(**args):
def format_result(r, token=None):
collections = _check_list(r["collections"])
collections_ = []
for c in collections:
sets = _check_list(c.pop("set"))
sets_ = [Photoset(token=token, **s) for s in sets]
collections_.append(Collection(token=token, sets=sets_, **c))
return collections_
return _format_id("user", args), format_result
class CommonInstitution(FlickrObject):
__display__ = ["id", "name"]
@static_caller("flickr.commons.getInstitutions")
def getInstitutions():
def format_result(r):
institutions = _check_list(r["institutions"]["institution"])
institutions_ = []
for i in institutions:
urls = _check_list(i['urls']['url'])
urls_ = []
for u in urls:
u["url"] = u.pop("text")
urls_.append(CommonInstitutionUrl(**u))
i["urls"] = urls_
institutions_.append(CommonInstitution(id=i["nsid"], **i))
return institutions_
return {}, format_result
class CommonInstitutionUrl(FlickrObject):
pass
class Contact(FlickrObject):
@static_caller("flickr.contacts.getList")
def getList(self, **args):
def format_result(r):
info = r["contacts"]
contacts = [Person(id=c["nsid"], **c)
for c in _check_list(info["contact"])]
return FlickrList(contacts, Info(**info))
return args, format_result
@static_caller("flickr.contacts.getListRecentlyUploaded")
def getListRecentlyUploaded(self, **args):
def format_result(r):
info = r["contacts"]
contacts = [Person(id=c["nsid"], **c)
for c in _check_list(info["contact"])]
return FlickrList(contacts, Info(**info))
return args, format_result
@static_caller("flickr.contacts.getTaggingSuggestions")
def getTaggingSuggestions(self, **args):
def format_result(r):
info = r["contacts"]
contacts = [Person(id=c["nsid"], **c)
for c in _check_list(info["contact"])]
return FlickrList(contacts, Info(**info))
return args, format_result
class Gallery(FlickrObject):
__display__ = ["id", "title"]
__converters__ = [
dict_converter(["date_create", "date_update", "count_photos",
"count_videos"], int),
]
__self_name__ = "gallery_id"
@caller("flickr.galleries.addPhoto")
def addPhoto(self, **args):
return _format_id("photo", args), _none
@static_caller("flickr.galleries.create")
def create(**args):
return _format_id("primary_photo"), lambda r: Gallery(**r["gallery"])
@caller("flickr.galleries.editMeta")
def editMedia(self, **args):
return args, _none
@caller("flickr.galleries.editPhoto")
def editPhoto(self, **args):
return _format_id("photo", args), _none
@caller("flickr.galleries.editPhotos")
def editPhotos(self, **args):
if "photos" in args:
args["photo_ids"] = [p.id for p in args.pop("photos")]
photo_ids = args["photo_ids"]
if isinstance(photo_ids, list):
args["photo_ids"] = ", ".join(photo_ids)
return _format_id("primary_photo", args), _none
@static_caller("flickr.urls.lookupGallery")
def getByUrl(url):
def format_result(r):
gallery = r["gallery"]
gallery["owner"] = Person(id=gallery["owner"])
return Gallery(**gallery)
return {'url': url}, format_result
@caller("flickr.galleries.getInfo")
def getInfo(self):
def format_result(r, token=None):
gallery = r["gallery"]
gallery["owner"] = Person(gallery["owner"])
pp_id = gallery.pop("primary_photo_id")
pp_secret = gallery.pop("primary_photo_secret")
pp_farm = gallery.pop("primary_photo_farm")
pp_server = gallery.pop("primary_photo_server")
gallery["primary_photo"] = Photo(id=pp_id, secret=pp_secret,
server=pp_server, farm=pp_farm,
token=token)
return gallery
return {}, format_result
@caller("flickr.galleries.getPhotos")
def getPhotos(self, **args):
return _format_extras(args), _extract_photo_list
class Category(FlickrObject):
__display__ = ["id", "name"]
class Info(FlickrObject):
__converters__ = [
dict_converter(["page", "perpage", "pages", "total", "count"], int)
]
__display__ = ["page", "perpage", "pages", "total", "count"]
pass
class Group(FlickrObject):
__converters__ = [
dict_converter(["members", "privacy"], int),
dict_converter(["admin", "eighteenplus", "invistation_only"], bool)
]
__display__ = ["id", "name"]
__self_name__ = "group_id"
class Topic(FlickrObject):
__display__ = ["id", "subject"]
__self_name__ = "topic_id"
class Reply(FlickrObject):
__display__ = ["id"]
__self_name__ = "reply_id"
def getToken(self):
return self.topic.getToken()
@staticmethod
def _format_reply(reply):
author = {
'id': reply.pop("author"),
'role': reply.pop("role"),
'is_pro': bool(reply.pop("is_pro")),
}
reply["author"] = Person(**author)
return reply
@caller("flickr.groups.discuss.replies.getInfo")
def getInfo(self, **args):
return args, lambda r: self._format_reply(r["reply"])
@caller("flickr.groups.discuss.replies.delete")
def delete(self, **args):
args["topic_id"] = self.topic.id
def getToken(self):
return self.group.getToken()
@caller("flickr.groups.discuss.replies.add")
def addReply(self, **args):
return args, _none
@staticmethod
def _format_topic(topic):
""" reformat a topic dict
"""
author = {
'id': topic.pop("author"),
'is_pro': bool(topic.pop('is_pro')),
'role': topic.pop("role"),
}
topic["author"] = Person(**author)
return topic
@caller("flickr.groups.discuss.topics.getInfo")
def getInfo(self, **args):
def format_result(r):
return self._format_topic(r["topic"])
return args, format_result
@caller("flickr.groups.discuss.replies.getList")
def getReplies(self, **args):
def format_result(r):
info = r["replies"]
return FlickrList(
[Group.Topic.Reply(topic=self,
**Group.Topic.Reply._format_reply(rep))
for rep in info.pop("reply", [])],
Info(**info)
)
return args, format_result
@caller("flickr.groups.discuss.replies.delete")
def delete(self, **args):
args["topic_id"] = self.topic.id
return args, _none
@caller("flickr.groups.discuss.replies.edit")
def edit(self, **args):
args["topic_id"] = self.topic.id
return args, _none
@caller("flickr.groups.discuss.topics.add")
def addDiscussTopic(**args):
return args, _none
@static_caller("flickr.groups.browse")
def browse(**args):
def format_result(r, token):
cat = r["category"]
subcats = [Category(**c) for c in _check_list(cat.pop("subcats"))]
groups = [Group(id=g["nsid"], **g)
for g in _check_list(cat.pop("group"))]
return Category(id=args["cat_id"], subcats=subcats, groups=groups,
**cat)
return _format_id("cat", args), format_result
@caller("flickr.groups.getInfo")
def getInfo(self, **args):
return args, lambda r: r["group"]
@caller("flickr.urls.getGroup")
def getUrl(self, **args):
return args, lambda r: r["group"]["url"]
@static_caller("flickr.urls.lookupGroup")
def getByUrl(url, **args):
args["url"] = url
def format_result(r):
group = r["group"]
group["name"] = group.pop("groupname")
return Group(**group)
return args, format_result
@static_caller("flickr.groups.search")
def search(**args):
def format_result(r, token):
info = r["groups"]
groups = [Group(id=g["nsid"], **g) for g in info.pop("group")]
return FlickrList(groups, Info(**info))
return args, format_result
@caller("flickr.groups.members.getList")
def getMembers(self, **args):
try:
membertypes = args["membertypes"]
if isinstance(membertypes, list):
args["membertypes"] = ", ".join([str(i) for i in membertypes])
except KeyError:
pass
def format_result(r):
info = r["members"]
return FlickrList(
[Person(**p) for p in _check_list(info.pop("member"))],
Info(**info)
)
return args, format_result
@caller("flickr.groups.pools.add")
def addPhoto(self, **args):
return _format_id("photo", args), _none
@caller("flickr.groups.pools.getContext")
def getPoolContext(self, **args):
return (_format_id("photo", args),
lambda r: (Photo(**r["prevphoto"]), Photo(r["nextphoto"]))
)
@caller("flickr.groups.discuss.topics.getList")
def getDiscussTopics(self, **args):
def format_result(r):
info = r["topics"]
return FlickrList(
[Group.Topic(group=self, **Group.Topic._format_topic(t))
for t in info.pop("topic", [])],
Info(**info)
)
return args, format_result
@static_caller("flickr.groups.pools.getGroups")
def getGroups(**args):
def format_result(r, token):
info = r["groups"]
return FlickrList(
[Group(token=token, **g) for g in info.pop("group", [])],
Info(**info)
)
return args, format_result
@static_caller("flickr.people.getGroups")
def getMemberGroups(**args):
def format_result(r, token):
info = r["groups"]
return FlickrList(
[Group(token=token, **g) for g in info.pop("group", [])],
Info(**info)
)
return args, format_result
@caller("flickr.groups.pools.getPhotos")
def getPhotos(self, **args):
return _format_extras(args), _extract_photo_list
@caller("flickr.groups.pools.remove")
def removePhoto(self, **args):
return _format_id("photo", args), _none
@caller("flickr.groups.join")
def join(self, **args):
return args, _none
@caller("flickr.groups.joinRequest")
def joinRequest(self, **args):
return args, _none
@caller("flickr.groups.leave")
def leave(self, **args):
return args, _none
class License(FlickrObject):
__display__ = ["id", "name"]
__self_name__ = "license_id"
@static_caller("flickr.photos.licenses.getInfo")
def getList():
def format_result(r):
licenses = r["licenses"]["license"]
if not isinstance(licenses, list):
licenses = [licenses]
return [License(**l) for l in licenses]
return {}, format_result
class Location(FlickrObject):
__display__ = ["latitude", "longitude", "accuracy"]
__converters__ = [
dict_converter(["latitude", "longitude"], float),
dict_converter(["accuracy"], int),
]
class MachineTag(FlickrObject):
class Namespace(FlickrObject):
__display__ = ["text", "usage", "predicate"]
class Pair(FlickrObject):
__display__ = ["namespace", "text", "usage", "predicate"]
class Predicate(FlickrObject):
__display__ = ["usage", "text", "namespaces"]
class Value(FlickrObject):
__display__ = ["usage", "namespace", "predicate", "text"]
@static_caller("flickr.machinetags.getNamespaces")
def getNamespaces(**args):
def format_result(r):
info = r["namespaces"]
return FlickrList(
[MachineTag.Namespace(**ns)
for ns in _check_list(info.pop("namespace"))],
Info(**info)
)
return args, format_result
@static_caller("flickr.machinetags.getPairs")
def getPairs(**args):
def format_result(r):
info = r["pairs"]
return FlickrList(
[MachineTag.Pair(**p) for p in _check_list(info.pop("pair"))],
Info(**info)
)
return args, format_result
@static_caller("flickr.machinetags.getPredicates")
def getPredicates(**args):
def format_result(r):
info = r["predicates"]
return FlickrList(
[MachineTag.Predicate(**p)
for p in _check_list(info.pop("predicate"))],
Info(**info)
)
return args, format_result
@static_caller("flickr.machinetags.getRecentValues")
def getRecentValues(**args):
def format_result(r):
info = r["values"]
return FlickrList(
[MachineTag.Value(**v)
for v in _check_list(info.pop("value"))],
Info(**info)
)
return args, format_result
@static_caller("flickr.machinetags.getValues")
def getValues(**args):
def format_result(r):
info = r["values"]
return FlickrList(
[MachineTag.Value(**v)
for v in _check_list(info.pop("value"))],
Info(**info)
)
return args, format_result
class Panda(FlickrObject):
__display__ = ["name"]
__self_name__ = 'panda_name'
@static_caller("flickr.panda.getList")
def getList():
return (
{},
lambda r: [Panda(name=p, id=p) for p in r["pandas"]["panda"]]
)
@caller("flickr.panda.getPhotos")
def getPhotos(self, **args):
return _format_extras(args), _extract_photo_list
class Person(FlickrObject):
__converters__ = [
dict_converter(["ispro"], bool),
]
__display__ = ["id", "username"]
__self_name__ = "user_id"
def __init__(self, **params):
if "id" not in params:
if "nsid" in params:
params["id"] = params["nsid"]
else:
raise ValueError("The 'id' or 'nsid' parameter is required")
FlickrObject.__init__(self, **params)
@caller("flickr.photos.geo.batchCorrectLocation")
def batchCorrectLocation(self, **args):
return _format_id("place", args), _none
@staticmethod
def getFromToken(token=None, filename=None, token_key=None,
token_secret=None):
"""
Retrieve the person corresponding to the authentication token.
"""
if token is None:
token = auth.token_factory(filename=filename, token_key=token_key,
token_secret=token_secret)
return test.login(token=token)
@static_caller("flickr.people.findByEmail")
def findByEmail(find_email):
return {'find_email': find_email}, lambda r: Person(**r["user"])
@static_caller("flickr.people.findByUsername")
def findByUserName(username):
return {'username': username}, lambda r: Person(**r["user"])
@static_caller("flickr.urls.lookupUser")
def findByUrl(url):
return {'url': url}, lambda r: Person(**r["user"])
@caller("flickr.favorites.getContext")
def getFavoriteContext(self, **args):
def format_result(r, token=None):
return (Photo(token=token, **r["prevphoto"]),
Photo(token=token, **r["nextphoto"]))
return _format_id("photo", args), format_result
@caller("flickr.favorites.getList")
def getFavorites(self, **args):
return _format_extras(args), _extract_photo_list
@caller("flickr.photosets.getList")
def getPhotosets(self, **args):
def format_result(r, token=None):
info = r["photosets"]
photosets = info.pop("photoset")
if not isinstance(photosets, list):
photosets = [photosets]
return FlickrList(
[Photoset(token=token, **ps) for ps in photosets],
Info(**info)
)
return args, format_result
@caller("flickr.favorites.getPublicList")
def getPublicFavorites(self, **args):
return _format_extras(args), _extract_photo_list
@caller("flickr.people.getInfo")
def getInfo(self, **args):
def format_result(r):
user = r["person"]
user["photos_info"] = user.pop("photos")
return user
return args, format_result
@caller("flickr.galleries.getList")
def getGalleries(self, **args):
def format_result(r, token=True):
info = r["galleries"]
galleries = _check_list(info.pop("gallery"))
galleries_ = []
for g in galleries:
g["owner"] = Person(id=g["owner"])
pp_id = g.pop("primary_photo_id")
pp_secret = g.pop("primary_photo_secret")
pp_farm = g.pop("primary_photo_farm")
pp_server = g.pop("primary_photo_server")
g["primary_photo"] = Photo(id=pp_id, secret=pp_secret,
server=pp_server,
farm=pp_farm, token=token)
galleries_.append(g)
return FlickrList(galleries_, Info(**info))
return args, format_result
@caller("flickr.people.getLimits")
def getLimits(self):
return {}, lambda r: r
@caller("flickr.photos.getCounts")
def getPhotoCounts(self, **args):
return args, lambda r: r["photocounts"]["photocount"]
@caller("flickr.people.getPhotos")
def getPhotos(self, **args):
return args, _extract_photo_list
@caller("flickr.urls.getUserPhotos")
def getPhotosUrl(self):
return {}, lambda r: r["user"]["url"]
@caller("flickr.urls.getUserProfile")
def getProfileUrl(self):
return {}, lambda r: r["user"]["url"]
@caller("flickr.people.getPublicPhotos")
def getPublicPhotos(self, **args):
return args, _extract_photo_list
@caller("flickr.people.getPhotosOf")
def getPhotosOf(self, **args):
return (_format_id("owner", _format_extras(args)),
lambda r: _extract_photo_list(r, token=self.Token()))
@caller("flickr.contacts.getPublicList")
def getPublicContacts(self, **args):
def format_result(r, token=None):
info = r["contacts"]
contacts = [Person(id=c["nsid"], token=token, **c)
for c in _check_list(info["contact"])]
return FlickrList(contacts, Info(**info))
return args, format_result
@caller("flickr.people.getPublicGroups")
def getPublicGroups(self, **args):
def format_result(r, token=None):
groups = r["groups"]["group"]
groups_ = []
for gr in groups:
gr["id"] = gr["nsid"]
groups_.append(Group(token=token, **gr))
return groups_
return args, format_result
@static_caller("flickr.people.getUploadStatus")
def getUploadStatus(**args):
return args, lambda r: r["user"]
@caller("flickr.collections.getTree")
def getCollectionTree(self, **args):
def format_result(r, token=None):
collections = _check_list(r["collections"])
collections_ = []
for c in collections:
sets = _check_list(c.pop("set"))
sets_ = [Photoset(token=token, **s) for s in sets]
collections_.append(Collection(token=token, sets=sets_, **c))
return collections_
return _format_id("collection", args), format_result
@caller("flickr.photos.getContactsPublicPhotos")
def getContactsPublicPhotos(self, **args):
return (_format_extras(args),
lambda r: _extract_photo_list(r, token=self.getToken())
)
@caller("flickr.tags.getListUser")
def getTags(self):
return {}, lambda r: [Tag(**t) for t in r["who"]["tags"]["tag"]]
@caller("flickr.tags.getListUserPopular")
def getPopularTags(self, **args):
return args, lambda r: [Tag(**t) for t in r["who"]["tags"]["tag"]]
@caller("flickr.favorites.remove")
def removeFromFavorites(self, **args):
return _format_id("photo", args), _none
@static_caller("flickr.photos.getNotInSet")
def getNotInSetPhotos(**args):
return _format_extras(args), _extract_photo_list
class Photo(FlickrObject):
__converters__ = [
dict_converter(["isfamily", "ispublic", "isfriend", "cancomment",
"canaddmeta", "permcomment", "permmeta", "isfavorite"],
bool),
dict_converter(["posted", "lastupdate"], int),
dict_converter(["views", "comments"], int),
]
__display__ = ["id", "title"]
__self_name__ = "photo_id"
class Comment(FlickrObject):
__display__ = ["id", "author"]
__self_name__ = "comment_id"
@caller("flickr.photos.comments.deleteComment")
def delete(self, **args):
return args, _none
@caller("flickr.photos.comments.editComment")
def edit(self, **args):
return args, _none
@static_caller("flickr.photos.comments.getRecentForContacts")
def getRecentForContacts(**args):
return _format_extras(args), _extract_photo_list
class Exif(FlickrObject):
__display__ = ["tag", "raw"]
class Note(FlickrObject):
__display__ = ["id", "text"]
__self_name__ = "note_id"
@caller("flickr.photos.notes.edit")
def edit(self, **args):
return args, _none
@caller("flickr.photos.notes.delete")
def delete(self, **args):
return args, _none
class Suggestion(FlickrObject):
__display__ = ["id"]
__self_name__ = "suggestion_id"
@caller("flickr.photos.suggestions.approveSuggestion")
def approve(self):
return {}, _none
@caller("flickr.photos.suggestions.rejectSuggestion")
def reject(self):
return {}, _none
@caller("flickr.photos.suggestions.removeSuggestion")
def remove(self):
return {}, _none
@caller("flickr.photos.people.delete")
def deletePerson(self, **args):
return _format_id("user", args), _none
@caller("flickr.photos.people.deleteCoords")
def deletePersonCoords(self, **args):
return _format_id("user", args), _none
@caller("flickr.photos.people.editCoords")
def editPersonCoords(self, **args):
return _format_id("user", args), _none
@caller("flickr.photos.comments.addComment")
def addComment(self, **args):
def format_result(r, token=None):
args["id"] = r["comment"]["id"]
args["photo"] = self
return Photo.Comment(**args)
return args, format_result
@caller("flickr.photos.notes.add")
def addNote(self, **args):
def format_result(r, token=None):
args["id"] = r["note"]["id"]
args["photo"] = self
return Photo.Note(**args)
return args, format_result
@caller("flickr.photos.people.add")
def addPerson(self, **args):
return _format_id("user", args), _none
@caller("flickr.photos.addTags")
def addTags(self, tags, **args):
if isinstance(tags, list):
tags = ", ".join(tags)
args["tags"] = tags
return args, _none
@caller("flickr.favorites.add")
def addToFavorites(self):
return {}, _none
@caller("flickr.photos.geo.correctLocation")
def correctLocation(self, **args):
return _format_id("place", args), _none
@static_caller("flickr.photos.upload.checkTickets")
def checkUploadTickets(tickets, **args):
def format_result(r, token=None):
tickets = r["uploader"]["ticket"]
if not isinstance(tickets, list):
tickets = [tickets]
return [UploadTicket(**t) for t in tickets]
args["tickets"] = ','.join(tickets)
return args, format_result
@caller("flickr.photos.delete")
def delete(self, **args):
return args, _none
@caller("flickr.photos.getAllContexts")
def getAllContexts(self, **args):
def format_result(r, token=None):
photosets = []
if "set" in r:
for s in r["set"]:
photosets.append(Photoset(token=token, **s))
pools = []
if "pool" in r:
for p in r["pool"]:
pools.append(Group(token=token, **p))
return photosets, pools
return args, format_result
@caller("flickr.photos.comments.getList")
def getComments(self, **args):
def format_result(r, token=None):
try:
comments = r["comments"]["comment"]
except KeyError:
comments = []
comments_ = []
if not isinstance(comments, list):
comments = [comments]
for c in comments:
author = c["author"]
authorname = c.pop("authorname")
c["author"] = Person(id=author, username=authorname,
token=token)
comments_.append(Photo.Comment(token=token, photo=self, **c))
return comments_
return args, format_result
@caller("flickr.photos.getInfo")
def getInfo(self, **args):
def format_result(r, token=None):
photo = r["photo"]
owner = photo["owner"]
owner["id"] = owner["nsid"]
photo["owner"] = Person(token=token, **owner)
photo.update(photo.pop("usage"))
photo.update(photo.pop("visibility"))
photo.update(photo.pop("publiceditability"))
photo.update(photo.pop("dates"))
tags = []
for t in _check_list(photo["tags"]["tag"]):
t["author"] = Person(token=token, id=t.pop("author"))
tags.append(Tag(token=token, **t))
photo["tags"] = tags
photo["notes"] = [
Photo.Note(token=token, **n)
for n in _check_list(photo["notes"]["note"])
]
sizes = photo.pop("sizes", None)
if sizes:
photo["sizes"] = dict([(s['label'], s) for s in sizes["size"]])
return photo
return args, format_result
@caller("flickr.photos.getContactsPhotos")
def getContactsPhotos(self, **args):
def format_result(r, token=None):
photos = r["photos"]["photo"]
photos_ = []
for p in photos:
photos_.append(Photo(token=token, **p))
return photos_
return args, format_result
@caller("flickr.photos.getContext")
def getContext(self, **args):
def format_result(r, token):
return (Photo(token=token, **r["prevphoto"]),
Photo(token=token, **r["nextphoto"]))
return args, format_result
@caller("flickr.photos.getExif")
def getExif(self, **args):
if hasattr(self, "secret"):
args["secret"] = self.secret
def format_result(r):
try:
return [Photo.Exif(**e) for e in r["photo"]["exif"]]
except KeyError:
return []
return args, format_result
@caller("flickr.favorites.getContext")
def getFavoriteContext(self, **args):
def format_result(r, token):
return (Photo(token=token, **r["prevphoto"]),
Photo(token=token, **r["nextphoto"]))
return _format_id("user", args), format_result
@caller("flickr.photos.getFavorites")
def getFavorites(self, **args):
def format_result(r, token):
photo = r["photo"]
persons = photo.pop("person")
persons_ = []
if not isinstance(persons, list):
persons = [persons]
for p in persons:
p["id"] = p["nsid"]
persons_.append(Person(token=token, **p))
infos = Info(**photo)
return FlickrList(persons_, infos)
return args, format_result
@caller("flickr.galleries.getListForPhoto")
def getGalleries(self, **args):
def format_result(r):
info = r["galleries"]
galleries = _check_list(info.pop("gallery"))
galleries_ = []
for g in galleries:
g["owner"] = Person(id=g["owner"])
pp_id = g.pop("primary_photo_id")
pp_secret = g.pop("primary_photo_secret")
pp_farm = g.pop("primary_photo_farm")
pp_server = g.pop("primary_photo_server")
g["primary_photo"] = Photo(
id=pp_id, secret=pp_secret,
server=pp_server, farm=pp_farm
)
galleries_.append(g)
return FlickrList(galleries_, Info(**info))
return args, format_result
@caller("flickr.photos.geo.getPerms")
def getGeoPerms(self, **args):
return args, lambda r, token: PhotoGeoPerms(token=token, **r["perms"])
@caller("flickr.photos.geo.getLocation")
def getLocation(self, **args):
def format_result(r, token):
loc = r["photo"]["location"]
return Location(token=token, photo=self, **loc)
return args, format_result
def getNotes(self):
"""
Returns the list of notes for a photograph
"""
return self.notes
@static_caller("flickr.interestingness.getList")
def getInteresting(**args):
return _format_extras(args), _extract_photo_list
@static_caller("flickr.photos.getRecent")
def getRecent(**args):
return _format_extras(args), _extract_photo_list
@caller("flickr.photos.suggestions.getList")
def getSuggestions(self, **args):
def format_result(r):
info = r["suggestions"]
suggestions_ = _check_list(info.pop("suggestion"))
suggestions = []
for s in suggestions_:
if "photo_id" in s:
s["photo"] = Photo(id=s.pop("photo_id"))
if "suggested_by" in s:
s["suggested_by"] = Person(id=s["suggested_by"])
suggestions.append(Photo.Suggestion(**s))
return FlickrList(suggestions, info=Info(**info))
return args, format_result
@caller("flickr.photos.getSizes")
def _getSizes(self, **args):
def format_result(r):
return dict([(s["label"], s) for s in r["sizes"]["size"]])
return args, format_result
def getSizes(self, **args):
if "sizes" not in self.__dict__:
self.__dict__["sizes"] = self._getSizes(**args)
return self.sizes
@caller("flickr.stats.getPhotoStats")
def getStats(self, date, **args):
args["date"] = date
return (
args,
lambda r: dict([(k, int(v)) for k, v in r["stats"].iteritems()])
)
@caller("flickr.tags.getListPhoto")
def getTags(self, **args):
return args, lambda r: [Tag(**t) for t in r["photo"]["tags"]["tag"]]
def getPageUrl(self):
"""
returns the URL to the photo's page.
"""
return "http://www.flickr.com/photos/%s/%s" % (self.owner.id, self.id)
@caller("flickr.photos.getPerms")
def getPerms(self):
return {}, lambda r: r
def _getLargestSizeLabel(self):
"""
returns the largest size for the current photo.
"""
sizes = self.getSizes()
max_size = None
max_area = None
for sl, s in sizes.iteritems():
area = int(s["height"]) * int(s["width"])
if max_area is None or area > max_area:
max_size = sl
max_area = area
return max_size
def getPhotoUrl(self, size_label=None):
"""
returns the URL to the photo page corresponding to the
given size.
Arguments:
size_label: The label corresponding to the photo size
'Square': 75x75
'Thumbnail': 100 on longest side
'Small': 240 on longest side
'Medium': 500 on longest side
'Medium 640': 640 on longest side
'Large': 1024 on longest side
'Original': original photo (not always available)
"""
if size_label is None:
size_label = self._getLargestSizeLabel()
try:
return self.getSizes()[size_label]["url"]
except KeyError:
raise FlickrError("The requested size is not available")
def getPhotoFile(self, size_label=None):
"""
returns the URL to the photo file corresponding to the
given size.
Arguments:
size_label: The label corresponding to the photo size
'Square': 75x75
'Thumbnail': 100 on longest side
'Small': 240 on longest side
'Medium': 500 on longest side
'Medium 640': 640 on longest side
'Large': 1024 on longest side
'Original': original photo (not always available)
"""
if size_label is None:
size_label = self._getLargestSizeLabel()
try:
return self.getSizes()[size_label]["source"]
except KeyError:
raise FlickrError("The requested size is not available")
def save(self, filename, size_label=None):
"""
saves the photo corresponding to the
given size.
Arguments:
filename: target file name
size_label: The label corresponding to the photo size
'Square': 75x75
'Thumbnail': 100 on longest side
'Small': 240 on longest side
'Medium': 500 on longest side
'Medium 640': 640 on longest side
'Large': 1024 on longest side
'Original': original photo (not always available)
"""
if size_label is None:
size_label = self._getLargestSizeLabel()
r = urllib2.urlopen(self.getPhotoFile(size_label))
with open(filename, 'wb') as f:
f.write(r.read())
def show(self, size_label=None):
"""
Shows the photo corresponding to the
given size.
Note: This method uses PIL
Arguments:
size_label: The label corresponding to the photo size
'Square': 75x75
'Thumbnail': 100 on longest side
'Small': 240 on longest side
'Medium': 500 on longest side
'Medium 640': 640 on longest side
'Large': 1024 on longest side
'Original': original photo (not always available)
"""
if size_label is None:
size_label = self._getLargestSizeLabel()
r = urllib2.urlopen(self.getPhotoFile(size_label))
b = cStringIO.StringIO(r.read())
Image.open(b).show()
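# Hedged usage sketch for the size-label helpers above (not part of the original
# module; assumes `photo` comes from e.g. Photo.search or Person.getPhotos, and
# the tag value is purely illustrative):
#
#   photo = Photo.search(tags="sunset", per_page=1)[0]
#   print photo.getPhotoUrl("Medium")    # page URL of the 500px rendition
#   print photo.getPhotoFile("Large")    # direct file URL of the 1024px rendition
#   photo.save("sunset.jpg", "Large")    # download the file to disk
#   photo.show()                         # display the largest available size (needs PIL)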
@static_caller("flickr.photos.getUntagged")
def getUntagged(**args):
return _format_extras(args), _extract_photo_list
@static_caller("flickr.photos.getWithGeoData")
def getWithGeoData(**args):
return _format_extras(args), _extract_photo_list
@static_caller("flickr.photos.getWithoutGeoData")
def getWithoutGeoData(**args):
return _format_extras(args), _extract_photo_list
@caller("flickr.photos.people.getList")
def getPeople(self, **args):
def format_result(r, token):
info = r["people"]
people = info.pop("person")
people_ = []
if not isinstance(people, list):
people = [people]
for p in people:
p["id"] = p["nsid"]
p["photo"] = self
people_.append(Person(**p))
return people_
return args, format_result
@static_caller("flickr.photos.geo.photosForLocation")
def photosForLocation(**args):
return args, _extract_photo_list
@static_caller("flickr.photos.recentlyUpdated")
def recentlyUpdated(**args):
return _format_extras(args), _extract_photo_list
@caller("flickr.favorites.remove")
def removeFromFavorites(self, **args):
return args, _none
@caller("flickr.photos.geo.removeLocation")
def removeLocation(self, **args):
return args, _none
@caller("flickr.photos.transform.rotate")
def rotate(self, degrees, **args):
args["degrees"] = degrees
def format_result(r, token):
photo_id = r["photo_id"]["_content"]
photo_secret = r["photo_id"]["secret"]
return Photo(token=token, id=photo_id, secret=photo_secret)
return args, format_result
@static_caller("flickr.photos.search")
def search(**args):
args = _format_id("user", args)
args = _format_extras(args)
return args, _extract_photo_list
@caller("flickr.photos.geo.setContext")
def setContext(self, context, **args):
args["context"] = context
return args, _none
@caller("flickr.photos.setContentType")
def setContentType(self, **args):
return args, _none
@caller("flickr.photos.setDates")
def setDates(self, **args):
return args, _none
@caller("flickr.photos.geo.setPerms")
def setGeoPerms(self, **args):
return args, _none
@caller("flickr.photos.licenses.setLicense")
def setLicence(self, license, **args):
args["license"] = license
return _format_id("license", args), _none
@caller("flickr.photos.geo.setLocation")
def setLocation(self, **args):
return args, _none
@caller("flickr.photos.setMeta")
def setMeta(self, **args):
return args, _none
@caller("flickr.photos.setPerms")
def setPerms(self, **args):
return args, _none
@caller("flickr.photos.setSafetyLevel")
def setSafetyLevel(self, **args):
return args, _none
@caller("flickr.photos.setTags")
def setTags(self, tags, **args):
args["tags"] = tags
return args, _none
@caller("flickr.photos.suggestions.suggestLocation")
def suggestLocation(self, **args):
_format_id("place", args)
def format_result(r):
info = r["suggestions"]
suggestions_ = _check_list(info.pop("suggestion"))
suggestions = []
for s in suggestions_:
if "photo_id" in s:
s["photo"] = Photo(id=s.pop("photo_id"))
if "suggested_by" in s:
s["suggested_by"] = Person(id=s["suggested_by"])
suggestions.append(Photo.Suggestion(**s))
return FlickrList(suggestions, info=Info(**info))
return args, format_result
class PhotoGeoPerms(FlickrObject):
__converters__ = [
dict_converter(["ispublic", "iscontact", "isfamily", "isfriend"], bool)
]
__display__ = ["id", "ispublic", "iscontact", "isfamily", "isfriend"]
class Photoset(FlickrObject):
__converters__ = [
dict_converter(["photos"], int),
]
__display__ = ["id", "title"]
__self_name__ = "photoset_id"
class Comment(FlickrObject):
__display__ = ["id"]
__self_name__ = "comment_id"
@caller("flickr.photosets.comments.deleteComment")
def delete(self, **args):
return args, _none
@caller("flickr.photosets.comments.editComment")
def edit(self, **args):
self._set_properties(**args)
return args, _none
@caller("flickr.photosets.addPhoto")
def addPhoto(self, **args):
return _format_id("photo", args), _none
@caller("flickr.photosets.comments.addComment")
def addComment(self, **args):
return (
args,
lambda r, token: Photoset.Comment(token=token, photoset=self, **r)
)
@static_caller("flickr.photosets.create")
def create(**args):
try:
pphoto = args.pop("primary_photo")
pphoto_id = pphoto.id
except KeyError:
pphoto_id = args.pop("primary_photo_id")
pphoto = Photo(id=pphoto_id)
args["primary_photo_id"] = pphoto_id
def format_result(r, token):
photoset = r["photoset"]
photoset["primary"] = pphoto
return Photoset(token=token, **photoset)
return args, format_result
@caller("flickr.photosets.delete")
def delete(self, **args):
return args, _none
@caller("flickr.photosets.editMeta")
def editMeta(self, **args):
return args, _none
@caller("flickr.photosets.editPhotos")
def editPhotos(self, **args):
try:
args["primary_photo_id"] = args.pop("primary_photo").id
except KeyError:
pass
try:
args["photo_ids"] = [p.id for p in args.pop("photos")]
except KeyError:
pass
photo_ids = args["photo_ids"]
if isinstance(photo_ids, list):
args["photo_ids"] = ", ".join(photo_ids)
return args, _none
@caller("flickr.photosets.comments.getList")
def getComments(self, **args):
def format_result(r, token):
comments = r["comments"]["comment"]
comments_ = []
if not isinstance(comments, list):
comments = [comments]
for c in comments:
author = c["author"]
authorname = c.pop("authorname")
c["author"] = Person(id=author, username=authorname)
comments_.append(
Photoset.Comment(token=token, photo=self, **c)
)
return comments_
return args, format_result
@caller("flickr.photosets.getContext")
def getContext(self, **args):
def format_result(r, token):
return (Photo(token=token, **r["prevphoto"]),
Photo(token=token, **r["nextphoto"]))
return _format_id("photo", args), format_result
@caller("flickr.photosets.getInfo")
def getInfo(self, **args):
def format_result(r, token):
photoset = r["photoset"]
photoset["owner"] = Person(token=token, id=photoset["owner"])
return photoset
return args, format_result
@caller("flickr.photosets.getPhotos")
def getPhotos(self, **args):
def format_result(r):
ps = r["photoset"]
return FlickrList([Photo(**p) for p in ps["photo"]],
Info(pages=ps["pages"],
page=ps["page"],
perpage=ps["perpage"],
total=ps["total"]))
return _format_extras(args), format_result
@coroutine
def getAllPhotos(self, **args):
"""Returns list of all photos."""
all_photos = yield self.getPhotos(**args)
for page in xrange(2, all_photos.info.pages + 1):
args["page"] = page
photos = yield self.getPhotos(**args)
all_photos.extend(photos)
raise Return(all_photos)
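# Hedged usage sketch for the coroutine-based pagination above (assumes the same
# @coroutine/Return helpers imported by this module; `photoset` is hypothetical):
#
#   @coroutine
#   def dump_titles(photoset):
#       photos = yield photoset.getAllPhotos()
#       for p in photos:
#           print p.title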
@caller("flickr.stats.getPhotosetStats")
def getStats(self, date, **args):
args["date"] = date
return (
args,
lambda r: dict([(k, int(v)) for k, v in r["stats"].iteritems()])
)
@static_caller("flickr.photosets.orderSets")
def orderSets(**args):
try:
photosets = args.pop("photosets")
args["photoset_ids"] = [ps.id for ps in photosets]
except KeyError:
pass
photoset_ids = args["photoset_ids"]
if isinstance(photoset_ids, list):
args["photoset_ids"] = ", ".join(photoset_ids)
return args, _none
@caller("flickr.photosets.removePhoto")
def removePhoto(self, **args):
return _format_id("photo", args), _none
@caller("flickr.photosets.removePhotos")
def removePhotos(self, **args):
try:
args["photo_ids"] = [p.id for p in args.pop("photos")]
except KeyError:
pass
photo_ids = args["photo_ids"]
if isinstance(photo_ids, list):
args["photo_ids"] = u", ".join(photo_ids)
return args, _none
@caller("flickr.photosets.reorderPhotos")
def reorderPhotos(self, **args):
try:
args["photo_ids"] = [p.id for p in args.pop("photos")]
except KeyError:
pass
photo_ids = args["photo_ids"]
if isinstance(photo_ids, list):
args["photo_ids"] = u", ".join(photo_ids)
return args, _none
@caller("flickr.photosets.setPrimaryPhoto")
def setPrimaryPhoto(self, **args):
return _format_id("photo", args), _none
class Place(FlickrObject):
__display__ = ["id", "name", "woeid", "latitude", "longitude"]
__converters__ = [
dict_converter(["latitude", "longitude"], float),
]
__self_name__ = 'place_id'
class ShapeData(FlickrObject):
class Polyline(FlickrObject):
pass
class Type(FlickrObject):
__display__ = ["id", "text"]
class Tag(FlickrObject):
__display__ = ["text", "count"]
__converters__ = [
dict_converter(["count"], int),
]
@static_caller("flickr.places.find")
def find(**args):
return args, _extract_place_list
@static_caller("flickr.places.findByLatLon")
def findByLatLon(**args):
return args, _extract_place_list
@caller("flickr.places.getChildrenWithPhotosPublic")
def getChildrenWithPhotoPublic(self, **args):
return args, _extract_place_list
@caller("flickr.places.getInfo")
def getInfo(self, **args):
def format_result(r):
return Place.parse_place(r["place"])
return args, format_result
@staticmethod
def parse_shapedata(shape_data_dict):
shapedata = shape_data_dict.copy()
shapedata["polylines"] = [
Place.ShapeData.Polyline(coords=p.split(" "))
for p in shapedata["polylines"]["polyline"]
]
if "urls" in shapedata:
shapedata["shapefile"] = shapedata.pop("urls")["shapefile"].text
return shapedata
@staticmethod
def parse_place(place_dict):
place = place_dict.copy()
if "locality" in place:
place["locality"] = Place(**Place.parse_place(place["locality"]))
if "county" in place:
place["county"] = Place(**Place.parse_place(place["county"]))
if "region" in place:
place["region"] = Place(**Place.parse_place(place["region"]))
if "country" in place:
place["country"] = Place(**Place.parse_place(place["country"]))
if "shapedata" in place:
shapedata = Place.parse_shapedata(place["shapedata"])
place["shapedata"] = Place.ShapeData(**shapedata)
if "text" in place:
place["name"] = place.pop("text")
place["id"] = place.pop("place_id")
return place
@static_caller("flickr.places.getInfoByUrl")
def getByUrl(url):
return {'url': url}, lambda r: Place(**Place.parse_place(r["place"]))
@static_caller("flickr.places.getPlaceTypes")
def getPlaceTypes(**args):
def format_result(r):
places_types = r["place_types"]["place_type"]
return [Place.Type(id=pt.pop("place_type_id"), **pt)
for pt in places_types]
return args, format_result
@static_caller("flickr.places.getShapeHistory")
def getShapeHistory(**args):
def format_result(r):
info = r["shapes"]
return [Place.ShapeData(**Place.parse_shapedata(sd))
for sd in _check_list(info.pop("shapedata"))]
return args, format_result
@caller("flickr.places.getTopPlacesList")
def getTopPlaces(self, **args):
return args, _extract_place_list
@static_caller("flickr.places.placesForBoundingBox")
def placesForBoundingBox(**args):
def format_result(r):
info = r["places"]
return [
Place(**Place.parse_place(place))
for place in info.pop("place")]
return args, format_result
@static_caller("flickr.places.placesForContacts")
def placesForContacts(**args):
def format_result(r):
info = r["places"]
return [
Place(**Place.parse_place(place))
for place in info.pop("place")]
return args, format_result
@static_caller("flickr.places.placesForTags")
def placesForTags(**args):
return args, _extract_place_list
@static_caller("flickr.places.placesForUser")
def placesForUser(**args):
return args, _extract_place_list
@static_caller("flickr.places.tagsForPlace")
def tagsForPlace(**args):
args = _format_id("place", args)
args = _format_id("woe", args)
return args, lambda r: [Place.Tag(**t) for t in r["tags"]["tag"]]
@caller("flickr.places.tagsForPlace")
def getTags(self, **args):
return args, lambda r: [Place.Tag(**t) for t in r["tags"]["tag"]]
class prefs(FlickrObject):
@static_caller("flickr.prefs.getContentType")
def getContentType(**args):
return args, lambda r: r["person"]["content_type"]
@static_caller("flickr.prefs.getGeoPerms")
def getGeoPerms(**args):
return args, lambda r: r["person"]
@static_caller("flickr.prefs.getHidden")
def getHidden(**args):
return args, lambda r: bool(r["person"]["hidden"])
@static_caller("flickr.prefs.getPrivacy")
def getPrivacy(**args):
return args, lambda r: r["person"]["privacy"]
@static_caller("flickr.prefs.getSafetyLevel")
def getSafetyLevel(**args):
return args, lambda r: r["person"]["safety_level"]
class Reflection(FlickrObject):
@static_caller("flickr.reflection.getMethodInfo")
def getMethodInfo(method_name):
return {"method_name": method_name}, lambda r: r["method"]
@static_caller("flickr.reflection.getMethods")
def getMethods():
return {}, lambda r: r["methods"]["method"]
class stats(FlickrObject):
class Domain(FlickrObject):
__display__ = ["name"]
class Referrer(FlickrObject):
__display__ = ["url", "views"]
__converters__ = [
dict_converter(["views"], int),
]
@static_caller("flickr.stats.getCollectionDomains")
def getCollectionDomains(**args):
def format_result(r):
info = r["domains"]
domains = [stats.Domain(**d) for d in info.pop("domain")]
return FlickrList(domains, Info(**info))
return _format_id("collection", args), format_result
@static_caller("flickr.stats.getCollectionReferrers")
def getCollectionReferrers(**args):
def format_result(r):
info = r["domain"]
referrers = [stats.Referrer(**r) for r in info.pop("referrer")]
return FlickrList(referrers, Info(**info))
return _format_id("collection", args), format_result
@static_caller("flickr.stats.getCSVFiles")
def getCSVFiles():
return {}, lambda r: r["stats"]["csvfiles"]["csv"]
@static_caller("flickr.stats.getPhotoDomains")
def getPhotoDomains(**args):
def format_result(r):
info = r["domains"]
domains = [stats.Domain(**d) for d in info.pop("domain")]
return FlickrList(domains, Info(**info))
return _format_id("photo", args), format_result
@static_caller("flickr.stats.getPhotoReferrers")
def getPhotoReferrers(**args):
def format_result(r):
info = r["domain"]
referrers = [stats.Referrer(**r) for r in info.pop("referrer")]
return FlickrList(referrers, Info(**info))
return _format_id("photo", args), format_result
@static_caller("flickr.stats.getPhotosetDomains")
def getPhotosetDomains(**args):
def format_result(r):
info = r["domains"]
domains = [stats.Domain(**d) for d in info.pop("domain")]
return FlickrList(domains, Info(**info))
return _format_id("photoset", args), format_result
@static_caller("flickr.stats.getPhotosetReferrers")
def getPhotosetReferrers(**args):
def format_result(r):
info = r["domain"]
referrers = [stats.Referrer(**r) for r in info.pop("referrer")]
return FlickrList(referrers, Info(**info))
return _format_id("photoset", args), format_result
@static_caller("flickr.stats.getPhotostreamDomains")
def getPhotostreamDomains(**args):
def format_result(r):
info = r["domains"]
domains = [stats.Domain(**d) for d in info.pop("domain")]
return FlickrList(domains, Info(**info))
return args, format_result
@static_caller("flickr.stats.getPhotostreamReferrers")
def getPhotostreamReferrers(**args):
def format_result(r):
info = r["domain"]
referrers = [stats.Referrer(**r) for r in info.pop("referrer")]
return FlickrList(referrers, Info(**info))
return args, format_result
@static_caller("flickr.stats.getPhotostreamStats")
def getPhotostreamStats(date, **args):
args["date"] = date
return args, lambda r: int(r["stats"]["views"])
@static_caller("flickr.stats.getPopularPhotos")
def getPopularPhotos():
def format_result(r):
info = r["photos"]
photos = []
for p in info.pop("photo"):
pstat = p.pop("stats")
photos.append((Photo(**p), pstat))
return FlickrList(photos, Info(**info))
return {}, format_result
@static_caller("flickr.stats.getTotalViews")
def getTotalViews(**args):
return args, lambda r: r["stats"]
class Tag(FlickrObject):
__display__ = ["id", "text"]
__self_name__ = "tag_id"
class Cluster(FlickrObject):
__display__ = ["total"]
__self_name__ = "cluster_id"
@caller("flickr.tags.getClusterPhotos")
def getPhotos(self, **args):
return args, _extract_photo_list
@caller("flickr.photos.removeTag")
def remove(self, **args):
return args, _none
@static_caller("flickr.tags.getClusters")
def getClusters(**args):
def format_result(r):
clusters = r["clusters"]["cluster"]
return [
Tag.Cluster(tag=args["tag"],
tags=[Tag(text=t) for t in c["tag"]],
total=c["total"]
) for c in clusters
]
return args, format_result
@static_caller("flickr.tags.getHotList")
def getHotList(**args):
return args, lambda r: [Tag(**t) for t in r["hottags"]["tag"]]
@static_caller("flickr.tags.getListUser")
def getListUser(**args):
return (
_format_id("user", args),
lambda r: [Tag(**t) for t in r["who"]["tags"]["tag"]]
)
@static_caller("flickr.tags.getListUserPopular")
def getListUserPopular(**args):
return (_format_id("user", args),
lambda r: [Tag(**t) for t in r["who"]["tags"]["tag"]])
@static_caller("flickr.tags.getListUserRaw")
def getListUserRaw(**args):
def format_result(r):
tags = r["who"]["tags"]["tag"]
return [{'clean': t["clean"], "raws": t["raw"]} for t in tags]
return args, format_result
@static_caller("flickr.tags.getRelated")
def getRelated(tag, **args):
args["tag"] = tag
return args, lambda r: r["tags"]["tag"]
class test(FlickrObject):
@static_caller("flickr.test.echo")
def echo(**args):
return args, lambda r: r
@static_caller("flickr.test.login")
def login(token=None, **args):
return args, lambda r: Person(token=token, **r["user"])
@static_caller("flickr.test.null")
def null():
return {}, _none
class UploadTicket(FlickrObject):
pass
def _extract_activity_list(r):
items = _check_list(r["items"]["item"])
activities = []
for item in items:
activity = item.pop("activity")
item_type = item.pop("type")
if item_type == "photo":
item = Photo(**item)
elif item_type == "photoset":
item = Photoset(**item)
events_ = []
events = _check_list(activity["event"])
for e in events:
user = e["user"]
username = e.pop("username")
e["user"] = Person(id=user, username=username)
e_type = e.pop("type")
if e_type == "comment":
if item_type == "photo":
events_.append(Photo.Comment(photo=item, **e))
elif item_type == "photoset":
events_.append(Photoset.Comment(photoset=item, **e))
elif e_type == 'note':
events_.append(Photo.Note(photo=item, **e))
activities.append(Activity(item=item, events=events_))
return activities
def _format_id(name, args):
try:
args[name + "_id"] = args.pop(name).id
except KeyError:
pass
return args
def _format_extras(args):
try:
extras = args["extras"]
if isinstance(extras, list):
args["extras"] = ", ".join(extras)
except KeyError:
pass
return args
def _new(cls):
def _newobject(**args):
return cls(**args)
return _newobject
def _none(r):
return None
def _extract_place_list(r):
info = r["places"]
return FlickrList(
[Place(id=place.pop("place_id"), **place)
for place in info.pop("place")],
Info(**info)
)
def _extract_photo_list(r, token=None):
photos = []
infos = r["photos"]
pp = infos.pop("photo")
if not isinstance(pp, list):
pp = [pp]
for p in pp:
owner = Person(id=p["owner"], token=token)
p["owner"] = owner
p["token"] = token
photos.append(Photo(**p))
return FlickrList(photos, Info(**infos))
def _check_list(obj):
if isinstance(obj, list):
return obj
else:
return [obj]
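# Quick sketch of how these helpers normalise call arguments (values are
# hypothetical and shown only for illustration):
#
#   >>> _format_id("photo", {"photo": Photo(id="1234")})
#   {'photo_id': '1234'}
#   >>> _format_extras({"extras": ["views", "tags"]})
#   {'extras': 'views, tags'}
#   >>> _check_list({"id": "1"})
#   [{'id': '1'}]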
class Walker(object):
"""
Object to walk along paginated results. It lets you loop over all the
results of a query regardless of pagination.
w = Walker(method, *args, **kwargs)
arguments:
- method: a method returning a FlickrList object.
- *args: positional arguments to call 'method' with
- **kwargs: named arguments to call 'method' with
ex:
>>> w = Walker(Photo.search, tags="animals")
>>> for photo in w:
>>> print photo.title
You can also use slices:
ex:
>>> w = Walker(Photo.search, tags="animals")
>>> for photo in w[:20]:
>>> print photo.title
but be aware that if a starting index is given, all the items up to it
will be iterated over first, so using a large starting value may be slow.
"""
def __init__(self, method, *args, **kwargs):
"""
Constructor
arguments:
- method: a method returning a FlickrList object.
- *args: positional arguments to call 'method' with
- **kwargs: named arguments to call 'method' with
"""
self.method = method
self.args = args
self.kwargs = kwargs
self._curr_list = self.method(*self.args, **self.kwargs)
self._info = self._curr_list.info
self._curr_index = 0
self._page = 1
self.stop = None
def __len__(self):
return self._info.total
def __iter__(self):
return self
def __getitem__(self, slice_):
if isinstance(slice_, slice):
return SlicedWalker(self,
slice_.start,
slice_.stop,
slice_.step)
else:
raise ValueError("Only slices can be used as subscript")
def next(self):
if self._curr_index == len(self._curr_list):
if self._page < self._info.pages:
self._page += 1
self.kwargs["page"] = self._page
self._curr_list = self.method(*self.args, **self.kwargs)
self._info = self._curr_list.info
self._curr_index = 0
else:
raise StopIteration()
curr = self._curr_list[self._curr_index]
self._curr_index += 1
return curr
class SlicedWalker(object):
""" Used to apply slices on objects.
Starting at a large index might be slow, since all items before the
start will be iterated over first.
"""
def __init__(self, walker, start, stop, step):
self.walker = walker
self.start = start or 0
self.stop = stop or len(walker)
self.step = step or 1
self._begin = True
self._total = 0
def __len__(self):
return (self.stop - self.start) // self.step
def __iter__(self):
return self
def next(self):
if self._begin:
for i in range(self.start):
self.walker.next()
self._total += 1
self._begin = False
else:
for i in range(self.step - 1):
self._total += 1
self.walker.next()
if self._total < self.stop:
self._total += 1
return self.walker.next()
else:
raise StopIteration()
| bsd-3-clause |
murali-munna/scikit-learn | sklearn/decomposition/pca.py | 192 | 23117 | """ Principal Component Analysis
"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Michael Eickenberg <[email protected]>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd
from ..utils.validation import check_is_fitted
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
The dataset is assumed to be embedded in gaussian noise of shape(n,
dimf) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
Data spectrum.
rank: int
Tested rank value.
n_samples: int
Number of samples.
n_features: int
Number of features.
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
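# Minimal usage sketch (not part of the original module): given a centered data
# matrix ``X`` with n_samples >= n_features, the spectrum passed to
# ``_infer_dimension_`` is the per-sample explained variance of each singular
# value, and the rank with the highest Minka log-likelihood is returned.
#
#   >>> s = np.linalg.svd(X, compute_uv=False)
#   >>> spectrum = (s ** 2) / X.shape[0]
#   >>> _infer_dimension_(spectrum, n_samples=X.shape[0], n_features=X.shape[1])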
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometimes
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
Attributes
----------
components_ : array, [n_components, n_features]
Principal axes in feature space, representing the directions of
maximum variance in the data.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
n_components_ : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
compute the estimated data covariance and score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = check_array(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
explained_variance_ = (S ** 2) / n_samples
explained_variance_ratio_ = (explained_variance_ /
explained_variance_.sum())
components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(explained_variance_,
n_samples, n_features)
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d"
% (n_components, n_features))
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < n_features:
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
# store n_samples to revert whitening when getting covariance
self.n_samples_ = n_samples
self.components_ = components_[:n_components]
self.explained_variance_ = explained_variance_[:n_components]
explained_variance_ratio_ = explained_variance_ratio_[:n_components]
self.explained_variance_ratio_ = explained_variance_ratio_
self.n_components_ = n_components
return (U, S, V)
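# Worked example of the fractional ``n_components`` rule above (hypothetical
# numbers): with explained_variance_ratio_ = [0.7, 0.2, 0.07, 0.03] and
# n_components=0.9, ratio_cumsum = [0.7, 0.9, 0.97, 1.0], so
# np.sum(ratio_cumsum < 0.9) + 1 == 2 and two components are kept -- the
# smallest number whose cumulative explained variance reaches the threshold
# (the strict '<' lets a component that exactly hits the threshold count).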
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def transform(self, X):
"""Apply the dimensionality reduction on X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
"""
check_is_fitted(self, 'mean_')
if self.whiten:
return fast_dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
def score_samples(self, X):
"""Return the log-likelihood of each sample
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
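# For reference, the quantity computed above is the per-sample log-likelihood
# of the probabilistic PCA model,
#   log p(x) = -0.5 * (d * log(2*pi) - log|precision| + (x - mean)^T precision (x - mean)),
# where ``precision`` is the inverse model covariance returned by get_precision().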
def score(self, X, y=None):
"""Return the average log-likelihood of all samples
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Read more in the :ref:`User Guide <RandomizedPCA>`.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 3 by default.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometimes
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components. \
If k is not set then all components are stored and the sum of explained \
variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=3, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
(arXiv:0909.4061)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=3,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = fast_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
        X_original : array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
check_is_fitted(self, 'mean_')
X_original = fast_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
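# Illustrative sketch (hypothetical helper, never called by library code): with
# whiten=True the components are rescaled by sqrt(n_samples) / singular value,
# so each projected component comes out with variance close to one.
def _randomized_pca_whitening_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 5) * np.array([10., 5., 1., 0.5, 0.1])
    pca = RandomizedPCA(n_components=2, whiten=True, random_state=0)
    Xt = pca.fit_transform(X)
    assert np.allclose(Xt.var(axis=0), 1.0, atol=0.05)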
| bsd-3-clause |
ndingwall/scikit-learn | sklearn/naive_bayes.py | 1 | 48947 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <[email protected]>
# Minor fixes by Fabian Pedregosa
# Amit Aides <[email protected]>
# Yehuda Finkelstein <[email protected]>
# Lars Buitinck
# Jan Hendrik Metzen <[email protected]>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.special import logsumexp
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array, deprecated
from .utils.extmath import safe_sparse_dot
from .utils.multiclass import _check_partial_fit_first_call
from .utils.validation import check_is_fitted, check_non_negative, column_or_1d
from .utils.validation import _check_sample_weight
from .utils.validation import _deprecate_positional_args
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB', 'ComplementNB',
'CategoricalNB']
class _BaseNB(ClassifierMixin, BaseEstimator, metaclass=ABCMeta):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
        shape (n_samples, n_classes).
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
@abstractmethod
def _check_X(self, X):
"""To be overridden in subclasses with the actual checks."""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
C : ndarray of shape (n_samples,)
Predicted target values for X
"""
check_is_fitted(self)
X = self._check_X(X)
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
"""
check_is_fitted(self)
X = self._check_X(X)
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
"""
return np.exp(self.predict_log_proba(X))
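# Illustrative sketch (hypothetical helper, never called by library code): it
# shows what the two methods above guarantee. predict_proba exponentiates the
# logsumexp-normalised joint log-likelihood, so every row sums to one and its
# argmax agrees with predict. GaussianNB is defined further down in this module.
def _predict_proba_normalisation_sketch():
    X = np.array([[-2., -1.], [-1., -1.], [1., 1.], [2., 1.]])
    y = np.array([0, 0, 1, 1])
    clf = GaussianNB().fit(X, y)
    proba = clf.predict_proba(X)
    assert np.allclose(proba.sum(axis=1), 1.0)
    assert np.array_equal(clf.classes_[proba.argmax(axis=1)], clf.predict(X))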
class GaussianNB(_BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via :meth:`partial_fit`.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Parameters
----------
priors : array-like of shape (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
var_smoothing : float, default=1e-9
Portion of the largest variance of all features that is added to
variances for calculation stability.
.. versionadded:: 0.20
Attributes
----------
class_count_ : ndarray of shape (n_classes,)
number of training samples observed in each class.
class_prior_ : ndarray of shape (n_classes,)
probability of each class.
classes_ : ndarray of shape (n_classes,)
class labels known to the classifier
epsilon_ : float
absolute additive value to variances
sigma_ : ndarray of shape (n_classes, n_features)
variance of each feature per class
theta_ : ndarray of shape (n_classes, n_features)
mean of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB()
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
@_deprecate_positional_args
def __init__(self, *, priors=None, var_smoothing=1e-9):
self.priors = priors
self.var_smoothing = var_smoothing
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Gaussian Naive Bayes supports fitting with *sample_weight*.
Returns
-------
self : object
"""
X, y = self._validate_data(X, y)
y = column_or_1d(y, warn=True)
return self._partial_fit(X, y, np.unique(y), _refit=True,
sample_weight=sample_weight)
def _check_X(self, X):
return check_array(X)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like of shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like of shape (number of Gaussians,)
Variances for Gaussians in original set.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like of shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like of shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=sample_weight)
new_var = np.average((X - new_mu) ** 2, axis=0,
weights=sample_weight)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = (old_ssd + new_ssd +
(n_new * n_past / n_total) * (mu - new_mu) ** 2)
total_var = total_ssd / n_total
return total_mu, total_var
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
        as large as possible (as long as they fit in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes,), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Returns
-------
self : object
"""
return self._partial_fit(X, y, classes, _refit=False,
sample_weight=sample_weight)
def _partial_fit(self, X, y, classes=None, _refit=False,
sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes,), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit : bool, default=False
            If True, act as though this were the first time we called
            _partial_fit (i.e., throw away any past fitting and start over).
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
X, y = check_X_y(X, y)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
# If the ratio of data variance between dimensions is too small, it
# will cause numerical errors. To address this, we artificially
# boost the variance by epsilon, a small fraction of the standard
# deviation of the largest dimension.
self.epsilon_ = self.var_smoothing * np.var(X, axis=0).max()
if _refit:
self.classes_ = None
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_count_ = np.zeros(n_classes, dtype=np.float64)
# Initialise the class prior
# Take into account the priors
if self.priors is not None:
priors = np.asarray(self.priors)
                # Check that the provided priors match the number of classes
if len(priors) != n_classes:
raise ValueError('Number of priors must match number of'
' classes.')
# Check that the sum is 1
if not np.isclose(priors.sum(), 1.0):
raise ValueError('The sum of the priors should be 1.')
                # Check that the priors are non-negative
if (priors < 0).any():
raise ValueError('Priors must be non-negative.')
self.class_prior_ = priors
else:
# Initialize the priors to zeros for each class
self.class_prior_ = np.zeros(len(self.classes_),
dtype=np.float64)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Put epsilon back in each time
self.sigma_[:, :] -= self.epsilon_
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = np.in1d(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError("The target label(s) %s in y do not exist in the "
"initial classes %s" %
(unique_y[~unique_y_in_classes], classes))
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
X_i, sw_i)
self.theta_[i, :] = new_theta
self.sigma_[i, :] = new_sigma
self.class_count_[i] += N_i
self.sigma_[:, :] += self.epsilon_
        # Update only if no priors were provided
if self.priors is None:
# Empirical prior, with sample_weight taken into account
self.class_prior_ = self.class_count_ / self.class_count_.sum()
return self
def _joint_log_likelihood(self, X):
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
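# Illustrative sketch (hypothetical helper, never called by library code): the
# Chan/Golub/LeVeque update in _update_mean_variance combines per-batch
# statistics exactly, so fitting in two chunks with partial_fit matches a
# single call to fit up to the tiny var_smoothing term and floating point.
def _gaussian_online_update_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(200, 3)
    y = rng.randint(0, 2, size=200)
    full = GaussianNB().fit(X, y)
    online = GaussianNB()
    online.partial_fit(X[:100], y[:100], classes=np.array([0, 1]))
    online.partial_fit(X[100:], y[100:])
    assert np.allclose(full.theta_, online.theta_)
    assert np.allclose(full.sigma_, online.sigma_, atol=1e-6)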
_ALPHA_MIN = 1e-10
class _BaseDiscreteNB(_BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per _BaseNB
"""
def _check_X(self, X):
return check_array(X, accept_sparse='csr')
def _check_X_y(self, X, y):
return self._validate_data(X, y, accept_sparse='csr')
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
with warnings.catch_warnings():
# silence the warning when count is 0 because class was not yet
# observed
warnings.simplefilter("ignore", RuntimeWarning)
log_class_count = np.log(self.class_count_)
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = (log_class_count -
np.log(self.class_count_.sum()))
else:
self.class_log_prior_ = np.full(n_classes, -np.log(n_classes))
def _check_alpha(self):
if np.min(self.alpha) < 0:
raise ValueError('Smoothing parameter alpha = %.1e. '
'alpha should be > 0.' % np.min(self.alpha))
if isinstance(self.alpha, np.ndarray):
if not self.alpha.shape[0] == self.n_features_:
raise ValueError("alpha should be a scalar or a numpy array "
"with shape [n_features]")
if np.min(self.alpha) < _ALPHA_MIN:
warnings.warn('alpha too small will result in numeric errors, '
'setting alpha = %.1e' % _ALPHA_MIN)
return np.maximum(self.alpha, _ALPHA_MIN)
return self.alpha
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
        This method has some performance overhead, hence it is better to call
        partial_fit on chunks of data that are as large as possible
        (as long as they fit in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
X, y = self._check_X_y(X, y)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_effective_classes = len(classes) if len(classes) > 1 else 2
self._init_counters(n_effective_classes, n_features)
self.n_features_ = n_features
elif n_features != self.n_features_:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (n_features, self.n_features_))
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# label_binarize() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently
Y = Y.astype(np.float64, copy=False)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
sample_weight = np.atleast_2d(sample_weight)
Y *= sample_weight.T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
        # calls to partial_fit and prior to any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
alpha = self._check_alpha()
self._update_feature_log_prob(alpha)
self._update_class_log_prior(class_prior=class_prior)
return self
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
X, y = self._check_X_y(X, y)
_, n_features = X.shape
self.n_features_ = n_features
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
if sample_weight is not None:
Y = Y.astype(np.float64, copy=False)
sample_weight = _check_sample_weight(sample_weight, X)
sample_weight = np.atleast_2d(sample_weight)
Y *= sample_weight.T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
self._init_counters(n_effective_classes, n_features)
self._count(X, Y)
alpha = self._check_alpha()
self._update_feature_log_prob(alpha)
self._update_class_log_prior(class_prior=class_prior)
return self
def _init_counters(self, n_effective_classes, n_features):
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
# mypy error: Decorated property not supported
@deprecated("Attribute coef_ was deprecated in " # type: ignore
"version 0.24 and will be removed in 0.26.")
@property
def coef_(self):
return (self.feature_log_prob_[1:]
if len(self.classes_) == 2 else self.feature_log_prob_)
# mypy error: Decorated property not supported
@deprecated("Attribute intercept_ was deprecated in " # type: ignore
"version 0.24 and will be removed in 0.26.")
@property
def intercept_(self):
return (self.class_log_prior_[1:]
if len(self.classes_) == 2 else self.class_log_prior_)
def _more_tags(self):
return {'poor_score': True}
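# Illustrative sketch (hypothetical helper, never called by library code): the
# out-of-core pattern described in partial_fit above. `classes` must be given
# on the first call only; because raw counts are simply accumulated, chunked
# fitting reproduces a single fit. MultinomialNB is defined just below.
def _discrete_partial_fit_chunks_sketch():
    rng = np.random.RandomState(0)
    X = rng.randint(5, size=(300, 20))
    y = rng.randint(0, 3, size=300)
    clf = MultinomialNB()
    classes = np.array([0, 1, 2])
    for start in range(0, X.shape[0], 100):
        clf.partial_fit(X[start:start + 100], y[start:start + 100],
                        classes=classes if start == 0 else None)
    assert np.allclose(clf.feature_log_prob_,
                       MultinomialNB().fit(X, y).feature_log_prob_)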
class MultinomialNB(_BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Read more in the :ref:`User Guide <multinomial_naive_bayes>`.
Parameters
----------
alpha : float, default=1.0
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : bool, default=True
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_count_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
class_log_prior_ : ndarray of shape (n_classes, )
Smoothed empirical log probability for each class.
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier
coef_ : ndarray of shape (n_classes, n_features)
Mirrors ``feature_log_prob_`` for interpreting `MultinomialNB`
as a linear model.
.. deprecated:: 0.24
``coef_`` is deprecated in 0.24 and will be removed in 0.26.
feature_count_ : ndarray of shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
feature_log_prob_ : ndarray of shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
intercept_ : ndarray of shape (n_classes,)
Mirrors ``class_log_prior_`` for interpreting `MultinomialNB`
as a linear model.
.. deprecated:: 0.24
``intercept_`` is deprecated in 0.24 and will be removed in 0.26.
n_features_ : int
Number of features of each sample.
Examples
--------
>>> import numpy as np
>>> rng = np.random.RandomState(1)
>>> X = rng.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, y)
MultinomialNB()
>>> print(clf.predict(X[2:3]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
https://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
"""
@_deprecate_positional_args
def __init__(self, *, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _more_tags(self):
return {'requires_positive_X': True}
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
check_non_negative(X, "MultinomialNB (input X)")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self, alpha):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc) -
np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
return (safe_sparse_dot(X, self.feature_log_prob_.T) +
self.class_log_prior_)
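# Illustrative sketch (hypothetical helper, never called by library code): the
# joint log-likelihood of MultinomialNB is an affine function of the input,
# which is the rationale for the (deprecated) coef_ / intercept_ aliases noted
# in the docstring above.
def _multinomial_linear_view_sketch():
    rng = np.random.RandomState(0)
    X = rng.randint(5, size=(20, 10))
    y = rng.randint(0, 3, size=20)
    clf = MultinomialNB().fit(X, y)
    scores = X @ clf.feature_log_prob_.T + clf.class_log_prior_
    assert np.array_equal(clf.classes_[scores.argmax(axis=1)], clf.predict(X))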
class ComplementNB(_BaseDiscreteNB):
"""The Complement Naive Bayes classifier described in Rennie et al. (2003).
The Complement Naive Bayes classifier was designed to correct the "severe
assumptions" made by the standard Multinomial Naive Bayes classifier. It is
particularly suited for imbalanced data sets.
Read more in the :ref:`User Guide <complement_naive_bayes>`.
.. versionadded:: 0.20
Parameters
----------
alpha : float, default=1.0
Additive (Laplace/Lidstone) smoothing parameter (0 for no smoothing).
fit_prior : bool, default=True
Only used in edge case with a single class in the training set.
class_prior : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. Not used.
norm : bool, default=False
Whether or not a second normalization of the weights is performed. The
default behavior mirrors the implementations found in Mahout and Weka,
which do not follow the full algorithm described in Table 9 of the
paper.
Attributes
----------
class_count_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
class_log_prior_ : ndarray of shape (n_classes,)
Smoothed empirical log probability for each class. Only used in edge
case with a single class in the training set.
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier
coef_ : ndarray of shape (n_classes, n_features)
Mirrors ``feature_log_prob_`` for interpreting `ComplementNB`
as a linear model.
.. deprecated:: 0.24
``coef_`` is deprecated in 0.24 and will be removed in 0.26.
feature_all_ : ndarray of shape (n_features,)
Number of samples encountered for each feature during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : ndarray of shape (n_classes, n_features)
Number of samples encountered for each (class, feature) during fitting.
This value is weighted by the sample weight when provided.
feature_log_prob_ : ndarray of shape (n_classes, n_features)
Empirical weights for class complements.
intercept_ : ndarray of shape (n_classes,)
Mirrors ``class_log_prior_`` for interpreting `ComplementNB`
as a linear model.
.. deprecated:: 0.24
            ``intercept_`` is deprecated in 0.24 and will be removed in 0.26.
n_features_ : int
Number of features of each sample.
Examples
--------
>>> import numpy as np
>>> rng = np.random.RandomState(1)
>>> X = rng.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import ComplementNB
>>> clf = ComplementNB()
>>> clf.fit(X, y)
ComplementNB()
>>> print(clf.predict(X[2:3]))
[3]
References
----------
Rennie, J. D., Shih, L., Teevan, J., & Karger, D. R. (2003).
Tackling the poor assumptions of naive bayes text classifiers. In ICML
(Vol. 3, pp. 616-623).
https://people.csail.mit.edu/jrennie/papers/icml03-nb.pdf
"""
@_deprecate_positional_args
def __init__(self, *, alpha=1.0, fit_prior=True, class_prior=None,
norm=False):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
self.norm = norm
def _more_tags(self):
return {'requires_positive_X': True}
def _count(self, X, Y):
"""Count feature occurrences."""
check_non_negative(X, "ComplementNB (input X)")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
self.feature_all_ = self.feature_count_.sum(axis=0)
def _update_feature_log_prob(self, alpha):
"""Apply smoothing to raw counts and compute the weights."""
comp_count = self.feature_all_ + alpha - self.feature_count_
logged = np.log(comp_count / comp_count.sum(axis=1, keepdims=True))
# _BaseNB.predict uses argmax, but ComplementNB operates with argmin.
if self.norm:
summed = logged.sum(axis=1, keepdims=True)
feature_log_prob = logged / summed
else:
feature_log_prob = -logged
self.feature_log_prob_ = feature_log_prob
def _joint_log_likelihood(self, X):
"""Calculate the class scores for the samples in X."""
jll = safe_sparse_dot(X, self.feature_log_prob_.T)
if len(self.classes_) == 1:
jll += self.class_log_prior_
return jll
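# Illustrative sketch (hypothetical helper, never called by library code): with
# norm=False (the default) the weights in feature_log_prob_ are the negated
# log-probabilities of the complement counts, so the argmax in _BaseNB.predict
# effectively selects the class whose complement fits the sample worst.
def _complement_weights_sketch():
    rng = np.random.RandomState(0)
    X = rng.randint(5, size=(6, 10)).astype(np.float64)
    y = np.array([1, 1, 2, 2, 3, 3])
    clf = ComplementNB(alpha=1.0).fit(X, y)
    comp = clf.feature_all_ + 1.0 - clf.feature_count_
    logged = np.log(comp / comp.sum(axis=1, keepdims=True))
    assert np.allclose(clf.feature_log_prob_, -logged)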
class BernoulliNB(_BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.
Parameters
----------
alpha : float, default=1.0
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, default=0.0
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : bool, default=True
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_count_ : ndarray of shape (n_classes)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
class_log_prior_ : ndarray of shape (n_classes)
Log probability of each class (smoothed).
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier
coef_ : ndarray of shape (n_classes, n_features)
Mirrors ``feature_log_prob_`` for interpreting `BernoulliNB`
as a linear model.
feature_count_ : ndarray of shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
feature_log_prob_ : ndarray of shape (n_classes, n_features)
Empirical log probability of features given a class, P(x_i|y).
intercept_ : ndarray of shape (n_classes,)
Mirrors ``class_log_prior_`` for interpreting `BernoulliNB`
as a linear model.
n_features_ : int
Number of features of each sample.
Examples
--------
>>> import numpy as np
>>> rng = np.random.RandomState(1)
>>> X = rng.randint(5, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB()
>>> print(clf.predict(X[2:3]))
[3]
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
https://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
@_deprecate_positional_args
def __init__(self, *, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _check_X(self, X):
X = super()._check_X(X)
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
return X
def _check_X_y(self, X, y):
X, y = super()._check_X_y(X, y)
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
return X, y
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self, alpha):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + alpha
smoothed_cc = self.class_count_ + alpha * 2
self.feature_log_prob_ = (np.log(smoothed_fc) -
np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X.shape
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
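# Illustrative sketch (hypothetical helper, never called by library code): the
# algebraic identity used in _joint_log_likelihood above. For binary x,
#   sum_i [x_i*log(p_i) + (1 - x_i)*log(1 - p_i)]
#       = x . (log p - log(1 - p)) + sum_i log(1 - p_i),
# which is what the vectorised expression computes.
def _bernoulli_jll_identity_sketch():
    rng = np.random.RandomState(0)
    X = (rng.rand(20, 8) > 0.5).astype(np.float64)
    y = rng.randint(0, 2, size=20)
    clf = BernoulliNB(alpha=1.0).fit(X, y)
    log_p = clf.feature_log_prob_
    log_q = np.log(1 - np.exp(log_p))
    explicit = X @ log_p.T + (1 - X) @ log_q.T + clf.class_log_prior_
    vectorised = X @ (log_p - log_q).T + log_q.sum(axis=1) + clf.class_log_prior_
    assert np.allclose(explicit, vectorised)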
class CategoricalNB(_BaseDiscreteNB):
"""Naive Bayes classifier for categorical features
The categorical Naive Bayes classifier is suitable for classification with
discrete features that are categorically distributed. The categories of
each feature are drawn from a categorical distribution.
Read more in the :ref:`User Guide <categorical_naive_bayes>`.
Parameters
----------
alpha : float, default=1.0
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : bool, default=True
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
min_categories : int or array-like of shape (n_features,), default=None
Minimum number of categories per feature.
- integer: Sets the minimum number of categories per feature to
          `n_categories` for each feature.
- array-like: shape (n_features,) where `n_categories[i]` holds the
minimum number of categories for the ith column of the input.
- None (default): Determines the number of categories automatically
from the training data.
.. versionadded:: 0.24
Attributes
----------
category_count_ : list of arrays of shape (n_features,)
Holds arrays of shape (n_classes, n_categories of respective feature)
for each feature. Each array provides the number of samples
encountered for each class and category of the specific feature.
class_count_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
class_log_prior_ : ndarray of shape (n_classes,)
Smoothed empirical log probability for each class.
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier
feature_log_prob_ : list of arrays of shape (n_features,)
Holds arrays of shape (n_classes, n_categories of respective feature)
for each feature. Each array provides the empirical log probability
of categories given the respective feature and class, ``P(x_i|y)``.
n_features_ : int
Number of features of each sample.
n_categories_ : ndarray of shape (n_features,), dtype=np.int64
Number of categories for each feature. This value is
inferred from the data or set by the minimum number of categories.
.. versionadded:: 0.24
Examples
--------
>>> import numpy as np
>>> rng = np.random.RandomState(1)
>>> X = rng.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import CategoricalNB
>>> clf = CategoricalNB()
>>> clf.fit(X, y)
CategoricalNB()
>>> print(clf.predict(X[2:3]))
[3]
"""
@_deprecate_positional_args
def __init__(self, *, alpha=1.0, fit_prior=True, class_prior=None,
min_categories=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
self.min_categories = min_categories
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features. Here, each feature of X is
assumed to be from a different categorical distribution.
It is further assumed that all categories of each feature are
represented by the numbers 0, ..., n - 1, where n refers to the
total number of categories for the given feature. This can, for
instance, be achieved with the help of OrdinalEncoder.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
return super().fit(X, y, sample_weight=sample_weight)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
        This method has some performance overhead, hence it is better to call
        partial_fit on chunks of data that are as large as possible
        (as long as they fit in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features. Here, each feature of X is
assumed to be from a different categorical distribution.
It is further assumed that all categories of each feature are
represented by the numbers 0, ..., n - 1, where n refers to the
total number of categories for the given feature. This can, for
instance, be achieved with the help of OrdinalEncoder.
y : array-like of shape (n_samples)
Target values.
classes : array-like of shape (n_classes), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like of shape (n_samples), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
return super().partial_fit(X, y, classes,
sample_weight=sample_weight)
def _more_tags(self):
return {'requires_positive_X': True}
def _check_X(self, X):
X = check_array(X, dtype='int', accept_sparse=False,
force_all_finite=True)
check_non_negative(X, "CategoricalNB (input X)")
return X
def _check_X_y(self, X, y):
X, y = self._validate_data(X, y, dtype='int', accept_sparse=False,
force_all_finite=True)
check_non_negative(X, "CategoricalNB (input X)")
return X, y
def _init_counters(self, n_effective_classes, n_features):
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.category_count_ = [np.zeros((n_effective_classes, 0))
for _ in range(n_features)]
@staticmethod
def _validate_n_categories(X, min_categories):
        # rely on max for n_categories since categories are encoded between 0...n-1
n_categories_X = X.max(axis=0) + 1
min_categories_ = np.array(min_categories)
if min_categories is not None:
if not np.issubdtype(min_categories_.dtype, np.signedinteger):
raise ValueError(
f"'min_categories' should have integral type. Got "
f"{min_categories_.dtype} instead."
)
n_categories_ = np.maximum(n_categories_X,
min_categories_,
dtype=np.int64)
if n_categories_.shape != n_categories_X.shape:
raise ValueError(
f"'min_categories' should have shape ({X.shape[1]},"
f") when an array-like is provided. Got"
f" {min_categories_.shape} instead."
)
return n_categories_
else:
return n_categories_X
def _count(self, X, Y):
def _update_cat_count_dims(cat_count, highest_feature):
diff = highest_feature + 1 - cat_count.shape[1]
if diff > 0:
# we append a column full of zeros for each new category
return np.pad(cat_count, [(0, 0), (0, diff)], 'constant')
return cat_count
def _update_cat_count(X_feature, Y, cat_count, n_classes):
for j in range(n_classes):
mask = Y[:, j].astype(bool)
if Y.dtype.type == np.int64:
weights = None
else:
weights = Y[mask, j]
counts = np.bincount(X_feature[mask], weights=weights)
indices = np.nonzero(counts)[0]
cat_count[j, indices] += counts[indices]
self.class_count_ += Y.sum(axis=0)
self.n_categories_ = self._validate_n_categories(
X, self.min_categories)
for i in range(self.n_features_):
X_feature = X[:, i]
self.category_count_[i] = _update_cat_count_dims(
self.category_count_[i], self.n_categories_[i] - 1)
_update_cat_count(X_feature, Y,
self.category_count_[i],
self.class_count_.shape[0])
def _update_feature_log_prob(self, alpha):
feature_log_prob = []
for i in range(self.n_features_):
smoothed_cat_count = self.category_count_[i] + alpha
smoothed_class_count = smoothed_cat_count.sum(axis=1)
feature_log_prob.append(
np.log(smoothed_cat_count) -
np.log(smoothed_class_count.reshape(-1, 1)))
self.feature_log_prob_ = feature_log_prob
def _joint_log_likelihood(self, X):
if not X.shape[1] == self.n_features_:
raise ValueError("Expected input with %d features, got %d instead"
% (self.n_features_, X.shape[1]))
jll = np.zeros((X.shape[0], self.class_count_.shape[0]))
for i in range(self.n_features_):
indices = X[:, i]
jll += self.feature_log_prob_[i][:, indices].T
total_ll = jll + self.class_log_prior_
return total_ll
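# Illustrative sketch (hypothetical helper, never called by library code): the
# fit docstring above assumes each feature is encoded as integers 0..n-1, which
# sklearn.preprocessing.OrdinalEncoder provides.
def _categorical_encoding_sketch():
    from sklearn.preprocessing import OrdinalEncoder
    X_raw = np.array([["red", "small"], ["blue", "large"],
                      ["red", "large"], ["blue", "small"]])
    y = np.array([0, 1, 1, 0])
    enc = OrdinalEncoder()
    X = enc.fit_transform(X_raw)          # integer-coded categories as floats
    clf = CategoricalNB().fit(X, y)       # cast to int internally
    return clf.predict(enc.transform([["red", "large"]]))  # array([1])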
| bsd-3-clause |
ddcampayo/polyFEM | tools/movie_particles.py | 1 | 1385 | #!/usr/bin/python
import matplotlib.pyplot as plt
import numpy as np
#import matplotlib.cm as cm
#from matplotlib.colors import Normalize
import sys
#print "This is the name of the script: ", sys.argv[0]
#print "Number of arguments: ", len(sys.argv)
#print "The arguments are: " , str(sys.argv)
if len(sys.argv) == 1:
init_t = 0
else:
init_t = int( sys.argv[1] )
#import pylab as pl
plt.figure(figsize=(8,8))
skip=1
#path='timings_full/'
path='./'
LL= 1
for n in range( init_t ,2000000+skip,skip):
plt.clf()
dt=np.loadtxt(path+str(n)+'/particles.dat')
x=dt[:,0]; y=dt[:,1];
vol=dt[:,3]
# vx=dt[:,5]; vym=dt[:,6];
p=dt[:,5]
# I=dt[:,14]; # eccentricity
r = np.sqrt( x**2 + y**2 )
rm = np.argmax(r)
p -= p[ rm ] # np.min( p )
# plt.plot( r , p , 'o' )
plt.scatter( x , y , s=80 , c=p )
# plt.scatter( x , y , 80, c= vol , vmin=0.0022, vmax=0.0028 )
# plt.scatter( x , y , 10, c=w )
# plt.scatter( x , y , 10, c=I )
# plt.scatter( x , y , 80, c= I , vmin= 1.02e-6, vmax= 1.06e-6 )
# plt.scatter( x , y , 80, c= np.log( d2 + 1e-18 ) )
# plt.scatter( x , y , 10, c=om )
plt.xlim([-LL/2.0 , LL/2.0 ])
plt.ylim([-LL/2.0 , LL/2.0 ])
# pl.colorbar(ticks=[0.45,0.55])
print( 'snap{:03d}'.format( int(n/skip) ) )
plt.savefig( 'snap{:03d}'.format( int(n/skip) ) )
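# The loop above writes one PNG per frame (snap000.png, snap001.png, ...).
# The frames can then be assembled into a movie outside Python, for example
# with ffmpeg (assuming ffmpeg is installed; this command is not run here):
#   ffmpeg -framerate 10 -i snap%03d.png particles.mp4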
| gpl-3.0 |
mhue/scikit-learn | sklearn/linear_model/tests/test_omp.py | 272 | 7752 | # Author: Vlad Niculae
# Licence: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
| bsd-3-clause |
vibhorag/scikit-learn | sklearn/decomposition/tests/test_sparse_pca.py | 160 | 6028 | # Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
@if_safe_multiprocessing_with_blas
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
# Test that SparsePCA won't return NaN when there is 0 feature in all
# samples.
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
| bsd-3-clause |
zhouqiang00/Particle-Segmentation | CompareDataStars_data.py | 1 | 30346 | #!/home/zhouqiang/softwares/EMAN2/extlib/bin/python
# Author: Qiang Zhou
# School of Medicine, Tsinghua University
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# This complete copyright notice must be included in any revised version of the
# source code. Additional authorship citations may be added, but existing
# author citations must be preserved.
import os,sys
from metadata import *
import random
import math
from EMAN2 import *
from optparse import OptionParser
def getEulersIndexes(labels):
i_rot=labels["_rlnAngleRot"] - 1
i_tilt=labels["_rlnAngleTilt"] - 1
i_psi=labels["_rlnAnglePsi"] - 1
return i_rot,i_tilt,i_psi
def getCoordOriginIndexes(labels):
try:
i_x=labels["_rlnCoordinateX"] - 1
except KeyError:
i_x= -1
try:
i_y=labels["_rlnCoordinateY"] - 1
except KeyError:
i_y= -1
try:
i_sx=labels["_rlnOriginX"] - 1
except KeyError:
i_sx= -1
try:
i_sy=labels["_rlnOriginY"] - 1
except KeyError:
i_sy= -1
return i_x,i_y,i_sx,i_sy
def getEulersFromRelionDataRecord(record,i_rot,i_tilt,i_psi):
return float(record[i_rot]),float(record[i_tilt]),float(record[i_psi])
def getCoordOriginFromRelionDataRecord(record,i_x,i_y,i_sx,i_sy):
if i_x >= 0:
x=float(record[i_x])
else:
x=0.0
if i_y >= 0:
y=float(record[i_y])
else:
y=0.0
if i_sx >= 0:
sx=float(record[i_sx])
else:
sx=0.0
if i_sy >= 0:
sy=float(record[i_sy])
else:
sy=0.0
return x,y,sx,sy
#return float(record[i_x]),float(record[i_y]),float(record[i_sx]),float(record[i_sy])
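# Clamp a cosine value to the open interval (-1, 1) so that the acos() calls
# below never raise a math domain error from floating-point round-off.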
def CLIP(v,max=0.999999,min=-0.999999):
if v > max:
v = max
if v < min:
v = min
return v
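# Wrap an angle in degrees into the range [0, 360).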
def RegularAnglePhiPsi(angle):
while True:
if angle >= 360.:
angle -= 360.
if angle < 360.:
break
while True:
if angle < 0.0:
angle += 360.
if angle >=0.0:
break
return angle
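# Average angular distance (in degrees) between the basis vectors of two
# orientations given as spider Euler triplets, minimized over all
# symmetry-related copies of the second orientation.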
def calculateEulerDistance(e1,e2,e3,e4,e5,e6,type="spider",symmetry="c1"):
if type == "spider":
syms=Symmetries.get(symmetry)
diff_angs_min = 3000.
ts1=Transform({"type":"spider","phi":e1,"theta":e2,"psi":e3})
ts2=Transform({"type":"spider","phi":e4,"theta":e5,"psi":e6})
		# The following line does not work for some particles, at least for c6 symmetry, for an unknown reason:
#for sym in syms.get_syms():
matrix1=ts1.get_matrix()
for n in range(syms.get_nsym()):
if symmetry == "c1" or symmetry == "C1":
matrix2=ts2.get_matrix()
else:
				# Must use this reduce method for c6 symmetry.
ts2_reduced=syms.reduce(ts2,n)
matrix2=ts2_reduced.get_matrix()
v1_1=Vec3f(matrix1[0],matrix1[4],matrix1[8])
v1_2=Vec3f(matrix1[1],matrix1[5],matrix1[9])
v1_3=Vec3f(matrix1[2],matrix1[6],matrix1[10])
v2_1=Vec3f(matrix2[0],matrix2[4],matrix2[8])
v2_2=Vec3f(matrix2[1],matrix2[5],matrix2[9])
v2_3=Vec3f(matrix2[2],matrix2[6],matrix2[10])
diff_angs = degrees(acos(CLIP(v1_1.dot(v2_1))))
diff_angs += degrees(acos(CLIP(v1_2.dot(v2_2))))
diff_angs += degrees(acos(CLIP(v1_3.dot(v2_3))))
if diff_angs < diff_angs_min:
diff_angs_min = diff_angs
return diff_angs_min / 3
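# In-plane distance (in unbinned pixels) between two refined particle centres:
# picked coordinate minus the refined origin shift scaled by the binning
# factor of each data set.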
def calculateTranslationDistance(x1,y1,sx1,sy1,b1,x2,y2,sx2,sy2,b2):
return ((x2 - (sx2 * b2) - (x1 - sx1 * b1) ) ** 2 + ( y2 - ( sy2 * b2) - (y1 - sy1 * b1) ) ** 2 ) ** 0.5
def CloseEnough(record1,i_x1,i_y1,i_sx1,i_sy1,binningfactor1,record2,i_x2,i_y2,i_sx2,i_sy2,binningfactor2,overlap):
x1,y1,sx1,sy1=getCoordOriginFromRelionDataRecord(record1,i_x1,i_y1,i_sx1,i_sy1)
x2,y2,sx2,sy2=getCoordOriginFromRelionDataRecord(record2,i_x2,i_y2,i_sx2,i_sy2)
return calculateTranslationDistance(x1,y1,sx1,sy1,binningfactor1,x2,y2,sx2,sy2,binningfactor2) <= overlap
def calculateEulerTransformation(e1,e2,e3,e4,e5,e6,type="spider",symmetry="c1"):
if type == "spider":
syms=Symmetries.get(symmetry)
ts1=Transform({"type":"spider","phi":e1,"theta":e2,"psi":e3})
ts2=Transform({"type":"spider","phi":e4,"theta":e5,"psi":e6})
# Find Euler transformation of volume 1 with Euler 1 relative to volume 2 with Euler 2
# ts1: 30S, ts2: 50S. For ratched motion, 30S rotated against 50S by transform_1relativeto2.
# And then 30S and 50S rotate together by ts2, getting the final position ts1 of 30S.
# ts1 = ts2 * transform_1relativeto2
ts2.invert()
transform_1relativeto2 = ts2 * ts1
Eulers = transform_1relativeto2.get_rotation("spider")
return Eulers["phi"],Eulers["theta"],Eulers["psi"]
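# Minimal usage sketch (hypothetical angle values, assuming EMAN2's Transform
# and Symmetries are importable as above):
#   phi, theta, psi = calculateEulerTransformation(10., 20., 30., 40., 50., 60.,
#                                                  type="spider", symmetry="c1")
# returns the spider Euler angles of volume 1 expressed in the frame of volume 2.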
def main():
prog_name=os.path.basename(sys.argv[0])
usage="""
# Compare Euler angles between two metadata files which are specified with --input_data_star1 and --input_data_star2.
# Corresponding particles in these two files must have the same image name. This is usually not the case.
	# You can use the following command to substitute the suffix of data2.star with the suffix of data1.star so that corresponding particles share the same image name. Then use the intermediate file temp_data2.star as --input_data_star2.
# cat data2.star | sed 's/suffixdata2/suffixdata1/g' > temp_data2.star
# Find Euler angle distance between two data.star files
# The fields in output file are:
# ImageName,Phi1,Theta1,Psi1,Phi2,Theta2,Psi2,deltaEuler,deltaPhi,deltaTheta,deltaPsi
# deltaEuler is the Euler angle distance of all of three Euler angles (phi, theta, psi).
# deltaPhi/Theta/Psi is calculated by algebraic subtraction of respective Euler angle.
{prog} < --input_data_star1 data1.star> < --input_data_star2 temp_data2.star > < -o output file > --calculateEulerError --sameImageName
	# Find the rotation of the reference volume of data1.star relative to the reference volume of data2.star:
# The fields in output file are:
# ImageName, PhiET, ThetaET, PsiET, PhiET+PsiET, deltaPhi, deltaTheta, deltaPsi
	# ET is the abbreviation for Euler Transformation, which is calculated by transformation of the Euler angle matrices.
# delta??? is calculated by algebraic subtraction.
{prog} < --input_data_star1 data1.star> < --input_data_star2 temp_data2.star > < -o output file > --calculateEulerTransform --sameImageName
	# Transform the Euler angles of particles relative to a rotated or flipped reference:
{prog} < --input_data_star1 data1.star> < -o output star file > < --TransformEuler > < [ --target_rot --target_tilt
--target_psi ] | [ --target_emanaz --target_emanalt --target_emanphi ] | [ --flipX | --flipY | --flipZ ]>
""".format(prog=prog_name)
optParser= OptionParser(usage)
optParser.add_option("--input_data_star1",action="store",type="str",dest="input_data_star1",default="",help="Input relion data.star file #1. [default: %default]")
optParser.add_option("--input_data_star2",action="store",type="str",dest="input_data_star2",default="",help="Input relion data.star file #2, usually a reference data.star with pre-determined values. [default: %default]")
optParser.add_option("--sameImageName",action="store_true",dest="sameImageName",default=False,help="Two input data.star files have same image names for same particles. [default: %default]")
	optParser.add_option("--baseOnOverlap",action="store_true",dest="baseOnOverlap",default=False,help="The particles in the input data.star files close enough to each other are considered as the same particles. [default: %default]")
	optParser.add_option("--overlap",action="store",type="float",dest="overlap",default=20.,help="The distance in pixels below which particles are considered to overlap each other. [default: %default]")
optParser.add_option("--CmpAllOverlap",action="store_true",dest="CmpAllOverlap",default=False,help="Compare all duplicated particles if the second star file contains such particles when calculateEulerError or calculateTransError. The minimum distance will be saved. [default: %default]")
optParser.add_option("-o","--outputstar",action="store",type="str",dest="outputstar",default="",help="Output relion data.star file. [default: %default]")
optParser.add_option("--calculateVariance",action="store",dest="calculateVariance",default="",help="The relion label which variance to be calculated. [default: %default]")
optParser.add_option("--ErrorBound",action="store",type="float",dest="ErrorBound",default=5.0,help="The Error bound to calculate variance and fit SD. Must be positive. [default: %default]")
optParser.add_option("--ErrorDistribution",action="store",type="str",dest="ErrorDistribution",default="",help="The output figure name of the error distribution of the label. [default: %default]")
optParser.add_option("--OutputSD",action="store",type="float",dest="OutputSD",default=2.0,help="Output particles within such SD. [default: %default]")
optParser.add_option("--calculateTransError",action="store_true",dest="calculateTransError",default=False,help="Calculate Translation differences between two input data.star. [default: %default]")
optParser.add_option("--binningfactor1",action="store",type="float",dest="binningfactor1",default=1.0,help="Binning factor of input_data_star #1. [default: %default]")
optParser.add_option("--binningfactor2",action="store",type="float",dest="binningfactor2",default=1.0,help="Binning factor of input_data_star #2. [default: %default]")
optParser.add_option("--calculateEulerError",action="store_true",dest="calculateEulerError",default=False,help="Calculate Euler angles differences between two input data.star. [default: %default]")
optParser.add_option("--flipX",action="store_true",dest="flipX",default=False,help="The references for the two input data.star files are flipped along X axis. [default: %default]")
optParser.add_option("--flipY",action="store_true",dest="flipY",default=False,help="The references for the two input data.star files are flipped along Y axis. [default: %default]")
optParser.add_option("--flipZ",action="store_true",dest="flipZ",default=False,help="The references for the two input data.star files are flipped along Z axis. [default: %default]")
	optParser.add_option("--noflip",action="store_true",dest="noflip",default=False,help="The references for the two input data.star files are not flipped relative to each other. [default: %default]")
optParser.add_option("--calculateEulerTransform",action="store_true",dest="calculateEulerTransform",default=False,help="Calculate Euler angle transformation of volume of input data.star 1 relative to volume of input data.star 2. [default: %default]")
optParser.add_option("--TransformEuler",action="store_true",dest="TransformEuler",default=False,help="Transform Euler angle of particles against a rotated or flipped reference. [default: %default]")
	optParser.add_option("--target_rot",action="store",type="float",dest="target_rot",default=0.0,help="Euler angle rot of the rotated reference relative to the original ref. Overwrites the eman angles. [default: %default]")
	optParser.add_option("--target_tilt",action="store",type="float",dest="target_tilt",default=0.0,help="Euler angle tilt of the rotated reference relative to the original ref. Overwrites the eman angles. [default: %default]")
	optParser.add_option("--target_psi",action="store",type="float",dest="target_psi",default=0.0,help="Euler angle psi of the rotated reference relative to the original ref. Overwrites the eman angles. [default: %default]")
optParser.add_option("--target_emanaz",action="store",type="float",dest="target_emanaz",default=0.0,help="Euler angle EMAN az of the rotated reference relative to original ref. [default: %default]")
optParser.add_option("--target_emanalt",action="store",type="float",dest="target_emanalt",default=0.0,help="Euler angle EMAN alt of the rotated reference relative to original ref. [default: %default]")
optParser.add_option("--target_emanphi",action="store",type="float",dest="target_emanphi",default=0.0,help="Euler angle EMAN phi of the rotated reference relative to original ref. [default: %default]")
	optParser.add_option("--TransformAlignment",action="store",type="str",dest="TransformAlignment",default=False,help="Transform alignment parameters of particles so that they align against another reference. The input is the 12 elements of the matrix separated by ','. The matrix contains 3 rows and 4 columns. The first three columns are the rotation matrix; the last column is the translation. The other reference would be superimposed with the current reference after applying this matrix. [default: %default]")
optParser.add_option("--TransformEulerbySym",action="store_true",dest="TransformEulerbySym",default=False,help="Transform Euler angle by symmetry. Output a series of files with suffix '_sym?'. [default: %default]")
optParser.add_option("--RandomEulerbySym",action="store_true",dest="RandomEulerbySym",default=False,help="Random Euler angle by symmetry. (Random choose a symmetry-related Euler angle for particles)'. [default: %default]")
optParser.add_option("--symmetry",action="store",type="str",dest="symmetry",default="C1",help="The symmetry used in the 3D reconstruction. [default: %default]")
#optParser.add_option("--Euleroff_Phi",action="store",type="float",dest="Euleroff_Phi",default=0.0,help="PHI of Euler angle of reference of input data.star 1 relative to that of input 2. [default: %default]")
(options,args)=optParser.parse_args()
data1=data_meta(options.input_data_star1)
# find indexes
try:
i_MicrographName_1 = data1.data_["labels"]["_rlnMicrographName"] - 1
i_ImageName_1 = data1.data_["labels"]["_rlnImageName"] - 1
dataname1="data_"
except:
i_MicrographName_1 = data1.data_images["labels"]["_rlnMicrographName"] - 1
i_ImageName_1 = data1.data_images["labels"]["_rlnImageName"] - 1
dataname1="data_images"
try:
i_CoordinateX_1 = data1.__getattribute__(dataname1)["labels"]["_rlnCoordinateX"] - 1
i_CoordinateY_1 = data1.__getattribute__(dataname1)["labels"]["_rlnCoordinateY"] - 1
except:
pass
if options.input_data_star2:
data2=data_meta(options.input_data_star2)
# find indexes
try:
i_MicrographName_2 = data2.data_["labels"]["_rlnMicrographName"] - 1
i_ImageName_2 = data2.data_["labels"]["_rlnImageName"] - 1
dataname2="data_"
except :
i_MicrographName_2 = data2.data_images["labels"]["_rlnMicrographName"] - 1
i_ImageName_2 = data2.data_images["labels"]["_rlnImageName"] - 1
dataname2="data_images"
try:
i_CoordinateX_2 = data2.__getattribute__(dataname2)["labels"]["_rlnCoordinateX"] - 1
i_CoordinateY_2 = data2.__getattribute__(dataname2)["labels"]["_rlnCoordinateY"] - 1
except :
pass
		# Organize the second data.star records by micrograph
allmics2={}
for i in range(len(data2.__getattribute__(dataname2)["datas"])):
mic = data2.__getattribute__(dataname2)["datas"][i][i_MicrographName_2]
if allmics2.has_key(mic):
allmics2[mic].append(data2.__getattribute__(dataname2)["datas"][i])
else:
allmics2[mic]=[]
allmics2[mic].append(data2.__getattribute__(dataname2)["datas"][i])
if options.calculateVariance:
try:
i_variance1=data1.__getattribute__(dataname1)["labels"][options.calculateVariance] - 1
i_variance2=data2.__getattribute__(dataname2)["labels"][options.calculateVariance] - 1
except KeyError:
print "Please input a proper label to calculate the variance."
exit(-1)
i_x1,i_y1,i_sx1,i_sy1=getCoordOriginIndexes(data1.__getattribute__(dataname1)["labels"])
i_x2,i_y2,i_sx2,i_sy2=getCoordOriginIndexes(data2.__getattribute__(dataname2)["labels"])
if options.calculateVariance[4:] in [ "DefocusAngle","AngleTilt","AngleRot","AnglePsi" ]:
myunit="Degrees"
elif options.calculateVariance[4:] in [ "OriginX","OriginY" ]:
myunit="Pixels"
elif options.calculateVariance[4:] in [ "DefocusU","DefocusV","UpdatedDefocusU","UpdatedDefocusV" ]:
myunit="Angstroms"
elif options.calculateVariance[4:] in [ "BeamTiltX","BeamTiltY" ]:
myunit="mRads"
else:
myunit="Unit"
rotrange=360.
if options.symmetry.startswith(("c","C")) and int(options.symmetry[1:]) > 1 :
rotrange /= int(options.symmetry[1:])
myvalues=[]
if options.outputstar:
myrecord1=[]
for record1 in data1.__getattribute__(dataname1)["datas"]:
mic = record1[i_MicrographName_1]
if allmics2.has_key(mic):
for record2 in allmics2[mic]:
x1,y1,sx1,sy1=getCoordOriginFromRelionDataRecord(record1,i_x1,i_y1,i_sx1,i_sy1)
x2,y2,sx2,sy2=getCoordOriginFromRelionDataRecord(record2,i_x2,i_y2,i_sx2,i_sy2)
if (options.sameImageName and record1[i_ImageName_1] == record2[i_ImageName_2] ) or \
(options.baseOnOverlap and CloseEnough(record1,i_x1,i_y1,i_sx1,i_sy1,options.binningfactor1,record2,i_x2,i_y2,i_sx2,i_sy2,options.binningfactor2,options.overlap)) :
if options.calculateVariance == "_rlnOriginX":
my_error= x1 - sx1 * options.binningfactor1 - x2 + sx2 * options.binningfactor2
elif options.calculateVariance == "_rlnOriginY":
my_error= y1 - sy1 * options.binningfactor1 - y2 + sy2 * options.binningfactor2
elif options.calculateVariance == "_rlnAngleTilt":
#if float(record2[i_variance2]) <= 170. and float(record2[i_variance2]) >= 10. :
my_error= float(record1[i_variance1]) - float(record2[i_variance2])
elif options.calculateVariance == "_rlnAngleRot":
my_error= (float(record1[i_variance1]) - float(record2[i_variance2]) + rotrange / 2. ) % rotrange - rotrange / 2.
elif options.calculateVariance == "_rlnAnglePsi":
my_error= (float(record1[i_variance1]) - float(record2[i_variance2]) + 180. ) % 360. - 180.
else:
my_error= float(record1[i_variance1]) - float(record2[i_variance2])
myvalues.append(my_error)
if options.outputstar:
myrecord1.append((record1,my_error))
if options.sameImageName:
print "There are %d common particles between the two input files."%(len(myvalues))
if options.baseOnOverlap:
print "There are %d common particles between the two input files within an overlap limit of %f pixels."%(len(myvalues),options.overlap)
import numpy as np
myvalues=np.array(myvalues)
#print "The SD of the error of %s is %f %s"%(options.calculateVariance,myvalues.var()**0.5,myunit.lower())
#if options.calculateVariance == "_rlnAngleRot" or options.calculateVariance == "_rlnAnglePsi" or options.calculateVariance == "_rlnAngleTilt":
myvalues=myvalues[np.abs(myvalues) <= options.ErrorBound ]
print "There are %d common particles after removing particles with error larger than %f"%(myvalues.shape[0],options.ErrorBound)
#print "The SD of the error of %s is %f %s after removing particles."%(options.calculateVariance,myvalues.var()** 0.5,myunit.lower())
import matplotlib.pyplot as plt
from scipy.stats import norm
from scipy.optimize import leastsq
def func(x,p):
bg,SD,myloc=p
return norm.pdf(x,loc=myloc,scale=SD) + bg
def residuals(p,y,x):
return y - func(x,p)
mypdf,bins,patches=plt.hist(myvalues,bins=100,normed=True,label="Distribution of %s Error"%(options.calculateVariance[4:]))
plsq=leastsq(residuals,(0.01,myvalues.var() ** 0.5,0.0),args=(mypdf,bins[:-1]))
#print plsq
#fit_SD=norm.fit(myvalues,floc = 0.0)[1]
print "The fit SD of %s is %.2f"%(options.calculateVariance[4:],plsq[0][1])
plt.plot(bins,norm.pdf(bins,loc=plsq[0][2],scale=plsq[0][1]) + plsq[0][0],'g+-',label="Fit Curve")
plt.ylabel(r"Distribution",fontsize=18)
plt.xlabel(r"%s Error in %s"%(options.calculateVariance[4:],myunit.lower()),fontsize=18)
plt.text(1.0,norm.pdf(0.0,loc=plsq[0][2],scale=plsq[0][1]),"SD = %.2f %s"%(plsq[0][1],myunit.lower()))
#plt.legend()
if options.ErrorDistribution:
plt.savefig(options.ErrorDistribution,dpi=90)
#plt.show()
if options.outputstar:
myrecord1=[recordError[0] for recordError in myrecord1 if abs(recordError[1]) <= options.OutputSD * plsq[0][1] ]
data1.__getattribute__(dataname1)["datas"]=myrecord1
data1.write(options.outputstar)
if options.calculateTransError:
i_x1,i_y1,i_sx1,i_sy1=getCoordOriginIndexes(data1.__getattribute__(dataname1)["labels"])
i_x2,i_y2,i_sx2,i_sy2=getCoordOriginIndexes(data2.__getattribute__(dataname2)["labels"])
count=0
diff_transs=0.0
for record1 in data1.__getattribute__(dataname1)["datas"]:
mic = record1[i_MicrographName_1]
if allmics2.has_key(mic):
diff_trans = -1
x1,y1,sx1,sy1=getCoordOriginFromRelionDataRecord(record1,i_x1,i_y1,i_sx1,i_sy1)
for record2 in allmics2[mic]:
x2,y2,sx2,sy2=getCoordOriginFromRelionDataRecord(record2,i_x2,i_y2,i_sx2,i_sy2)
mydiff_trans = calculateTranslationDistance(x1,y1,sx1,sy1,options.binningfactor1,x2,y2,sx2,sy2,options.binningfactor2)
if (options.sameImageName and record1[i_ImageName_1] == record2[i_ImageName_2]) or ( options.baseOnOverlap and mydiff_trans<=options.overlap ):
if not options.CmpAllOverlap:
diff_trans=mydiff_trans
break
else:
if diff_trans < 0 or mydiff_trans < diff_trans:
diff_trans=mydiff_trans
if diff_trans >= 0.:
count += 1
diff_transs += diff_trans
if options.outputstar:
record1.append(str(diff_trans))
if options.outputstar:
			print "There are %d matching records between the two star files"%(count)
print "The average translation difference between two input star files is: %f pixels"%(diff_transs / count)
data1.write(options.outputstar)
	if options.calculateEulerError:
if options.baseOnOverlap:
i_x1,i_y1,i_sx1,i_sy1=getCoordOriginIndexes(data1.__getattribute__(dataname1)["labels"])
i_x2,i_y2,i_sx2,i_sy2=getCoordOriginIndexes(data2.__getattribute__(dataname2)["labels"])
diff_eulers = 0.0
count = 0
i_rot1,i_tilt1,i_psi1=getEulersIndexes(data1.__getattribute__(dataname1)["labels"])
i_rot2,i_tilt2,i_psi2=getEulersIndexes(data2.__getattribute__(dataname2)["labels"])
outlines=[]
for record1 in data1.__getattribute__(dataname1)["datas"]:
mic = record1[i_MicrographName_1]
if allmics2.has_key(mic):
diff_euler= -1
e1,e2,e3=getEulersFromRelionDataRecord(record1,i_rot1,i_tilt1,i_psi1)
for record2 in allmics2[mic]:
if (options.sameImageName and record1[i_ImageName_1] == record2[i_ImageName_2] ) or \
(options.baseOnOverlap and CloseEnough(record1,i_x1,i_y1,i_sx1,i_sy1,options.binningfactor1,record2,i_x2,i_y2,i_sx2,i_sy2,options.binningfactor2,options.overlap)) :
e4,e5,e6=getEulersFromRelionDataRecord(record2,i_rot2,i_tilt2,i_psi2)
if options.flipX:
e4 *= -1
e5 = 180. - e5
elif options.flipY:
e4 = 180. - e4
e5 = 180. - e5
elif options.flipZ:
e5 *= -1
#e4 += options.Euleroff_Phi
mydiff_euler = calculateEulerDistance(e1,e2,e3,e4,e5,e6,type="spider",symmetry=options.symmetry)
if not options.CmpAllOverlap:
diff_euler=mydiff_euler
break
else:
if diff_euler < 0 or mydiff_euler < diff_euler:
diff_euler=mydiff_euler
if diff_euler >= 0.:
count += 1
diff_eulers += diff_euler
if options.outputstar:
if options.flipZ or options.flipX or options.flipY or options.noflip:
record1.append(str(diff_euler))
else :
outlines.append("%s %f %f %f %f %f %f %f %f %f %f\n"%(record1[i_ImageName_1],e1,e2,e3,e4,e5,e6,diff_euler,RegularAnglePhiPsi(e4-e1),e5-e2,e6-e3))
if options.outputstar:
			print "There are %d matching records between the two star files"%(count)
print "The average Euler angle difference between two input star files is: %f degrees"%(diff_eulers / count)
if options.flipZ or options.flipX or options.flipY or options.noflip :
data1.write(options.outputstar)
else:
f = open(options.outputstar,"w")
for outline in outlines:
f.write(outline)
f.close()
print "The fields in output file are:"
print "ImageName,Phi1,Theta1,Psi1,Phi2,Theta2,Psi2,deltaEuler,deltaPhi,deltaTheta,deltaPsi"
if options.TransformEulerbySym:
i_rot1,i_tilt1,i_psi1=getEulersIndexes(data1.__getattribute__(dataname1)["labels"])
allEulers=[]
for record1 in data1.__getattribute__(dataname1)["datas"]:
e1,e2,e3=getEulersFromRelionDataRecord(record1,i_rot1,i_tilt1,i_psi1)
allEulers.append((e1,e2,e3))
allTrans=[Transform({"type":"spider","phi":e[0],"theta":e[1],"psi":e[2]}) for e in allEulers]
allEulers=None
syms=Symmetries.get(options.symmetry)
for index_sym in range(syms.get_nsym()):
dirname=os.path.dirname(options.input_data_star1)
basename=os.path.splitext(os.path.basename(options.input_data_star1))[0]
symed_star = os.path.join(dirname,basename+"_sym%d.star"%(index_sym))
my_current_sym = syms.get_sym(index_sym)
n=0
for record1 in data1.__getattribute__(dataname1)["datas"]:
current_trans=allTrans[n]
trans_by_sym = current_trans * my_current_sym
Eulers_by_sym = trans_by_sym.get_rotation("spider")
record1[i_rot1]=str(Eulers_by_sym["phi"])
record1[i_tilt1]=str(Eulers_by_sym["theta"])
record1[i_psi1]=str(Eulers_by_sym["psi"])
n += 1
data1.write(symed_star)
if options.TransformEuler:
TE=Transform()
do_flip=False
if (options.target_rot != 0 or options.target_tilt != 0 or options.target_psi != 0):
TE.set_rotation({'type':"spider",'phi':options.target_rot,'theta':options.target_tilt,'psi':options.target_psi })
elif (options.target_emanaz != 0 or options.target_emanalt != 0 or options.target_emanphi != 0) :
TE.set_rotation({'type':"eman",'az':options.target_emanaz,'alt':options.target_emanalt,'phi':options.target_emanphi })
elif (options.flipX or options.flipY or options.flipZ):
do_flip=True
else:
print "Nothing to do. Exit"
exit()
TE.invert()
i_rot1,i_tilt1,i_psi1=getEulersIndexes(data1.__getattribute__(dataname1)["labels"])
for record1 in data1.__getattribute__(dataname1)["datas"]:
e1,e2,e3=getEulersFromRelionDataRecord(record1,i_rot1,i_tilt1,i_psi1)
if not do_flip:
myTrans=Transform({"type":"spider","phi":e1,"theta":e2,"psi":e3})
myTE = myTrans * TE
Eulers_by_TE = myTE.get_rotation("spider")
record1[i_rot1]=str(Eulers_by_TE["phi"])
record1[i_tilt1]=str(Eulers_by_TE["theta"])
record1[i_psi1]=str(Eulers_by_TE["psi"])
else:
if options.flipX:
e1 *= -1
e2 = 180. - e2
elif options.flipY:
e1 = 180. - e1
e2 = 180. - e2
elif options.flipZ:
e2 *= -1
record1[i_rot1]=str(e1)
record1[i_tilt1]=str(e2)
record1[i_psi1]=str(e3)
data1.write(options.outputstar)
if options.TransformAlignment:
# ref0 is superimposed with ref1 after applying (A0,t0)
# ref1 is superimposed with current particles after applying (A1,t1) and projection.
# v1 = A0*v0 + t0
# vp = A1*v1 + t1
# Ap = A1*A0
# tp = A1*t0 + t1
try:
TE=Transform([float(i) for i in options.TransformAlignment.split(",")])
except:
try:
TE=Transform([float(i) for i in options.TransformAlignment.split()])
except:
				print "Please input proper transformation matrix elements. Exit"
exit()
i_rot1,i_tilt1,i_psi1=getEulersIndexes(data1.__getattribute__(dataname1)["labels"])
i_x1,i_y1,i_sx1,i_sy1=getCoordOriginIndexes(data1.__getattribute__(dataname1)["labels"])
for record1 in data1.__getattribute__(dataname1)["datas"]:
e1,e2,e3=getEulersFromRelionDataRecord(record1,i_rot1,i_tilt1,i_psi1)
x1,y1,sx1,sy1=getCoordOriginFromRelionDataRecord(record1,i_x1,i_y1,i_sx1,i_sy1)
myTrans=Transform({"type":"spider","phi":e1,"theta":e2,"psi":e3})
myTE = myTrans * TE
Eulers_by_TE = myTE.get_rotation("spider")
record1[i_rot1]=str(Eulers_by_TE["phi"])
record1[i_tilt1]=str(Eulers_by_TE["theta"])
record1[i_psi1]=str(Eulers_by_TE["psi"])
trans_by_TE = myTE.get_trans_2d() * -1
record1[i_sx1]=str(sx1 + trans_by_TE[0] )
record1[i_sy1]=str(sy1 + trans_by_TE[1])
data1.write(options.outputstar)
if options.RandomEulerbySym:
from random import randrange
i_rot1,i_tilt1,i_psi1=getEulersIndexes(data1.__getattribute__(dataname1)["labels"])
syms=Symmetries.get(options.symmetry)
nsyms =syms.get_nsym()
for record1 in data1.__getattribute__(dataname1)["datas"]:
e1,e2,e3 = getEulersFromRelionDataRecord(record1,i_rot1,i_tilt1,i_psi1)
current_trans = Transform({"type":"spider","phi":e1,"theta":e2,"psi":e3})
# Random choose a symmetry index
my_chosen_sym = syms.get_sym(randrange(nsyms))
trans_by_sym = current_trans * my_chosen_sym
Eulers_by_sym = trans_by_sym.get_rotation("spider")
record1[i_rot1]=str(Eulers_by_sym["phi"])
record1[i_tilt1]=str(Eulers_by_sym["theta"])
record1[i_psi1]=str(Eulers_by_sym["psi"])
dirname = os.path.dirname(options.input_data_star1)
basename = os.path.splitext(os.path.basename(options.input_data_star1))[0]
randomsymed_star = os.path.join(dirname,basename+"_randomsym.star")
data1.write(randomsymed_star)
if options.calculateEulerTransform:
if options.baseOnOverlap:
i_x1,i_y1,i_sx1,i_sy1=getCoordOriginIndexes(data1.__getattribute__(dataname1)["labels"])
i_x2,i_y2,i_sx2,i_sy2=getCoordOriginIndexes(data2.__getattribute__(dataname2)["labels"])
i_rot1,i_tilt1,i_psi1=getEulersIndexes(data1.__getattribute__(dataname1)["labels"])
i_rot2,i_tilt2,i_psi2=getEulersIndexes(data2.__getattribute__(dataname2)["labels"])
if options.outputstar:
f = open(options.outputstar,"w")
for record1 in data1.__getattribute__(dataname1)["datas"]:
mic = record1[i_MicrographName_1]
if allmics2.has_key(mic):
for record2 in allmics2[mic]:
if (options.sameImageName and record1[i_ImageName_1] == record2[i_ImageName_2] ) or \
(options.baseOnOverlap and CloseEnough(record1,i_x1,i_y1,i_sx1,i_sy1,options.binningfactor1,record2,i_x2,i_y2,i_sx2,i_sy2,options.binningfactor2,options.overlap)) :
e1,e2,e3=getEulersFromRelionDataRecord(record1,i_rot1,i_tilt1,i_psi1)
e4,e5,e6=getEulersFromRelionDataRecord(record2,i_rot2,i_tilt2,i_psi2)
phi,theta,psi = calculateEulerTransformation(e1,e2,e3,e4,e5,e6,type="spider",symmetry=options.symmetry)
f.write("%s %f %f %f %f %f %f %f\n"%(record1[i_ImageName_1],phi,theta,psi,RegularAnglePhiPsi(phi + psi),e4 - e1,e5 - e2 ,e6 - e3))
break
if options.outputstar:
f.close()
print "The fields in output file are:"
print "ImageName, PhiET, ThetaET, PsiET, PhiET+PsiET, deltaPhi, deltaTheta, deltaPsi"
if __name__ == "__main__":
main()
| gpl-3.0 |
syl20bnr/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/projections/geo.py | 69 | 19738 | import math
import numpy as np
import numpy.ma as ma
import matplotlib
rcParams = matplotlib.rcParams
from matplotlib.artist import kwdocd
from matplotlib.axes import Axes
from matplotlib import cbook
from matplotlib.patches import Circle
from matplotlib.path import Path
from matplotlib.ticker import Formatter, Locator, NullLocator, FixedLocator, NullFormatter
from matplotlib.transforms import Affine2D, Affine2DBase, Bbox, \
BboxTransformTo, IdentityTransform, Transform, TransformWrapper
class GeoAxes(Axes):
"""
An abstract base class for geographic projections
"""
class ThetaFormatter(Formatter):
"""
Used to format the theta tick labels. Converts the native
unit of radians into degrees and adds a degree symbol.
"""
def __init__(self, round_to=1.0):
self._round_to = round_to
def __call__(self, x, pos=None):
degrees = (x / np.pi) * 180.0
degrees = round(degrees / self._round_to) * self._round_to
if rcParams['text.usetex'] and not rcParams['text.latex.unicode']:
return r"$%0.0f^\circ$" % degrees
else:
return u"%0.0f\u00b0" % degrees
RESOLUTION = 75
def cla(self):
Axes.cla(self)
self.set_longitude_grid(30)
self.set_latitude_grid(15)
self.set_longitude_grid_ends(75)
self.xaxis.set_minor_locator(NullLocator())
self.yaxis.set_minor_locator(NullLocator())
self.xaxis.set_ticks_position('none')
self.yaxis.set_ticks_position('none')
self.grid(rcParams['axes.grid'])
Axes.set_xlim(self, -np.pi, np.pi)
Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)
def _set_lim_and_transforms(self):
# A (possibly non-linear) projection on the (already scaled) data
self.transProjection = self._get_core_transform(self.RESOLUTION)
self.transAffine = self._get_affine_transform()
self.transAxes = BboxTransformTo(self.bbox)
# The complete data transformation stack -- from data all the
# way to display coordinates
self.transData = \
self.transProjection + \
self.transAffine + \
self.transAxes
# This is the transform for longitude ticks.
self._xaxis_pretransform = \
Affine2D() \
.scale(1.0, self._longitude_cap * 2.0) \
.translate(0.0, -self._longitude_cap)
self._xaxis_transform = \
self._xaxis_pretransform + \
self.transData
self._xaxis_text1_transform = \
Affine2D().scale(1.0, 0.0) + \
self.transData + \
Affine2D().translate(0.0, 4.0)
self._xaxis_text2_transform = \
Affine2D().scale(1.0, 0.0) + \
self.transData + \
Affine2D().translate(0.0, -4.0)
# This is the transform for latitude ticks.
yaxis_stretch = Affine2D().scale(np.pi * 2.0, 1.0).translate(-np.pi, 0.0)
yaxis_space = Affine2D().scale(1.0, 1.1)
self._yaxis_transform = \
yaxis_stretch + \
self.transData
yaxis_text_base = \
yaxis_stretch + \
self.transProjection + \
(yaxis_space + \
self.transAffine + \
self.transAxes)
self._yaxis_text1_transform = \
yaxis_text_base + \
Affine2D().translate(-8.0, 0.0)
self._yaxis_text2_transform = \
yaxis_text_base + \
Affine2D().translate(8.0, 0.0)
def _get_affine_transform(self):
transform = self._get_core_transform(1)
xscale, _ = transform.transform_point((np.pi, 0))
_, yscale = transform.transform_point((0, np.pi / 2.0))
return Affine2D() \
.scale(0.5 / xscale, 0.5 / yscale) \
.translate(0.5, 0.5)
def get_xaxis_transform(self):
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad):
return self._xaxis_text1_transform, 'bottom', 'center'
def get_xaxis_text2_transform(self, pad):
return self._xaxis_text2_transform, 'top', 'center'
def get_yaxis_transform(self):
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad):
return self._yaxis_text1_transform, 'center', 'right'
def get_yaxis_text2_transform(self, pad):
return self._yaxis_text2_transform, 'center', 'left'
def _gen_axes_patch(self):
return Circle((0.5, 0.5), 0.5)
def set_yscale(self, *args, **kwargs):
if args[0] != 'linear':
raise NotImplementedError
set_xscale = set_yscale
def set_xlim(self, *args, **kwargs):
Axes.set_xlim(self, -np.pi, np.pi)
Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)
set_ylim = set_xlim
def format_coord(self, long, lat):
'return a format string formatting the coordinate'
long = long * (180.0 / np.pi)
lat = lat * (180.0 / np.pi)
if lat >= 0.0:
ns = 'N'
else:
ns = 'S'
if long >= 0.0:
ew = 'E'
else:
ew = 'W'
return u'%f\u00b0%s, %f\u00b0%s' % (abs(lat), ns, abs(long), ew)
def set_longitude_grid(self, degrees):
"""
Set the number of degrees between each longitude grid.
"""
number = (360.0 / degrees) + 1
self.xaxis.set_major_locator(
FixedLocator(
np.linspace(-np.pi, np.pi, number, True)[1:-1]))
        self._longitude_degrees = degrees
self.xaxis.set_major_formatter(self.ThetaFormatter(degrees))
def set_latitude_grid(self, degrees):
"""
        Set the number of degrees between each latitude grid.
"""
number = (180.0 / degrees) + 1
self.yaxis.set_major_locator(
FixedLocator(
np.linspace(-np.pi / 2.0, np.pi / 2.0, number, True)[1:-1]))
self._latitude_degrees = degrees
self.yaxis.set_major_formatter(self.ThetaFormatter(degrees))
def set_longitude_grid_ends(self, degrees):
"""
Set the latitude(s) at which to stop drawing the longitude grids.
"""
self._longitude_cap = degrees * (np.pi / 180.0)
self._xaxis_pretransform \
.clear() \
.scale(1.0, self._longitude_cap * 2.0) \
.translate(0.0, -self._longitude_cap)
def get_data_ratio(self):
'''
Return the aspect ratio of the data itself.
'''
return 1.0
### Interactive panning
def can_zoom(self):
"""
Return True if this axes support the zoom box
"""
return False
def start_pan(self, x, y, button):
pass
def end_pan(self):
pass
def drag_pan(self, button, key, x, y):
pass
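# Minimal usage sketch (assuming these axes classes are registered with
# matplotlib's projection registry, as matplotlib.projections does for the
# built-in 'aitoff', 'hammer', 'lambert' and 'mollweide' names):
#   import matplotlib.pyplot as plt
#   ax = plt.subplot(111, projection='mollweide')
#   ax.grid(True)
#   plt.show()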
class AitoffAxes(GeoAxes):
name = 'aitoff'
class AitoffTransform(Transform):
"""
The base Aitoff transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Aitoff transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Aitoff space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
# Pre-compute some values
half_long = longitude / 2.0
cos_latitude = np.cos(latitude)
alpha = np.arccos(cos_latitude * np.cos(half_long))
# Mask this array, or we'll get divide-by-zero errors
alpha = ma.masked_where(alpha == 0.0, alpha)
# We want unnormalized sinc. numpy.sinc gives us normalized
sinc_alpha = ma.sin(alpha) / alpha
x = (cos_latitude * np.sin(half_long)) / sinc_alpha
y = (np.sin(latitude) / sinc_alpha)
x.set_fill_value(0.0)
y.set_fill_value(0.0)
return np.concatenate((x.filled(), y.filled()), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return AitoffAxes.InvertedAitoffTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedAitoffTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, xy):
# MGDTODO: Math is hard ;(
return xy
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return AitoffAxes.AitoffTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.AitoffTransform(resolution)
class HammerAxes(GeoAxes):
name = 'hammer'
class HammerTransform(Transform):
"""
The base Hammer transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Hammer transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Hammer space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
# Pre-compute some values
half_long = longitude / 2.0
cos_latitude = np.cos(latitude)
sqrt2 = np.sqrt(2.0)
alpha = 1.0 + cos_latitude * np.cos(half_long)
x = (2.0 * sqrt2) * (cos_latitude * np.sin(half_long)) / alpha
y = (sqrt2 * np.sin(latitude)) / alpha
return np.concatenate((x, y), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return HammerAxes.InvertedHammerTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedHammerTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, xy):
x = xy[:, 0:1]
y = xy[:, 1:2]
quarter_x = 0.25 * x
half_y = 0.5 * y
z = np.sqrt(1.0 - quarter_x*quarter_x - half_y*half_y)
longitude = 2 * np.arctan((z*x) / (2.0 * (2.0*z*z - 1.0)))
latitude = np.arcsin(y*z)
return np.concatenate((longitude, latitude), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return HammerAxes.HammerTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.HammerTransform(resolution)
class MollweideAxes(GeoAxes):
name = 'mollweide'
class MollweideTransform(Transform):
"""
The base Mollweide transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Mollweide transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Mollweide space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
aux = 2.0 * np.arcsin((2.0 * latitude) / np.pi)
x = (2.0 * np.sqrt(2.0) * longitude * np.cos(aux)) / np.pi
y = (np.sqrt(2.0) * np.sin(aux))
return np.concatenate((x, y), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return MollweideAxes.InvertedMollweideTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedMollweideTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, xy):
# MGDTODO: Math is hard ;(
return xy
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return MollweideAxes.MollweideTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.MollweideTransform(resolution)
class LambertAxes(GeoAxes):
name = 'lambert'
class LambertTransform(Transform):
"""
The base Lambert transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, center_longitude, center_latitude, resolution):
"""
Create a new Lambert transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Lambert space.
"""
Transform.__init__(self)
self._resolution = resolution
self._center_longitude = center_longitude
self._center_latitude = center_latitude
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
clong = self._center_longitude
clat = self._center_latitude
cos_lat = np.cos(latitude)
sin_lat = np.sin(latitude)
diff_long = longitude - clong
cos_diff_long = np.cos(diff_long)
inner_k = (1.0 +
np.sin(clat)*sin_lat +
np.cos(clat)*cos_lat*cos_diff_long)
# Prevent divide-by-zero problems
inner_k = np.where(inner_k == 0.0, 1e-15, inner_k)
k = np.sqrt(2.0 / inner_k)
x = k*cos_lat*np.sin(diff_long)
y = k*(np.cos(clat)*sin_lat -
np.sin(clat)*cos_lat*cos_diff_long)
return np.concatenate((x, y), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return LambertAxes.InvertedLambertTransform(
self._center_longitude,
self._center_latitude,
self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedLambertTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, center_longitude, center_latitude, resolution):
Transform.__init__(self)
self._resolution = resolution
self._center_longitude = center_longitude
self._center_latitude = center_latitude
def transform(self, xy):
x = xy[:, 0:1]
y = xy[:, 1:2]
clong = self._center_longitude
clat = self._center_latitude
p = np.sqrt(x*x + y*y)
p = np.where(p == 0.0, 1e-9, p)
c = 2.0 * np.arcsin(0.5 * p)
sin_c = np.sin(c)
cos_c = np.cos(c)
lat = np.arcsin(cos_c*np.sin(clat) +
((y*sin_c*np.cos(clat)) / p))
long = clong + np.arctan(
(x*sin_c) / (p*np.cos(clat)*cos_c - y*np.sin(clat)*sin_c))
return np.concatenate((long, lat), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return LambertAxes.LambertTransform(
self._center_longitude,
self._center_latitude,
self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
self._center_longitude = kwargs.pop("center_longitude", 0.0)
self._center_latitude = kwargs.pop("center_latitude", 0.0)
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect('equal', adjustable='box', anchor='C')
self.cla()
def cla(self):
GeoAxes.cla(self)
self.yaxis.set_major_formatter(NullFormatter())
def _get_core_transform(self, resolution):
return self.LambertTransform(
self._center_longitude,
self._center_latitude,
resolution)
def _get_affine_transform(self):
return Affine2D() \
.scale(0.25) \
.translate(0.5, 0.5)
| gpl-3.0 |
liuwenf/moose | gui/utils/Plotter.py | 8 | 6217 | #!usr/bin/python
import sys, os, random
try:
from PyQt4 import QtCore, QtGui
QtCore.Signal = QtCore.pyqtSignal
QtCore.Slot = QtCore.pyqtSlot
except ImportError:
try:
from PySide import QtCore, QtGui
QtCore.QString = str
except ImportError:
raise ImportError("Cannot load either PyQt or PySide")
import numpy, csv
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
class PlotWidget(FigureCanvas):
"""This is the canvas Widget. It allows for MPL plot embedding """
def __init__(self, parent=None, width=9.85, height=5 , dpi=50):
self.fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = self.fig.add_subplot(111)
# We want the axes cleared every time plot() is called
self.axes.hold(False)
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self,
QtGui.QSizePolicy.Fixed,
QtGui.QSizePolicy.Fixed)
FigureCanvas.updateGeometry(self)
class MPLPlotter(QtGui.QWidget):
  """This is a widget that inherits from the PlotWidget class and is used to update the plot with postprocessor (PP) data"""
def __init__(self, plotData, plotName, parent = None):
QtGui.QWidget.__init__(self, parent)
self.plotData = plotData
self.plotName = plotName
self.canvas = PlotWidget()
self.plotTitle = plotName + ' Postprocessor'
self.getPlotColor()
self.setPlotData(self.plotData, self.plotName)
self.vbox = QtGui.QVBoxLayout()
self.vbox.addWidget(self.canvas)
self.setLayout(self.vbox)
# set button context menu policy
self.canvas.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.connect(self.canvas, QtCore.SIGNAL('customContextMenuRequested(const QPoint&)'), self.on_context_menu)
# create color menu
self.colorMenu = QtGui.QMenu('Plot Color', self)
royalBlueLine = QtGui.QAction('Blue',self)
royalBlueLine.triggered.connect(self.changeRoyalBlue)
orchidLine = QtGui.QAction('Magenta',self)
orchidLine.triggered.connect(self.changeOrchid)
tomatoLine = QtGui.QAction('Red',self)
tomatoLine.triggered.connect(self.changeTomato)
goldLine = QtGui.QAction('Yellow',self)
goldLine.triggered.connect(self.changeGold)
limeGreenLine = QtGui.QAction('Green',self)
limeGreenLine.triggered.connect(self.changeLimeGreen)
turquoiseLine = QtGui.QAction('Cyan',self)
turquoiseLine.triggered.connect(self.changeTurquoise)
blackLine = QtGui.QAction('Black',self)
blackLine.triggered.connect(self.changeBlack)
self.colorMenu.addAction(royalBlueLine)
self.colorMenu.addAction(orchidLine)
self.colorMenu.addAction(tomatoLine)
self.colorMenu.addAction(goldLine)
self.colorMenu.addAction(limeGreenLine)
self.colorMenu.addAction(turquoiseLine)
self.colorMenu.addAction(blackLine)
# create context menu
saveAction = QtGui.QAction('Save Plot', self)
saveAction.triggered.connect(self.savePlot)
closeAction = QtGui.QAction('Close Plot', self)
closeAction.triggered.connect(self.closePlot)
self.popMenu = QtGui.QMenu(self)
self.popMenu.addAction(saveAction)
self.popMenu.addSeparator()
self.popMenu.addMenu(self.colorMenu)
self.popMenu.addSeparator()
self.popMenu.addAction(closeAction)
def setPlotData(self, plotData, plotName):
self.plotData = plotData
self.plotName = plotName
self.xData = self.plotData[0]
self.yData = self.plotData[1]
# MPL plots
self.canvas.axes.plot(self.xData, self.yData, self.plotColor, linewidth = 2.5)
self.canvas.axes.set_xlabel('time')
self.canvas.axes.set_ylabel(self.plotName)
self.canvas.axes.set_title(self.plotTitle)
self.canvas.draw()
def on_context_menu(self, point):
# show context menu
self.popMenu.exec_(self.canvas.mapToGlobal(point))
def savePlot(self):
file_name = QtGui.QFileDialog.getSaveFileName(self, 'Save file', self.plotTitle, "Images (*.pdf)")
if isinstance(file_name, QtCore.QString):
file_name = str(file_name)
if not isinstance(file_name, basestring): # This happens when using pyside
file_name = file_name[0]
if file_name != '':
self.canvas.print_figure(unicode(file_name), dpi = 100)
def closePlot(self):
self.close()
def changeRoyalBlue(self):
self.plotColor = "RoyalBlue"
self.setPlotData(self.plotData,self.plotName)
def changeOrchid(self):
self.plotColor = "Magenta"
self.setPlotData(self.plotData,self.plotName)
def changeTomato(self):
self.plotColor = "Tomato"
self.setPlotData(self.plotData,self.plotName)
def changeGold(self):
self.plotColor = "Gold"
self.setPlotData(self.plotData,self.plotName)
def changeLimeGreen(self):
self.plotColor = "LimeGreen"
self.setPlotData(self.plotData,self.plotName)
def changeTurquoise(self):
self.plotColor = "DarkTurquoise"
self.setPlotData(self.plotData,self.plotName)
def changeBlack(self):
self.plotColor = "Black"
self.setPlotData(self.plotData,self.plotName)
def getPlotColor(self):
if (self.plotName[0] in ('a','A','f','F','k','K','p','P','u','U','z','Z')):
self.plotColor = "LimeGreen"
elif (self.plotName[0] in ('b','B','g','G','l','L','q','Q','v','V')):
self.plotColor = "DarkTurquoise"
elif (self.plotName[0] in ('c','C','h','H','m','M','r','R','w','W')):
self.plotColor = "RoyalBlue"
elif (self.plotName[0] in ('d','D','i','I','n','N','s','S','x','X')):
self.plotColor = "Magenta"
elif (self.plotName[0] in ('e','E','j','J','o','O','t','T','y','Y')):
self.plotColor = "Tomato"
else:
self.plotColor = "Gold"
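# Minimal standalone usage sketch (hypothetical data, assuming a running Qt
# application created elsewhere, e.g. app = QtGui.QApplication(sys.argv)):
#   widget = MPLPlotter(([0.0, 1.0, 2.0], [0.0, 0.5, 1.0]), 'my_postprocessor')
#   widget.show()
#   app.exec_()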
| lgpl-2.1 |
jungla/ICOM-fluidity-toolbox | Detectors/offline_advection/plot_vdisp_C.py | 1 | 3507 | #!~/python
import fluidity_tools
import matplotlib as mpl
#mpl.use('ps')
import matplotlib.pyplot as plt
import myfun
import numpy as np
import os
import fio
import advect_functions
exp = 'm_25_1'
filename0 = 'traj_m_25_1_particles_481_3400_3Dv.csv'
tt = 3400-481 # IC + 24-48 included
#x0 = range(500,1550,50)
#y0 = range(500,1550,50)
#z0 = range(0,52,2)
x0 = range(0,2000,50)
y0 = range(0,2000,50)
z0 = range(0,52,2)
#x0 = range(0,2000,50)
#y0 = range(0,2000,50)
#z0 = range(2,48,2)
xp = len(x0)
yp = len(y0)
zp = len(z0)
pt = xp*yp*zp
timet, par0 = advect_functions.read_particles_csv(filename0,pt,tt)
time0 = (timet[:-1])*360 - 360
par = np.reshape(par0,(pt,3,tt))
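# par is indexed as (particle, coordinate [x, y, z], time step)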
# FSLE
di = 2
time = time0 - time0[0]
import scipy
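# Finite-size Lyapunov exponent (FSLE): for each pair of consecutively indexed
# particles, record the first time their vertical separation exceeds df = r*di;
# the FSLE is then log(r) divided by that elapsed time (in hours).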
for r in [2]:
parL = range(pt-1)
fsle = np.zeros(pt)*np.nan
fslec = np.zeros((pt,3))
df=r*di # separation distance
#
# loop triplets in time
#
for t in range(tt-1):
for p in parL:
# print t,len(parL)
# loop particles
dr = np.linalg.norm(par[p,2,t]-par[p+1,2,t])
if (dr > df and np.isnan(fsle[p])):
fsle[p] = np.log(r)/(time[t]/3600) # fsle has the dimension of the first triplet
fslec[p,:] = par[p,:,0] # coords of the starting point
parL.remove(p)
#
# plot fsle
# 3D arrays of fsle and fslec
#
fsler = np.reshape(fsle,(xp,yp,zp))
fsler[:,:,-1] = 0
# fslexr = np.reshape(fslec[:,0],(nlat,nlon))
# fsleyr = np.reshape(fslec[:,1],(nlat,nlon))
# fslezr = np.reshape(fslec[:,2],(nlat,nlon))
#
plt.subplots(figsize=(9,7))
#plt.contourf(np.asarray(x0)/1000.,z0,np.rot90(scipy.stats.nanmean(fsler[:,:,:],0)),np.linspace(0,np.percentile(fsle[~np.isnan(fsle)],80),30),vmin=0,extend='both')
plt.contourf(np.asarray(y0)/1000.,z0,np.rot90(scipy.stats.nanmean(fsler[:,:,:],0)),np.linspace(0,0.08,30),vmin=0,extend='both')
plt.ylabel('Depth [m]', fontsize=26)
plt.xlabel('Y [km]', fontsize=26)
plt.yticks(np.linspace(0,50,5),np.linspace(-50,0,5),fontsize=24)
plt.xticks(fontsize=24)
cb = plt.colorbar(ticks=np.linspace(0,0.08,5))
cb.ax.tick_params(labelsize=24)
# plt.title(r'\lambda')
plt.tight_layout()
plt.savefig('./plot/'+exp+'/fsle_'+exp+'_'+str(r)+'.eps')
print './plot/'+exp+'/fsle_'+exp+'_'+str(r)+'.eps'
plt.close()
## PDF Vertical Displacement
#
#bins = np.linspace(-50,0,50)
#values = np.zeros((len(bins)-1,len(time0)))
#
#for t in range(0,tt-1,1):
#
# values[:,t], bins = np.histogram(par[:,2,t],bins)
#
#fig = plt.figure(figsize=(12,8))
## ax = fig.add_subplot(111, aspect='equal')
#plt.pcolor(time0/3600,bins,np.log(values),vmin=4)
#plt.colorbar()
#plt.xlim(time0[0]/3600,time0[-1]/3600)
#plt.xlabel('time')
#plt.ylabel('# particles')
#print 'Saving 0 to eps'
# #
## ax.text(1, 9, str(z)+'m, '+str(time[t]*3600)+'h', fontsize=18)
#plt.savefig('./plot/'+exp+'/vdisp_'+exp+'_'+str(tt)+'.eps')
#print './plot/'+exp+'/vdisp_'+exp+'_'+str(tt)+'.eps'
#plt.close()
#
# TRAJECTORIES
plt.subplots(figsize=(8,7))
for p in range(0,10000,103):
plt.plot(time0/86400,par[p,2,:],color='0.5')
plt.plot(time0/86400,par[98,2,:],'r', linewidth=4)
#plt.plot(time0/3600,par[200,2,:-1],'g', linewidth=4)
plt.plot(time0/86400,par[51,2,:],'b', linewidth=4)
plt.xticks(np.linspace(0,10,6),np.linspace(0,10,6).astype(int),fontsize=24)
plt.yticks(fontsize=24)
plt.ylim((-50,0))
plt.xlim(2,9)
plt.xlabel('Time [days]', fontsize=26)
plt.ylabel('Depth [m]', fontsize=26)
plt.tight_layout()
plt.savefig('./plot/'+exp+'/vtraj_'+exp+'_'+str(tt)+'.eps')
print './plot/'+exp+'/vtraj_'+exp+'_'+str(tt)+'.eps'
plt.close()
| gpl-2.0 |
Yue-Jiang/RNASeqQuant | bedtools-2.17.0/docs/conf.py | 1 | 8041 | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
sys.path.append(os.path.abspath('sphinxext'))
sys.path.append(os.path.abspath('pyplots'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.coverage', 'sphinx.ext.pngmath',
'sphinx.ext.ifconfig', 'sphinx.ext.viewcode',
'matplotlib.sphinxext.plot_directive']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'bedtools'
copyright = u'2012'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.16.2'
# The full version, including alpha/beta/rc tags.
release = '2.16.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'rtd'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = project + " v" + release
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'bedtools.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'bedtools.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'bedtools-docs'
# Google analytics
#googleanalytics_id = "UA-24167610-15"
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'bedtools.tex', u'Bedtools Documentation',
u'Quinlan lab @ UVa', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bedtools', u'Bedtools Documentation', [u'UVa'], 1)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
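# Note (added for clarity): the Mock class below stubs out heavyweight
# dependencies (see MOCK_MODULES further down) so Sphinx can import the
# documented code even when numpy/matplotlib are not installed; any attribute
# access on a Mock yields another Mock, an empty type for capitalized names,
# or '/dev/null' for path-like attributes.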
class Mock(object):
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
return type(name, (), {})
else:
return Mock()
MOCK_MODULES = ['numpy', 'matplotlib', 'matplotlib.pyplot',
'matplotlib.sphinxext', 'matplotlib.sphinxext.plot_directive']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
| mit |
public-ink/public-ink | server/appengine/lib/matplotlib/tri/tritools.py | 10 | 12880 | """
Tools for triangular grids.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib.tri import Triangulation
import numpy as np
class TriAnalyzer(object):
"""
Define basic tools for triangular mesh analysis and improvement.
    A TriAnalyzer encapsulates a :class:`~matplotlib.tri.Triangulation`
object and provides basic tools for mesh analysis and mesh improvement.
Parameters
----------
triangulation : :class:`~matplotlib.tri.Triangulation` object
The encapsulated triangulation to analyze.
Attributes
----------
`scale_factors`
"""
def __init__(self, triangulation):
if not isinstance(triangulation, Triangulation):
raise ValueError("Expected a Triangulation object")
self._triangulation = triangulation
@property
def scale_factors(self):
"""
Factors to rescale the triangulation into a unit square.
Returns *k*, tuple of 2 scale factors.
Returns
-------
k : tuple of 2 floats (kx, ky)
Tuple of floats that would rescale the triangulation :
``[triangulation.x * kx, triangulation.y * ky]``
fits exactly inside a unit square.
"""
compressed_triangles = self._triangulation.get_masked_triangles()
node_used = (np.bincount(np.ravel(compressed_triangles),
minlength=self._triangulation.x.size) != 0)
x = self._triangulation.x[node_used]
y = self._triangulation.y[node_used]
ux = np.max(x)-np.min(x)
uy = np.max(y)-np.min(y)
return (1./float(ux), 1./float(uy))
def circle_ratios(self, rescale=True):
"""
        Returns a measure of the triangulation triangles' flatness.
        The ratio of the incircle radius over the circumcircle radius is a
        widely used indicator of a triangle's flatness.
It is always ``<= 0.5`` and ``== 0.5`` only for equilateral
triangles. Circle ratios below 0.01 denote very flat triangles.
        To avoid unduly low values due to a difference of scale between the
        two axes, the triangular mesh can first be rescaled to fit inside a
        unit square with :attr:`scale_factors` (only if *rescale* is True,
        which is its default value).
Parameters
----------
rescale : boolean, optional
If True, a rescaling will be internally performed (based on
            :attr:`scale_factors`), so that the (unmasked) triangles fit
exactly inside a unit square mesh. Default is True.
Returns
-------
circle_ratios : masked array
Ratio of the incircle radius over the
circumcircle radius, for each 'rescaled' triangle of the
encapsulated triangulation.
Values corresponding to masked triangles are masked out.
"""
# Coords rescaling
if rescale:
(kx, ky) = self.scale_factors
else:
(kx, ky) = (1.0, 1.0)
pts = np.vstack([self._triangulation.x*kx,
self._triangulation.y*ky]).T
tri_pts = pts[self._triangulation.triangles]
# Computes the 3 side lengths
a = tri_pts[:, 1, :] - tri_pts[:, 0, :]
b = tri_pts[:, 2, :] - tri_pts[:, 1, :]
c = tri_pts[:, 0, :] - tri_pts[:, 2, :]
a = np.sqrt(a[:, 0]**2 + a[:, 1]**2)
b = np.sqrt(b[:, 0]**2 + b[:, 1]**2)
c = np.sqrt(c[:, 0]**2 + c[:, 1]**2)
# circumcircle and incircle radii
s = (a+b+c)*0.5
prod = s*(a+b-s)*(a+c-s)*(b+c-s)
# We have to deal with flat triangles with infinite circum_radius
bool_flat = (prod == 0.)
if np.any(bool_flat):
# Pathologic flow
ntri = tri_pts.shape[0]
circum_radius = np.empty(ntri, dtype=np.float64)
circum_radius[bool_flat] = np.inf
abc = a*b*c
circum_radius[~bool_flat] = abc[~bool_flat] / (
4.0*np.sqrt(prod[~bool_flat]))
else:
# Normal optimized flow
circum_radius = (a*b*c) / (4.0*np.sqrt(prod))
in_radius = (a*b*c) / (4.0*circum_radius*s)
circle_ratio = in_radius/circum_radius
mask = self._triangulation.mask
if mask is None:
return circle_ratio
else:
return np.ma.array(circle_ratio, mask=mask)
def get_flat_tri_mask(self, min_circle_ratio=0.01, rescale=True):
"""
Eliminates excessively flat border triangles from the triangulation.
Returns a mask *new_mask* which allows to clean the encapsulated
triangulation from its border-located flat triangles
(according to their :meth:`circle_ratios`).
This mask is meant to be subsequently applied to the triangulation
using :func:`matplotlib.tri.Triangulation.set_mask` .
*new_mask* is an extension of the initial triangulation mask
in the sense that an initially masked triangle will remain masked.
        The *new_mask* array is computed recursively; at each step flat
triangles are removed only if they share a side with the current
mesh border. Thus no new holes in the triangulated domain will be
created.
Parameters
----------
min_circle_ratio : float, optional
Border triangles with incircle/circumcircle radii ratio r/R will
be removed if r/R < *min_circle_ratio*. Default value: 0.01
rescale : boolean, optional
If True, a rescaling will first be internally performed (based on
:attr:`scale_factors` ), so that the (unmasked) triangles fit
exactly inside a unit square mesh. This rescaling accounts for the
            difference of scale which might exist between the two axes. Default
(and recommended) value is True.
Returns
-------
new_mask : array-like of booleans
Mask to apply to encapsulated triangulation.
All the initially masked triangles remain masked in the
*new_mask*.
Notes
-----
The rationale behind this function is that a Delaunay
triangulation - of an unstructured set of points - sometimes contains
almost flat triangles at its border, leading to artifacts in plots
(especially for high-resolution contouring).
Masked with computed *new_mask*, the encapsulated
triangulation would contain no more unmasked border triangles
with a circle ratio below *min_circle_ratio*, thus improving the
mesh quality for subsequent plots or interpolation.
Examples
--------
Please refer to the following illustrating example:
.. plot:: mpl_examples/pylab_examples/tricontour_smooth_delaunay.py
"""
# Recursively computes the mask_current_borders, true if a triangle is
# at the border of the mesh OR touching the border through a chain of
# invalid aspect ratio masked_triangles.
ntri = self._triangulation.triangles.shape[0]
mask_bad_ratio = self.circle_ratios(rescale) < min_circle_ratio
current_mask = self._triangulation.mask
if current_mask is None:
current_mask = np.zeros(ntri, dtype=np.bool)
valid_neighbors = np.copy(self._triangulation.neighbors)
renum_neighbors = np.arange(ntri, dtype=np.int32)
nadd = -1
while nadd != 0:
            # The active wavefront is the triangles from the border (unmasked
            # but with at least 1 neighbor equal to -1).
wavefront = ((np.min(valid_neighbors, axis=1) == -1)
& ~current_mask)
            # The elements from the active wavefront will be masked if their
            # circle ratio is bad.
added_mask = np.logical_and(wavefront, mask_bad_ratio)
current_mask = (added_mask | current_mask)
nadd = np.sum(added_mask)
            # Now we have to update the valid_neighbors table.
valid_neighbors[added_mask, :] = -1
renum_neighbors[added_mask] = -1
valid_neighbors = np.where(valid_neighbors == -1, -1,
renum_neighbors[valid_neighbors])
return np.ma.filled(current_mask, True)
def _get_compressed_triangulation(self, return_tri_renum=False,
return_node_renum=False):
"""
Compress (if masked) the encapsulated triangulation.
Returns minimal-length triangles array (*compressed_triangles*) and
coordinates arrays (*compressed_x*, *compressed_y*) that can still
describe the unmasked triangles of the encapsulated triangulation.
Parameters
----------
return_tri_renum : boolean, optional
Indicates whether a renumbering table to translate the triangle
numbers from the encapsulated triangulation numbering into the
new (compressed) renumbering will be returned.
return_node_renum : boolean, optional
Indicates whether a renumbering table to translate the nodes
numbers from the encapsulated triangulation numbering into the
new (compressed) renumbering will be returned.
Returns
-------
compressed_triangles : array-like
the returned compressed triangulation triangles
compressed_x : array-like
the returned compressed triangulation 1st coordinate
compressed_y : array-like
the returned compressed triangulation 2nd coordinate
tri_renum : array-like of integers
renumbering table to translate the triangle numbers from the
encapsulated triangulation into the new (compressed) renumbering.
-1 for masked triangles (deleted from *compressed_triangles*).
Returned only if *return_tri_renum* is True.
node_renum : array-like of integers
renumbering table to translate the point numbers from the
encapsulated triangulation into the new (compressed) renumbering.
-1 for unused points (i.e. those deleted from *compressed_x* and
*compressed_y*). Returned only if *return_node_renum* is True.
"""
# Valid triangles and renumbering
tri_mask = self._triangulation.mask
compressed_triangles = self._triangulation.get_masked_triangles()
ntri = self._triangulation.triangles.shape[0]
tri_renum = self._total_to_compress_renum(tri_mask, ntri)
# Valid nodes and renumbering
node_mask = (np.bincount(np.ravel(compressed_triangles),
minlength=self._triangulation.x.size) == 0)
compressed_x = self._triangulation.x[~node_mask]
compressed_y = self._triangulation.y[~node_mask]
node_renum = self._total_to_compress_renum(node_mask)
# Now renumbering the valid triangles nodes
compressed_triangles = node_renum[compressed_triangles]
# 4 cases possible for return
if not return_tri_renum:
if not return_node_renum:
return compressed_triangles, compressed_x, compressed_y
else:
return (compressed_triangles, compressed_x, compressed_y,
node_renum)
else:
if not return_node_renum:
return (compressed_triangles, compressed_x, compressed_y,
tri_renum)
else:
return (compressed_triangles, compressed_x, compressed_y,
tri_renum, node_renum)
@staticmethod
def _total_to_compress_renum(mask, n=None):
"""
Parameters
----------
mask : 1d boolean array or None
mask
n : integer
            length of the mask. Useful only if mask can be None.
Returns
-------
renum : integer array
array so that (`valid_array` being a compressed array
based on a `masked_array` with mask *mask*) :
- For all i such as mask[i] = False:
valid_array[renum[i]] = masked_array[i]
- For all i such as mask[i] = True:
renum[i] = -1 (invalid value)
"""
if n is None:
n = np.size(mask)
if mask is not None:
renum = -np.ones(n, dtype=np.int32) # Default num is -1
valid = np.arange(n, dtype=np.int32).compress(~mask, axis=0)
renum[valid] = np.arange(np.size(valid, 0), dtype=np.int32)
return renum
else:
return np.arange(n, dtype=np.int32)
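# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): masking flat
# border triangles of a random Delaunay triangulation before plotting.  The
# random point cloud and the 0.05 threshold are assumptions chosen purely for
# demonstration.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    demo_x = np.random.uniform(-1.0, 1.0, 200)
    demo_y = np.random.uniform(-1.0, 1.0, 200)
    demo_tri = Triangulation(demo_x, demo_y)
    analyzer = TriAnalyzer(demo_tri)
    # Remove border triangles whose incircle/circumcircle ratio is below 0.05.
    demo_tri.set_mask(analyzer.get_flat_tri_mask(min_circle_ratio=0.05))
    kept = int((~demo_tri.mask).sum())
    print("kept %d of %d triangles" % (kept, demo_tri.triangles.shape[0]))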
| gpl-3.0 |
costypetrisor/scikit-learn | sklearn/metrics/scorer.py | 13 | 13090 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <[email protected]>
# Lars Buitinck <[email protected]>
# Arnaud Joly <[email protected]>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
from functools import partial
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
@abstractmethod
def __call__(self, estimator, X, y, sample_weight=None):
pass
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
            Trained estimator to use for scoring. Must have a predict
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if is_regressor(clf):
y_pred = clf.predict(X)
else:
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
                    y_pred = np.vstack([p for p in y_pred]).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
if isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(SCORERS.keys())))
else:
scorer = scoring
return scorer
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = scoring is not None
if not hasattr(estimator, 'fit'):
raise TypeError("estimator should a be an estimator implementing "
"'fit' method, %r was passed" % estimator)
elif has_scoring:
return get_scorer(scoring)
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif allow_none:
return None
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and returns a callable that scores an estimator's output.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.grid_search import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
# Standard regression scores
r2_scorer = make_scorer(r2_score)
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
SCORERS = dict(r2=r2_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
log_loss=log_loss_scorer,
adjusted_rand_score=adjusted_rand_scorer)
for name, metric in [('precision', precision_score),
('recall', recall_score), ('f1', f1_score)]:
SCORERS[name] = make_scorer(metric)
for average in ['macro', 'micro', 'samples', 'weighted']:
qualified_name = '{0}_{1}'.format(name, average)
SCORERS[qualified_name] = make_scorer(partial(metric, pos_label=None,
average=average))
| bsd-3-clause |
TensorVision/MediSeg | AP4/sst_segmenter.py | 1 | 15147 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Get patches of the raw image as input."""
import json
import os
import numpy as np
import logging
import sys
import time
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.INFO,
stream=sys.stdout)
from PIL import Image
import numpy
import math
# ML
from keras.models import model_from_yaml
from keras.preprocessing.image import img_to_array
import scipy.misc
import sklearn
# Model
from keras.models import Sequential
from keras.layers import Dense, Flatten
from keras.layers import Convolution2D, Reshape
from keras.layers import MaxPooling2D, Dropout
import keras.optimizers
from tensorvision.utils import load_segmentation_mask
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from utils import get_file_list
import analyze
from seg_utils import get_image
def generate_batch(hypes, phase):
"""
Generate patches.
Parameters
----------
hypes : dict
phase : 'train' or 'test'
"""
x_files, y_files = get_file_list(hypes, phase)
x_files, y_files = sklearn.utils.shuffle(x_files,
y_files,
random_state=0)
batch_x, batch_y = [], []
while True:
for x, y in zip(x_files, y_files):
logging.info("Read '%s' for data...", x)
image = get_image(x, 'RGB')
label = load_segmentation_mask(hypes, y)
im = Image.open(x, 'r')
width, height = im.size
image_vals = get_features(hypes, image, 'data')
label_vals = get_features(hypes, label, 'label')
# print("image_vals = %s" % str(list(image_vals)))
for patch, label_ in zip(image_vals, label_vals):
patch = img_to_array(patch)
label_ = img_to_array(label_)
_, w, h = label_.shape
label_ = label_.reshape((w, h))
if phase == 'val' and 1.0 not in label_:
print("continue")
continue
# scipy.misc.imshow(patch)
# scipy.misc.imshow(label_)
batch_x.append(patch)
batch_y.append(label_) # .flatten()
if len(batch_x) == hypes['solver']['batch_size']:
yield (np.array(batch_x), np.array(batch_y))
batch_x, batch_y = [], []
def get_features(hypes, image, img_type):
"""
Get features from image.
Parameters
----------
hypes : dict
        Hyperparameters such as arch>stride, arch>patch_size
image : numpy array
img_type : {'data', 'label'}
Yields
------
numpy array
patch of size stride x stride from image
"""
if img_type == 'data':
width, height, _ = image.shape
else:
width, height = image.shape
stride = hypes['arch']['stride']
patch_size = hypes['arch']['patch_size']
if img_type == 'data':
window_width = patch_size
left_pad = 0
top_pad = 0
else:
window_width = stride
left_pad = int(math.floor(patch_size - stride) / 2)
top_pad = left_pad
for x in range(left_pad, width - window_width, stride):
for y in range(top_pad, height - window_width, stride):
res = image[x:(x + window_width), y:(y + window_width)]
if res.shape[0] != window_width or res.shape[1] != window_width:
print("res shape: %s" % str(res.shape))
print("window_width: %s" % str(window_width))
continue # quick fix
assert res.shape[0] == window_width, \
("width (res.shape[0]=%i, window_width=%i)" %
(res.shape[0], window_width))
assert res.shape[1] == window_width, \
("height (res.shape[1]=%i, window_width=%i)" %
(res.shape[1], window_width))
yield res
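# Illustrative note (not from the original author): for img_type='data' the
# generator above yields patch_size x patch_size windows on a stride x stride
# grid, while for img_type='label' it yields only the central stride x stride
# window of each patch, so the two streams consumed in generate_batch() line
# up patch-for-patch.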
def get_traindata_single_file(hypes, x, y):
"""Get trainingdata for a single file x with segmentation file y."""
xs, ys = [], []
logging.info("Read '%s' for data...", x)
label = load_segmentation_mask(hypes, y)
    im = get_image(x, 'RGB')
    width, height, _ = im.shape
for x in range(width):
for y in range(height):
image_val = get_features(hypes, im, 'data')
label_val = label[y][x]
xs.append(image_val)
ys.append(label_val)
return numpy.array(xs), numpy.array(ys, dtype=int)
def main(hypes_file, out_dir, override):
"""Orchestrate."""
with open(hypes_file, 'r') as f:
hypes = json.load(f)
model_file_path = '%s.yaml' % hypes['model']['name']
weights_file_path = '%s.hdf5' % hypes['model']['name']
if not os.path.isfile(model_file_path) or override:
patch_size = hypes['arch']['patch_size']
img_channels = hypes['arch']['num_channels']
nb_out = hypes['arch']['stride']**len(hypes['classes'])
model = Sequential()
model.add(Convolution2D(64, 3, 3, border_mode='valid',
init='glorot_normal',
activation='sigmoid',
input_shape=(img_channels,
patch_size,
patch_size)))
model.add(Convolution2D(32, 3, 3,
activation='relu',
init='glorot_normal'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
# model.add(Convolution2D(64, 3, 3, border_mode='same'))
# model.add(Activation('relu'))
# model.add(Convolution2D(64, 3, 3))
# model.add(Activation('relu'))
# model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Dropout(0.25))
model.add(Flatten())
# model.add(Dense(64, activation='sigmoid'))
# # model.add(Dropout(0.5))
# model.add(Dense(64, activation='relu'))
# model.add(Dropout(0.5))
model.add(Dense(nb_out,
activation='sigmoid',
init='glorot_normal'))
model.add(Reshape((hypes['arch']['stride'], hypes['arch']['stride'])))
# sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
opt = keras.optimizers.Adadelta(lr=hypes['solver']['learning_rate'],
rho=0.95,
epsilon=1e-08)
model.compile(loss=hypes['solver']['loss'],
optimizer=opt) # hypes['solver']['optimizer']
logging.info("model compiled")
# while 1:
# b = generate_batch(hypes, 'train')
# for e in range(10):
# print 'Epoch', e
# batches = 0
# for X_batch, Y_batch in generate_batch(hypes, 'train'):
# Y_batch = np.reshape(Y_batch, (-1, 400))
# loss = model.fit(X_batch,
# Y_batch,
# batch_size=hypes['solver']['batch_size'])
# print(loss)
# batches += 1
# if e > 2:
# # we need to break the loop by hand because
# # the generator loops indefinitely
# break
# # Train
g = generate_batch(hypes, 'train')
logging.info("generate_batch")
        X_test, Y_test = next(g)
# print("#" * 80)
# print(X_test.shape)
# print(Y_test.shape)
logging.info("start fit_generator")
model.fit_generator(generate_batch(hypes, 'train'),
samples_per_epoch=hypes['solver']['samples_per_epoch'],
nb_epoch=hypes['solver']['epochs'],
verbose=1,
validation_data=(X_test, Y_test))
x_files, y_files = get_file_list(hypes, 'train')
x_files, y_files = sklearn.utils.shuffle(x_files,
y_files,
random_state=0)
# ij = 0
# for epoch in range(1, hypes['solver']['epochs'] + 1):
# print("#" * 80)
# print("# Epoch %i" % epoch)
# print("#" * 80)
# x_files, y_files = sklearn.utils.shuffle(x_files,
# y_files,
# random_state=epoch)
# for x_train_file, y_train_file in zip(x_files, y_files):
# x_train, y_train = get_traindata_single_file(hypes,
# x_train_file,
# y_train_file)
# # Reduce data
# # x_train, y_train = reduce_data_equal(x_train,
# # y_train)
# t0 = time.time()
# model.fit(x_train, y_train,
# batch_size=128,
# nb_epoch=1,
# )
# ij += 1
# print("%i of %i" %
# (ij, hypes['solver']['epochs'] * len(x_files)))
# t1 = time.time()
# print("Training Time: %0.4f" % (t1 - t0))
print("done with fit_generator")
# save as YAML
yaml_string = model.to_yaml()
with open(model_file_path, 'w') as f:
f.write(yaml_string)
model.save_weights(weights_file_path)
# Evaluate
data = get_file_list(hypes, 'test')
analyze.evaluate(hypes,
data,
out_dir,
model,
elements=[0, 1],
get_segmentation=get_segmentation,
verbose=True)
else:
with open(model_file_path) as f:
yaml_string = f.read()
model = model_from_yaml(yaml_string)
model.load_weights(weights_file_path)
model.compile(optimizer=hypes['solver']['optimizer'],
loss='binary_crossentropy')
data = get_file_list(hypes, 'test')
analyze.evaluate(hypes,
data,
out_dir,
model,
elements=[0, 1],
get_segmentation=get_segmentation,
verbose=True)
def get_segmentation(hypes, image_path, model):
"""
Get a segmentation.
    Parameters
    ----------
hypes : dict
Hyperparameters (model specific information)
image_path : str
Path to a file which gets segmented.
model : object
Returns
-------
Numpy array of the same width and height as input.
"""
# Load raw image
image = get_image(image_path, 'RGB')
height, width, _ = image.shape
# Make the image an appropriate shape
stride = hypes['arch']['stride']
patch_size = hypes['arch']['patch_size']
# How often does the window get applied in the different directions?
width_n = int(math.ceil(width / stride))
height_n = int(math.ceil(height / stride))
assert patch_size >= stride
left_pad = int(math.floor(patch_size - stride) / 2)
right_pad = (((width_n * stride - width) % stride) +
(patch_size - stride - left_pad))
top_pad = left_pad
bottom_pad = (((height_n * stride - height) % stride) +
(patch_size - stride - top_pad))
pad_width = ((top_pad, bottom_pad),
(left_pad, right_pad),
(0, 0))
image = numpy.pad(image,
pad_width=pad_width,
mode='constant')
segmentation = numpy.zeros(shape=(height, width))
# Generate input patches of image
patches = []
coords = []
for i in range(width_n):
for j in range(height_n):
x = stride * i
y = stride * j
patch = image[y:(y + patch_size), x:(x + patch_size)]
patch = img_to_array(patch)
assert patch.shape == (3, patch_size, patch_size), \
"patch had shape %s" % str(patch.shape)
patches.append(patch)
coords.append({'x': i, 'y': j})
if len(patches) == hypes['solver']['batch_size']:
patches = numpy.array(patches)
# Run model on input patches and collect output patches
res = model.predict(patches)
# Assemble output patches to segmentation image
for coords, res in zip(coords, res):
res = res.reshape(stride, stride)
for m in range(stride):
for n in range(stride):
value = res[n][m]
x = coords['x'] * stride + m
y = coords['y'] * stride + n
segmentation[y][x] = value
# Cleanup for next batch
patches, coords = [], []
# scipy.misc.imshow(segmentation)
print("amax=%0.4f, mean=%0.4f, median=%0.4f, 70%%=%0.4f, 95%%=%0.4f" %
(np.amax(segmentation),
np.mean(segmentation),
np.median(segmentation),
np.percentile(segmentation, 70),
np.percentile(segmentation, 95)))
threshold = np.percentile(segmentation, 95)
return segmentation > threshold
def is_valid_file(parser, arg):
"""
Check if arg is a valid file that already exists on the file system.
Parameters
----------
parser : argparse object
arg : str
Returns
-------
arg
"""
arg = os.path.abspath(arg)
if not os.path.exists(arg):
parser.error("The file %s does not exist!" % arg)
else:
return arg
def get_parser():
"""Get parser object for basic local classifier."""
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--hypes",
dest="hypes_file",
help=("Configuration file in JSON format"),
type=lambda x: is_valid_file(parser, x),
metavar="FILE",
required=True)
parser.add_argument("--out",
dest="data",
help=("output directory"),
required=True)
parser.add_argument("--override",
action="store_true", dest="override", default=False,
help="override old model, if it exists")
return parser
if __name__ == '__main__':
args = get_parser().parse_args()
main(args.hypes_file, args.data, args.override)
| mit |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/mpl_examples/pylab_examples/multi_image.py | 12 | 2201 | #!/usr/bin/env python
'''
Make a set of images with a single colormap, norm, and colorbar.
It also illustrates colorbar tick labelling with a multiplier.
'''
from matplotlib.pyplot import figure, show, axes, sci
from matplotlib import cm, colors
from matplotlib.font_manager import FontProperties
from numpy import amin, amax, ravel
from numpy.random import rand
Nr = 3
Nc = 2
fig = figure()
cmap = cm.cool
figtitle = 'Multiple images'
t = fig.text(0.5, 0.95, figtitle,
horizontalalignment='center',
fontproperties=FontProperties(size=16))
cax = fig.add_axes([0.2, 0.08, 0.6, 0.04])
w = 0.4
h = 0.22
ax = []
images = []
vmin = 1e40
vmax = -1e40
for i in range(Nr):
for j in range(Nc):
pos = [0.075 + j*1.1*w, 0.18 + i*1.2*h, w, h]
a = fig.add_axes(pos)
if i > 0:
a.set_xticklabels([])
# Make some fake data with a range that varies
# somewhat from one plot to the next.
        data = ((1+i+j)/10.0)*rand(10, 20)*1e-6
dd = ravel(data)
# Manually find the min and max of all colors for
# use in setting the color scale.
vmin = min(vmin, amin(dd))
vmax = max(vmax, amax(dd))
images.append(a.imshow(data, cmap=cmap))
ax.append(a)
# Set the first image as the master, with all the others
# observing it for changes in cmap or norm.
class ImageFollower:
'update image in response to changes in clim or cmap on another image'
def __init__(self, follower):
self.follower = follower
def __call__(self, leader):
self.follower.set_cmap(leader.get_cmap())
self.follower.set_clim(leader.get_clim())
norm = colors.Normalize(vmin=vmin, vmax=vmax)
for i, im in enumerate(images):
im.set_norm(norm)
if i > 0:
images[0].callbacksSM.connect('changed', ImageFollower(im))
# The colorbar is also based on this master image.
fig.colorbar(images[0], cax, orientation='horizontal')
# We need the following only if we want to run this interactively and
# modify the colormap:
axes(ax[0]) # Return the current axes to the first one,
sci(images[0]) # because the current image must be in current axes.
show()
| mit |
mmilaprat/policycompass-services | apps/datasetmanager/file_encoder.py | 2 | 4779 | """
Converts uploaded files into a tabular data structure
"""
import pandas
import codecs
import csv
import datetime
import logging
import os
from xlrd import open_workbook, XL_CELL_DATE, xldate_as_tuple
log = logging.getLogger(__name__)
__author__ = 'fki'
class FileEncoder(object):
"""
Converts uploaded files into a tabular data structure
"""
# Register supported extensions with a function
supported_extensions = {
'.csv': '_csv_encode',
'.tsv': '_tsv_encode',
'.xlsx': '_xlsx_encode',
'.xls': '_xlsx_encode'
}
# file: InMemoryUploadedFile
def __init__(self, file, jsonData):
"""
Initialize file and file_name
"""
self.file = file
self.file_name, self.file_ext = os.path.splitext(file.name)
if len(jsonData) != 0:
self.jsonData = jsonData
def is_supported(self):
"""
Check if there is a converter for the extension
"""
if self.file_ext in self.supported_extensions:
return True
else:
return False
def encode(self):
"""
        Calls the responsible function and encodes the file.
Returns the result
"""
result = getattr(self, self.supported_extensions[self.file_ext])()
return result
def _csv_encode(self, delimiter=','):
"""
Encodes a CSV file.
"""
try:
try:
csvdata = pandas.read_csv(self.jsonData['result']['url'])
                if ';' in csvdata.values[len(csvdata.values) // 2][0]:
csvdata = pandas.read_csv(self.jsonData['result']['url'], sep=';')
except:
csvdata = pandas.read_csv(self.jsonData['result']['url'], quoting=3)
colHeadersValues = []
for q in range(0, len(csvdata.axes[1])):
colHeadersValues.append(csvdata.axes[1][q])
completeArray = []
completeArray.append(colHeadersValues)
rowIndex = 0
for x in range(0, len(csvdata.values)):
rowArray = []
rowIndex = rowIndex + 1
for y in range(0, len(csvdata.values[x])):
if pandas.isnull(csvdata.values[x][y]):
rowArray.append("")
else:
rowArray.append(csvdata.values[x][y])
completeArray.append(rowArray)
return completeArray
except:
r = []
reader = csv.reader(codecs.iterdecode(self.file, "utf-8"),
delimiter=delimiter)
for row in reader:
log.debug(str(row))
r.append(row)
return r
def _tsv_encode(self):
"""
Encodes a TSV file
:return: array of data rows
"""
try:
tsvdata = pandas.read_csv(self.jsonData['result']['url'], sep='\t')
colHeadersValues = []
for q in range(0, len(tsvdata.axes[1])):
colHeadersValues.append(tsvdata.axes[1][q])
completeArray = []
completeArray.append(colHeadersValues)
rowIndex = 0
for x in range(0, len(tsvdata.values)):
rowArray = []
rowIndex = rowIndex + 1
for y in range(0, len(tsvdata.values[x])):
if pandas.isnull(tsvdata.values[x][y]):
rowArray.append("")
else:
rowArray.append(tsvdata.values[x][y])
completeArray.append(rowArray)
return completeArray
except:
return self._csv_encode(delimiter='\t')
def _xlsx_encode(self):
"""
Encodes XLS and XLSX files.
"""
r = []
wb = open_workbook(file_contents=self.file.read())
sheet = wb.sheet_by_index(0)
for row in range(sheet.nrows):
values = []
for col in range(sheet.ncols):
cell = sheet.cell(row, col)
# Date cells have to be converted to return as string
if cell.ctype == XL_CELL_DATE:
v = xldate_as_tuple(cell.value, wb.datemode)
v = datetime.datetime(*v)
v = datetime.date(v.year, v.month, v.day)
elif isinstance(cell.value, float):
if cell.value == int(cell.value):
v = str(int(cell.value))
else:
v = str(cell.value)
else:
v = str(cell.value)
values.append(v)
r.append(values)
return r
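# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): encoding a local
# CSV file.  'example.csv' is a hypothetical path; in the service the file
# arrives as an upload and a non-empty jsonData provides a remote copy of the
# file via jsonData['result']['url'].
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    with open('example.csv', 'rb') as demo_file:
        encoder = FileEncoder(demo_file, {})
        if encoder.is_supported():
            rows = encoder.encode()
            print(rows[:3])  # header row plus the first two data rows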
| agpl-3.0 |
JoeJimFlood/RugbyPredictifier | 2018SuperRugby/matchup - Copy.py | 1 | 15678 | import os
os.chdir(os.path.dirname(__file__))
import sim_util
import sys
import pandas as pd
import numpy as np
from numpy.random import poisson, uniform
from numpy import mean
import time
import math
po = False
team_homes = pd.read_csv(os.path.join(os.path.split(__file__)[0], 'TeamHomes.csv'), header = None, index_col = 0)
stadium_locs = pd.read_csv(os.path.join(os.path.split(__file__)[0], 'StadiumLocs.csv'), index_col = 0)
teamsheetpath = os.path.join(os.path.split(__file__)[0], 'Score Tables')
compstat = {'TF': 'TA', 'TA': 'TF', #Dictionary to use to compare team stats with opponent stats
'CF': 'CA', 'CA': 'CF',
'CON%F': 'CON%A', 'CON%A': 'CON%F',
'PF': 'PA', 'PA': 'PF',
'DGF': 'DGA', 'DGA': 'DGF'}
def weighted_variance(data, weights):
assert len(data) == len(weights), 'Data and weights must be same length'
weighted_average = np.average(data, weights = weights)
v1 = weights.sum()
v2 = np.square(weights).sum()
return (weights*np.square(data - weighted_average)).sum() / (v1 - (v2/v1))
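# Illustrative note (not from the original author): weighted_variance is the
# "reliability weights" form of the unbiased weighted sample variance,
#     sum(w_i * (x_i - xbar_w)**2) / (V1 - V2/V1),  V1 = sum(w_i), V2 = sum(w_i**2),
# so with equal weights it reduces to the ordinary unbiased variance, e.g.
# weighted_variance(np.array([1.0, 2.0, 3.0]), np.ones(3)) == 1.0.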
def get_opponent_stats(opponent, venue): #Gets summaries of statistics for opponent each week
opponent_stats = {}
global teamsheetpath, stadium_locs, team_homes
opp_stats = pd.DataFrame.from_csv(os.path.join(teamsheetpath, opponent + '.csv'))
opponent_home = team_homes[1][opponent]
(venue_lat, venue_lng) = stadium_locs.loc[venue, ['Lat', 'Long']]
(opponent_home_lat, opponent_home_lng) = stadium_locs.loc[opponent_home, ['Lat', 'Long']]
opponent_reference_distance = geodesic_distance(opponent_home_lat, opponent_home_lng, venue_lat, venue_lng)
def get_opponent_weight(location):
return get_travel_weight(location, opponent_home_lat, opponent_home_lng, opponent_reference_distance)
opp_stats['Weight'] = opp_stats['VENUE'].apply(get_opponent_weight)
for stat in opp_stats.columns:
if stat != 'VENUE':
if stat != 'OPP':
opponent_stats.update({stat: np.average(opp_stats[stat], weights = opp_stats['Weight'])})
opponent_stats.update({'CON%F': float((opp_stats['CF']*opp_stats['Weight']).sum())/(opp_stats['TF']*opp_stats['Weight']).sum()})
opponent_stats.update({'CON%A': float((opp_stats['CA']*opp_stats['Weight']).sum())/(opp_stats['TA']*opp_stats['Weight']).sum()})
return opponent_stats
def get_residual_performance(score_df): #Get how each team has done compared to the average performance of their opponents
global teamsheetpath, team_homes, stadium_locs
#score_df = pd.DataFrame.from_csv(os.path.join(teamsheetpath, team + '.csv'))
residual_stats = {}
residual_variances = {}
score_df['CON%F'] = np.nan
score_df['CON%A'] = np.nan
for week in score_df.index:
opponent_stats = get_opponent_stats(score_df['OPP'][week], score_df['VENUE'][week])
for stat in opponent_stats:
if week == score_df.index.tolist()[0]:
score_df['OPP_' + stat] = np.nan
score_df['OPP_' + stat][week] = opponent_stats[stat]
score_df['CON%F'][week] = float(score_df['CF'][week]) / score_df['TF'][week]
score_df['CON%A'][week] = float(score_df['CA'][week]) / score_df['TA'][week]
for stat in opponent_stats:
if stat == 'Weight':
continue
score_df['R_' + stat] = score_df[stat] - score_df['OPP_' + compstat[stat]]
if stat in ['TF', 'PF', 'DGF', 'TA', 'PA', 'DGA']:
residual_stats.update({stat: np.average(score_df['R_' + stat], weights = score_df['Weight'])})
residual_variances[stat] = weighted_variance(score_df['R_' + stat], score_df['Weight'])
elif stat == 'CON%F':
residual_stats.update({stat: (score_df['R_CON%F'].multiply(score_df['TF'])*score_df['Weight']).sum() / (score_df['TF']*score_df['Weight']).sum()})
elif stat == 'CON%A':
residual_stats.update({stat: (score_df['R_CON%A'].multiply(score_df['TA'])*score_df['Weight']).sum() / (score_df['TA']*score_df['Weight']).sum()})
return residual_stats, pd.Series(residual_variances)
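# NOTE (added for clarity, not from the original author): the Poisson-based
# get_score() and game() helpers below are legacy code; a second, sim_util-based
# get_score() is defined further down and, since Python keeps the last
# definition, that later version is the one matchup() actually uses.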
def get_score(expected_scores): #Get the score for a team based on expected scores
score = 0
if expected_scores['T'] > 0:
tries = poisson(expected_scores['T'])
else:
tries = poisson(0.01)
score = score + 6 * tries
if expected_scores['P'] > 0:
fgs = poisson(expected_scores['P'])
else:
fgs = poisson(0.01)
score = score + 3 * fgs
if expected_scores['DG'] > 0:
sfs = poisson(expected_scores['DG'])
else:
sfs = poisson(0.01)
score = score + 2 * sfs
for t in range(tries):
successful_con_determinant = uniform(0, 1)
if successful_con_determinant <= expected_scores['CONPROB']:
score += 2
else:
continue
#if tries >= 4:
# bp = True
#else:
# bp = False
return (score, tries)
def game(team_1, team_2,
expected_scores_1, expected_scores_2,
playoff = False): #Get two scores and determine a winner
(score_1, tries_1) = get_score(expected_scores_1)
(score_2, tries_2) = get_score(expected_scores_2)
if tries_1 - tries_2 >= 3:
bp1 = True
bp2 = False
elif tries_2 - tries_1 >= 3:
bp1 = False
bp2 = True
else:
bp1 = False
bp2 = False
if score_1 > score_2:
win_1 = 1
win_2 = 0
draw_1 = 0
draw_2 = 0
if bp1:
bpw1 = 1
else:
bpw1 = 0
if bp2:
bpl2 = 1
else:
bpl2 = 0
bpl1 = 0
bpw2 = 0
bpd1 = 0
bpd2 = 0
lbp1 = 0
if score_1 - score_2 <= 7:
lbp2 = 1
else:
lbp2 = 0
elif score_2 > score_1:
win_1 = 0
win_2 = 1
draw_1 = 0
draw_2 = 0
if bp1:
bpl1 = 1
else:
bpl1 = 0
if bp2:
bpw2 = 1
else:
bpw2 = 0
bpw1 = 0
bpl2 = 0
bpd1 = 0
bpd2 = 0
lbp2 = 0
if score_2 - score_1 <= 7:
lbp1 = 1
else:
lbp1 = 0
else:
if playoff:
win_1 = 0.5
win_2 = 0.5
draw_1 = 0
draw_2 = 0
bpw1 = 0
bpw2 = 0
bpd1 = 0
bpd2 = 0
bpl1 = 0
bpl2 = 0
lbp1 = 0
lbp2 = 0
else:
win_1 = 0
win_2 = 0
draw_1 = 1
draw_2 = 1
bpw1 = 0
bpw2 = 0
bpl1 = 0
bpl2 = 0
lbp1 = 0
lbp2 = 0
if bp1:
bpd1 = 1
else:
bpd1 = 0
if bp2:
bpd2 = 1
else:
bpd2 = 0
summary = {team_1: [win_1, draw_1, score_1, bpw1, bpd1, bpl1, lbp1]}
summary.update({team_2: [win_2, draw_2, score_2, bpw2, bpd2, bpl2, lbp2]})
return summary
def get_expected_scores(team_1_stats, team_2_stats, team_1_df, team_2_df): #Get the expected scores for a matchup based on the previous teams' performances
expected_scores = {}
for stat in team_1_stats:
expected_scores.update({'T': mean([team_1_stats['TF'] + np.average(team_2_df['TA'], weights = team_2_df['Weight']),
team_2_stats['TA'] + np.average(team_1_df['TF'], weights = team_1_df['Weight'])])})
expected_scores.update({'P': mean([team_1_stats['PF'] + np.average(team_2_df['PA'], weights = team_2_df['Weight']),
team_2_stats['PA'] + np.average(team_1_df['PF'], weights = team_1_df['Weight'])])})
expected_scores.update({'DG': mean([team_1_stats['DGF'] + np.average(team_2_df['DGA'], weights = team_2_df['Weight']),
team_2_stats['DGA'] + np.average(team_1_df['DGF'], weights = team_1_df['Weight'])])})
expected_scores['DG'] = max(expected_scores['DG'], 0)
#print mean([team_1_stats['PAT1%F'] + team_2_df['PAT1AS'].astype('float').sum() / team_2_df['PAT1AA'].sum(),
# team_2_stats['PAT1%A'] + team_1_df['PAT1FS'].astype('float').sum() / team_1_df['PAT1FA'].sum()])
conprob = mean([team_1_stats['CON%F'] + (team_2_df['CA']*team_2_df['Weight']).sum() / (team_2_df['TA']*team_2_df['Weight']).sum(),
team_2_stats['CON%A'] + (team_1_df['CF']*team_1_df['Weight']).sum() / (team_1_df['TF']*team_1_df['Weight']).sum()])
if not math.isnan(conprob):
expected_scores.update({'CONPROB': conprob})
else:
expected_scores.update({'CONPROB': 0.75})
#print(expected_scores['PAT1PROB'])
#print(expected_scores)
return expected_scores
def geodesic_distance(olat, olng, dlat, dlng):
'''
Returns geodesic distance in percentage of half the earth's circumference between two points on the earth's surface
'''
scale = math.tau/360
olat *= scale
olng *= scale
dlat *= scale
dlng *= scale
delta_lat = (dlat - olat)
delta_lng = (dlng - olng)
a = math.sin(delta_lat/2)**2 + math.cos(olat)*math.cos(dlat)*math.sin(delta_lng/2)**2
return 4*math.atan2(math.sqrt(a), math.sqrt(1-a))/math.tau
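# Illustrative note (not from the original author): the value returned above is
# the haversine central angle divided by pi, so it runs from 0.0 (same point)
# to 1.0 (antipodal points); e.g. geodesic_distance(0, 0, 0, 180) == 1.0 and
# geodesic_distance(0, 0, 0, 90) == 0.5.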
def get_travel_weight(venue, home_lat, home_lng, reference_distance):
'''
Gets the travel weight based on a venue, a team's home lat/long coordinates, and a reference distance
'''
global stadium_locs
(venue_lat, venue_lng) = stadium_locs.loc[venue, ['Lat', 'Long']]
travel_distance = geodesic_distance(home_lat, home_lng, venue_lat, venue_lng)
return 1 - abs(travel_distance - reference_distance)
def get_score(expected_scores, score_array, n_sim, return_tries = True):
tf = sim_util.sim(expected_scores['T'][0], expected_scores['T'][1], n_sim)
cf = np.random.binomial(tf, expected_scores['C'])
pf = sim_util.sim(expected_scores['P'][0], expected_scores['P'][1], n_sim)
dgf = sim_util.sim(expected_scores['DG'][0], expected_scores['DG'][1], n_sim)
score = sim_util.calculate_score((tf, cf, pf, dgf), score_array)
if return_tries:
return score, tf
else:
return score
def matchup(team_1, team_2, venue = None):
ts = time.time()
global team_homes, stadium_locs
team_1_home = team_homes[1][team_1]
team_2_home = team_homes[1][team_2]
if venue is None:
venue = team_homes[1][team_1]
(venue_lat, venue_lng) = stadium_locs.loc[venue, ['Lat', 'Long']]
(team_1_home_lat, team_1_home_lng) = stadium_locs.loc[team_1_home, ['Lat', 'Long']]
(team_2_home_lat, team_2_home_lng) = stadium_locs.loc[team_2_home, ['Lat', 'Long']]
team_1_reference_distance = geodesic_distance(team_1_home_lat, team_1_home_lng, venue_lat, venue_lng)
team_2_reference_distance = geodesic_distance(team_2_home_lat, team_2_home_lng, venue_lat, venue_lng)
def get_team_1_weight(location):
return get_travel_weight(location, team_1_home_lat, team_1_home_lng, team_1_reference_distance)
def get_team_2_weight(location):
return get_travel_weight(location, team_2_home_lat, team_2_home_lng, team_2_reference_distance)
team_1_season = pd.DataFrame.from_csv(os.path.join(teamsheetpath, team_1 + '.csv'))
team_2_season = pd.DataFrame.from_csv(os.path.join(teamsheetpath, team_2 + '.csv'))
team_1_season['Weight'] = team_1_season['VENUE'].apply(get_team_1_weight)
team_2_season['Weight'] = team_2_season['VENUE'].apply(get_team_2_weight)
stats_1, variances_1 = get_residual_performance(team_1_season)
stats_2, variances_2 = get_residual_performance(team_2_season)
expected_scores_1 = get_expected_scores(stats_1, stats_2, team_1_season, team_2_season)
expected_scores_2 = get_expected_scores(stats_2, stats_1, team_2_season, team_1_season)
    var_1 = pd.Series(0.25*(variances_1.loc[['TF', 'PF', 'DGF']].values + variances_2.loc[['TA', 'PA', 'DGA']].values), ['T', 'P', 'DG'])
    var_2 = pd.Series(0.25*(variances_2.loc[['TF', 'PF', 'DGF']].values + variances_1.loc[['TA', 'PA', 'DGA']].values), ['T', 'P', 'DG'])
for stat in var_1.index:
if math.isnan(var_1[stat]):
var_1[stat] = expected_scores_1[stat]
if math.isnan(var_2[stat]):
var_2[stat] = expected_scores_2[stat]
score_array = [5, 2, 3, 3]
n_sim = int(5e6)
expected_scores_1a = {'T': (expected_scores_1['T'], var_1['T']),
'C': expected_scores_1['CONPROB'],
'P': (expected_scores_1['P'], var_1['P']),
'DG': (expected_scores_1['DG'], var_1['DG'])}
expected_scores_2a = {'T': (expected_scores_2['T'], var_2['T']),
'C': expected_scores_2['CONPROB'],
'P': (expected_scores_2['P'], var_2['P']),
'DG': (expected_scores_2['DG'], var_2['DG'])}
print(expected_scores_1a)
print(expected_scores_2a)
ts = time.time()
(team_1_scores, team_1_tries) = get_score(expected_scores_1a, score_array, n_sim)
(team_2_scores, team_2_tries) = get_score(expected_scores_2a, score_array, n_sim)
te = time.time()
print(te - ts)
(team_1_wins, team_2_wins, draws) = sim_util.eval_results(team_1_scores, team_2_scores, False)
(team_1_tb, team_2_tb) = sim_util.eval_try_bonus(team_1_tries, team_2_tries, 3)
(team_1_lb, team_2_lb) = sim_util.eval_losing_bonus(team_1_scores, team_2_scores, 7)
team_1_prob = team_1_wins.mean()
team_2_prob = team_2_wins.mean()
draw_prob = draws.mean()
team_1_bpw_prob = (team_1_tb * team_1_wins).mean()
team_1_bpd_prob = (team_1_tb * draws).mean()
team_1_bpl_prob = (team_1_tb * team_2_wins).mean()
team_1_lbp_prob = (team_1_lb).mean()
team_2_bpw_prob = (team_2_tb * team_2_wins).mean()
team_2_bpd_prob = (team_2_tb * draws).mean()
team_2_bpl_prob = (team_2_tb * team_1_wins).mean()
team_2_lbp_prob = (team_2_lb).mean()
games = pd.DataFrame.from_items([(team_1, team_1_scores), (team_2, team_2_scores)])
pre_summaries = games.describe(percentiles = list(np.linspace(0.05, 0.95, 19)))
summaries = pd.DataFrame(columns = pre_summaries.columns)
summaries.loc['mean'] = pre_summaries.loc['mean']
for i in pre_summaries.index:
try:
percentile = int(round(float(i[:-1])))
summaries.loc['{}%'.format(percentile)] = pre_summaries.loc[i]
except ValueError:
continue
summaries = summaries.reset_index()
for item in summaries.index:
try:
summaries['index'][item] = str(int(float(summaries['index'][item][:-1]))) + '%'
except ValueError:
continue
bonus_points = pd.DataFrame(index = ['4-Try Bonus Point with Win',
'4-Try Bonus Point with Draw',
'4-Try Bonus Point with Loss',
'Losing Bonus Point'])
bonus_points[team_1] = [team_1_bpw_prob, team_1_bpd_prob, team_1_bpl_prob, team_1_lbp_prob]
bonus_points[team_2] = [team_2_bpw_prob, team_2_bpd_prob, team_2_bpl_prob, team_2_lbp_prob]
summaries = summaries.set_index('index')
summaries = summaries.groupby(level = 0).last()
output = {'ProbWin': {team_1: team_1_prob, team_2: team_2_prob}, 'Scores': summaries, 'Bonus Points': bonus_points}
print(team_1 + '/' + team_2 + ' score distributions computed in ' + str(round(time.time() - ts, 1)) + ' seconds')
return output | mit |
davidwaroquiers/pymatgen | pymatgen/command_line/vampire_caller.py | 2 | 15166 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements an interface to the VAMPIRE code for atomistic
simulations of magnetic materials.
This module depends on a compiled vampire executable available in the path.
Please download at https://vampire.york.ac.uk/download/ and
follow the instructions to compile the executable.
If you use this module, please cite the following:
"Atomistic spin model simulations of magnetic nanomaterials."
R. F. L. Evans, W. J. Fan, P. Chureemart, T. A. Ostler, M. O. A. Ellis
and R. W. Chantrell. J. Phys.: Condens. Matter 26, 103202 (2014)
"""
import logging
import subprocess
import pandas as pd
from monty.dev import requires
from monty.json import MSONable
from monty.os.path import which
from pymatgen.analysis.magnetism.heisenberg import HeisenbergMapper
__author__ = "ncfrey"
__version__ = "0.1"
__maintainer__ = "Nathan C. Frey"
__email__ = "[email protected]"
__status__ = "Development"
__date__ = "June 2019"
VAMPEXE = which("vampire-serial")
class VampireCaller:
"""
Run Vampire on a material with magnetic ordering and exchange parameter information to compute the critical
temperature with classical Monte Carlo.
"""
@requires(
VAMPEXE,
"VampireCaller requires vampire-serial to be in the path."
"Please follow the instructions at https://vampire.york.ac.uk/download/.",
)
def __init__(
self,
ordered_structures=None,
energies=None,
mc_box_size=4.0,
equil_timesteps=2000,
mc_timesteps=4000,
save_inputs=False,
hm=None,
avg=True,
user_input_settings=None,
):
"""
user_input_settings is a dictionary that can contain:
* start_t (int): Start MC sim at this temp, defaults to 0 K.
* end_t (int): End MC sim at this temp, defaults to 1500 K.
* temp_increment (int): Temp step size, defaults to 25 K.
Args:
ordered_structures (list): Structure objects with magmoms.
energies (list): Energies of each relaxed magnetic structure.
mc_box_size (float): x=y=z dimensions (nm) of MC simulation box
equil_timesteps (int): number of MC steps for equilibrating
mc_timesteps (int): number of MC steps for averaging
save_inputs (bool): if True, save scratch dir of vampire input files
hm (HeisenbergModel): object already fit to low energy
magnetic orderings.
avg (bool): If True, simply use <J> exchange parameter estimate.
If False, attempt to use NN, NNN, etc. interactions.
user_input_settings (dict): optional commands for VAMPIRE Monte Carlo
Parameters:
sgraph (StructureGraph): Ground state graph.
unique_site_ids (dict): Maps each site to its unique identifier
            nn_interactions (dict): {i: j} pairs of NN interactions
between unique sites.
ex_params (dict): Exchange parameter values (meV/atom)
mft_t (float): Mean field theory estimate of critical T
mat_name (str): Formula unit label for input files
mat_id_dict (dict): Maps sites to material id # for vampire
indexing.
TODO:
* Create input files in a temp folder that gets cleaned up after run terminates
"""
self.mc_box_size = mc_box_size
self.equil_timesteps = equil_timesteps
self.mc_timesteps = mc_timesteps
self.save_inputs = save_inputs
self.avg = avg
if not user_input_settings: # set to empty dict
self.user_input_settings = {}
else:
self.user_input_settings = user_input_settings
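        # Example (illustrative, not from the original source):
        #     user_input_settings = {"start_t": 0, "end_t": 1500, "temp_increment": 25}
        # reproduces the default Monte Carlo temperature sweep described in the
        # docstring above.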
# Get exchange parameters and set instance variables
if not hm:
hmapper = HeisenbergMapper(ordered_structures, energies, cutoff=3.0, tol=0.02)
hm = hmapper.get_heisenberg_model()
# Attributes from HeisenbergModel
self.hm = hm
self.structure = hm.structures[0] # ground state
self.sgraph = hm.sgraphs[0] # ground state graph
self.unique_site_ids = hm.unique_site_ids
self.nn_interactions = hm.nn_interactions
self.dists = hm.dists
self.tol = hm.tol
self.ex_params = hm.ex_params
self.javg = hm.javg
# Full structure name before reducing to only magnetic ions
self.mat_name = hm.formula
# Switch to scratch dir which automatically cleans up vampire input files unless the user specifies to save them
# with ScratchDir('/scratch', copy_from_current_on_enter=self.save_inputs,
# copy_to_current_on_exit=self.save_inputs) as temp_dir:
# os.chdir(temp_dir)
# Create input files
self._create_mat()
self._create_input()
self._create_ucf()
# Call Vampire
process = subprocess.Popen(["vampire-serial"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
stdout = stdout.decode()
if stderr:
vanhelsing = stderr.decode()
if len(vanhelsing) > 27: # Suppress blank warning msg
logging.warning(vanhelsing)
if process.returncode != 0:
raise RuntimeError("Vampire exited with return code {}.".format(process.returncode))
self._stdout = stdout
self._stderr = stderr
# Process output
nmats = max(self.mat_id_dict.values())
parsed_out, critical_temp = VampireCaller.parse_stdout("output", nmats)
self.output = VampireOutput(parsed_out, nmats, critical_temp)
def _create_mat(self):
structure = self.structure
mat_name = self.mat_name
magmoms = structure.site_properties["magmom"]
# Maps sites to material id for vampire inputs
mat_id_dict = {}
nmats = 0
for key in self.unique_site_ids:
spin_up, spin_down = False, False
nmats += 1 # at least 1 mat for each unique site
# Check which spin sublattices exist for this site id
for site in key:
m = magmoms[site]
if m > 0:
spin_up = True
if m < 0:
spin_down = True
# Assign material id for each site
for site in key:
m = magmoms[site]
if spin_up and not spin_down:
mat_id_dict[site] = nmats
if spin_down and not spin_up:
mat_id_dict[site] = nmats
if spin_up and spin_down:
# Check if spin up or down shows up first
m0 = magmoms[key[0]]
if m > 0 and m0 > 0:
mat_id_dict[site] = nmats
if m < 0 and m0 < 0:
mat_id_dict[site] = nmats
if m > 0 > m0:
mat_id_dict[site] = nmats + 1
if m < 0 < m0:
mat_id_dict[site] = nmats + 1
# Increment index if two sublattices
if spin_up and spin_down:
nmats += 1
mat_file = ["material:num-materials=%d" % (nmats)]
for key in self.unique_site_ids:
i = self.unique_site_ids[key] # unique site id
for site in key:
mat_id = mat_id_dict[site]
# Only positive magmoms allowed
m_magnitude = abs(magmoms[site])
if magmoms[site] > 0:
spin = 1
if magmoms[site] < 0:
spin = -1
atom = structure[i].species.reduced_formula
mat_file += ["material[%d]:material-element=%s" % (mat_id, atom)]
mat_file += [
"material[%d]:damping-constant=1.0" % (mat_id),
"material[%d]:uniaxial-anisotropy-constant=1.0e-24" % (mat_id), # xx - do we need this?
"material[%d]:atomic-spin-moment=%.2f !muB" % (mat_id, m_magnitude),
"material[%d]:initial-spin-direction=0,0,%d" % (mat_id, spin),
]
mat_file = "\n".join(mat_file)
mat_file_name = mat_name + ".mat"
self.mat_id_dict = mat_id_dict
with open(mat_file_name, "w") as f:
f.write(mat_file)
def _create_input(self):
structure = self.structure
mcbs = self.mc_box_size
equil_timesteps = self.equil_timesteps
mc_timesteps = self.mc_timesteps
mat_name = self.mat_name
input_script = ["material:unit-cell-file=%s.ucf" % (mat_name)]
input_script += ["material:file=%s.mat" % (mat_name)]
# Specify periodic boundary conditions
input_script += [
"create:periodic-boundaries-x",
"create:periodic-boundaries-y",
"create:periodic-boundaries-z",
]
# Unit cell size in Angstrom
abc = structure.lattice.abc
ucx, ucy, ucz = abc[0], abc[1], abc[2]
input_script += ["dimensions:unit-cell-size-x = %.10f !A" % (ucx)]
input_script += ["dimensions:unit-cell-size-y = %.10f !A" % (ucy)]
input_script += ["dimensions:unit-cell-size-z = %.10f !A" % (ucz)]
# System size in nm
input_script += [
"dimensions:system-size-x = %.1f !nm" % (mcbs),
"dimensions:system-size-y = %.1f !nm" % (mcbs),
"dimensions:system-size-z = %.1f !nm" % (mcbs),
]
# Critical temperature Monte Carlo calculation
input_script += [
"sim:integrator = monte-carlo",
"sim:program = curie-temperature",
]
# Default Monte Carlo params
input_script += [
"sim:equilibration-time-steps = %d" % (equil_timesteps),
"sim:loop-time-steps = %d" % (mc_timesteps),
"sim:time-steps-increment = 1",
]
# Set temperature range and step size of simulation
if "start_t" in self.user_input_settings:
start_t = self.user_input_settings["start_t"]
else:
start_t = 0
if "end_t" in self.user_input_settings:
end_t = self.user_input_settings["end_t"]
else:
end_t = 1500
if "temp_increment" in self.user_input_settings:
temp_increment = self.user_input_settings["temp_increment"]
else:
temp_increment = 25
input_script += [
"sim:minimum-temperature = %d" % (start_t),
"sim:maximum-temperature = %d" % (end_t),
"sim:temperature-increment = %d" % (temp_increment),
]
# Output to save
input_script += [
"output:temperature",
"output:mean-magnetisation-length",
"output:material-mean-magnetisation-length",
"output:mean-susceptibility",
]
input_script = "\n".join(input_script)
with open("input", "w") as f:
f.write(input_script)
def _create_ucf(self):
structure = self.structure
mat_name = self.mat_name
abc = structure.lattice.abc
ucx, ucy, ucz = abc[0], abc[1], abc[2]
ucf = ["# Unit cell size:"]
ucf += ["%.10f %.10f %.10f" % (ucx, ucy, ucz)]
ucf += ["# Unit cell lattice vectors:"]
a1 = list(structure.lattice.matrix[0])
ucf += ["%.10f %.10f %.10f" % (a1[0], a1[1], a1[2])]
a2 = list(structure.lattice.matrix[1])
ucf += ["%.10f %.10f %.10f" % (a2[0], a2[1], a2[2])]
a3 = list(structure.lattice.matrix[2])
ucf += ["%.10f %.10f %.10f" % (a3[0], a3[1], a3[2])]
nmats = max(self.mat_id_dict.values())
ucf += ["# Atoms num_materials; id cx cy cz mat cat hcat"]
ucf += ["%d %d" % (len(structure), nmats)]
# Fractional coordinates of atoms
for site, r in enumerate(structure.frac_coords):
# Back to 0 indexing for some reason...
mat_id = self.mat_id_dict[site] - 1
ucf += ["%d %.10f %.10f %.10f %d 0 0" % (site, r[0], r[1], r[2], mat_id)]
# J_ij exchange interaction matrix
sgraph = self.sgraph
ninter = 0
for i, node in enumerate(sgraph.graph.nodes):
ninter += sgraph.get_coordination_of_site(i)
ucf += ["# Interactions"]
ucf += ["%d isotropic" % (ninter)]
iid = 0  # counts the number of interactions
for i, node in enumerate(sgraph.graph.nodes):
connections = sgraph.get_connected_sites(i)
for c in connections:
jimage = c[1] # relative integer coordinates of atom j
dx = jimage[0]
dy = jimage[1]
dz = jimage[2]
j = c[2] # index of neighbor
dist = round(c[-1], 2)
# Look up J_ij between the sites
if self.avg is True: # Just use <J> estimate
j_exc = self.hm.javg
else:
j_exc = self.hm._get_j_exc(i, j, dist)
# Convert J_ij from meV to Joules
j_exc *= 1.6021766e-22
j_exc = str(j_exc) # otherwise this rounds to 0
ucf += ["%d %d %d %d %d %d %s" % (iid, i, j, dx, dy, dz, j_exc)]
iid += 1
ucf = "\n".join(ucf)
ucf_file_name = mat_name + ".ucf"
with open(ucf_file_name, "w") as f:
f.write(ucf)
@staticmethod
def parse_stdout(vamp_stdout, nmats):
"""Parse stdout from Vampire.
Args:
vamp_stdout (txt file): Vampire 'output' file.
nmats (int): Num of materials in Vampire sim.
Returns:
parsed_out (DataFrame): MSONable vampire output.
critical_temp (float): Calculated critical temp.
"""
names = ["T", "m_total"] + ["m_" + str(i) for i in range(1, nmats + 1)] + ["X_x", "X_y", "X_z", "X_m", "nan"]
# Parsing vampire MC output
df = pd.read_csv(vamp_stdout, sep="\t", skiprows=9, header=None, names=names)
df.drop("nan", axis=1, inplace=True)
parsed_out = df.to_json()
# Max of susceptibility <-> critical temp
critical_temp = df.iloc[df.X_m.idxmax()]["T"]
return parsed_out, critical_temp
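# A minimal, hypothetical sketch of post-processing a finished run with the
# static parser above (it assumes a vampire 'output' file is present in the
# working directory and that the simulation used two materials):
#
#     parsed_out, critical_temp = VampireCaller.parse_stdout("output", nmats=2)
#     df = pd.read_json(parsed_out)  # recover the parsed DataFrame
#     print("Monte Carlo Tc estimate: %.1f K" % critical_temp)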
class VampireOutput(MSONable):
"""
This class processes results from a Vampire Monte Carlo simulation
and returns the critical temperature.
"""
def __init__(self, parsed_out=None, nmats=None, critical_temp=None):
"""
Args:
parsed_out (json): json rep of parsed stdout DataFrame.
nmats (int): Number of distinct materials (1 for each specie and up/down spin).
critical_temp (float): Monte Carlo Tc result.
"""
self.parsed_out = parsed_out
self.nmats = nmats
self.critical_temp = critical_temp
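# Hedged usage sketch (not part of the original module): 'orderings' and
# 'energies' are hypothetical variables holding low-energy magnetic Structure
# objects and their energies; with those in hand, the caller fits a Heisenberg
# model and launches the Monte Carlo critical temperature calculation.
#
#     vc = VampireCaller(ordered_structures=orderings, energies=energies,
#                        mc_box_size=4.0, equil_timesteps=2000,
#                        mc_timesteps=4000)
#     print(vc.output.critical_temp)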
| mit |
cgre-aachen/gempy | gempy/core/grid_modules/grid_types.py | 1 | 24018 | from gempy.core.grid_modules.create_topography import LoadDEMArtificial, LoadDEMGDAL
import numpy as np
import skimage.transform
import matplotlib.pyplot as plt
from scipy.constants import G
from scipy import interpolate
from gempy.utils.meta import _setdoc_pro
import gempy.utils.docstring as ds
import pandas as pn
class RegularGrid:
"""
Class with the methods and properties to manage 3D regular grids where the model will be interpolated.
Args:
extent (np.ndarray): [x_min, x_max, y_min, y_max, z_min, z_max]
resolution (np.ndarray): [nx, ny, nz]
Attributes:
extent (np.ndarray): [x_min, x_max, y_min, y_max, z_min, z_max]
resolution (np.ndarray): [nx, ny, nz]
values (np.ndarray): XYZ coordinates
mask_topo (np.ndarray, dtype=bool): same shape as values. Values above the topography are False
dx (float): size of the cells on x
dy (float): size of the cells on y
dz (float): size of the cells on z
"""
def __init__(self, extent=None, resolution=None, **kwargs):
self.resolution = np.ones((0, 3), dtype='int64')
self.extent = np.zeros(6, dtype='float64')
self.extent_r = np.zeros(6, dtype='float64')
self.values = np.zeros((0, 3))
self.values_r = np.zeros((0, 3))
self.mask_topo = np.zeros((0, 3), dtype=bool)
self.x = None
self.y = None
self.z = None
if extent is not None and resolution is not None:
self.set_regular_grid(extent, resolution)
self.dx, self.dy, self.dz = self.get_dx_dy_dz()
def set_coord(self, extent, resolution):
dx = (extent[1] - extent[0]) / resolution[0]
dy = (extent[3] - extent[2]) / resolution[1]
dz = (extent[5] - extent[4]) / resolution[2]
self.x = np.linspace(extent[0] + dx / 2, extent[1] - dx / 2, resolution[0],
dtype="float64")
self.y = np.linspace(extent[2] + dy / 2, extent[3] - dy / 2, resolution[1],
dtype="float64")
self.z = np.linspace(extent[4] + dz / 2, extent[5] - dz / 2, resolution[2],
dtype="float64")
return self.x, self.y, self.z
def create_regular_grid_3d(self, extent, resolution):
"""
Method to create the 3D regular grid where the model is interpolated
Args:
extent (list): [x_min, x_max, y_min, y_max, z_min, z_max]
resolution (list): [nx, ny, nz].
Returns:
numpy.ndarray: Unraveled 3D numpy array where every row corresponds to the xyz coordinates of a regular grid
"""
coords = self.set_coord(extent, resolution)
g = np.meshgrid(*coords, indexing="ij")
values = np.vstack(tuple(map(np.ravel, g))).T.astype("float64")
return values
def get_dx_dy_dz(self, rescale=False):
if rescale is True:
dx = (self.extent_r[1] - self.extent_r[0]) / self.resolution[0]
dy = (self.extent_r[3] - self.extent_r[2]) / self.resolution[1]
dz = (self.extent_r[5] - self.extent_r[4]) / self.resolution[2]
else:
dx = (self.extent[1] - self.extent[0]) / self.resolution[0]
dy = (self.extent[3] - self.extent[2]) / self.resolution[1]
dz = (self.extent[5] - self.extent[4]) / self.resolution[2]
return dx, dy, dz
def set_regular_grid(self, extent, resolution):
"""
Set a regular grid into the values parameters for further computations
Args:
extent (list, np.ndarray): [x_min, x_max, y_min, y_max, z_min, z_max]
resolution (list, np.ndarray): [nx, ny, nz]
"""
self.extent = np.asarray(extent, dtype='float64')
self.resolution = np.asarray(resolution)
self.values = self.create_regular_grid_3d(extent, resolution)
self.length = self.values.shape[0]
self.dx, self.dy, self.dz = self.get_dx_dy_dz()
return self.values
def set_topography_mask(self, topography):
"""This method takes a topography grid of the same extent as the regular
grid and creates a mask of voxels
Args:
topography (:class:`gempy.core.grid_modules.topography.Topography`):
Returns:
"""
assert np.array_equal(topography.extent,
self.extent), 'The extent of ' \
'the topography must match the extent of the regular grid.'
# interpolate topography values to the regular grid
regular_grid_topo = skimage.transform.resize(
topography.values_2d,
(self.resolution[0], self.resolution[1]),
mode='constant',
anti_aliasing=False, preserve_range=True)
# Reshape the Z values of the regular grid to 3d
values_3d = self.values[:, 2].reshape(self.resolution)
if regular_grid_topo.ndim == 3:
regular_grid_topo_z = regular_grid_topo[:, :, [2]]
elif regular_grid_topo.ndim == 2:
regular_grid_topo_z = regular_grid_topo
else:
raise ValueError('Unexpected number of dimensions in the topography array: {}'.format(regular_grid_topo.ndim))
mask = np.greater(values_3d[:, :, :], regular_grid_topo_z)
self.mask_topo = mask
return self.mask_topo
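# Illustrative sketch (made-up extent and resolution, not from the original
# module): a regular grid spanning a 1 km cube at 50 x 50 x 50 voxels can be
# built directly from an extent and a resolution.
#
#     grid = RegularGrid(extent=[0, 1000, 0, 1000, 0, 1000],
#                        resolution=[50, 50, 50])
#     grid.values.shape   # -> (125000, 3), one xyz row per voxel centre
#     grid.dx             # -> 20.0, cell size along x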
class Sections:
"""
Object that creates a grid of cross sections between two points.
Args:
regular_grid: Model.grid.regular_grid
section_dict: {'section name': ([p1_x, p1_y], [p2_x, p2_y], [xyres, zres])}
"""
def __init__(self, regular_grid=None, z_ext=None, section_dict=None):
if regular_grid is not None:
self.z_ext = regular_grid.extent[4:]
else:
self.z_ext = z_ext
self.section_dict = section_dict
self.names = []
self.points = []
self.resolution = []
self.length = [0]
self.dist = []
self.df = pn.DataFrame()
self.df['dist'] = self.dist
self.values = []
self.extent = None
if section_dict is not None:
self.set_sections(section_dict)
def _repr_html_(self):
return self.df.to_html()
def __repr__(self):
return self.df.to_string()
def show(self):
pass
def set_sections(self, section_dict, regular_grid=None, z_ext=None):
self.section_dict = section_dict
if regular_grid is not None:
self.z_ext = regular_grid.extent[4:]
self.names = np.array(list(self.section_dict.keys()))
self.get_section_params()
self.calculate_all_distances()
self.df = pn.DataFrame.from_dict(self.section_dict, orient='index',
columns=['start', 'stop', 'resolution'])
self.df['dist'] = self.dist
self.compute_section_coordinates()
def get_section_params(self):
self.points = []
self.resolution = []
self.length = [0]
for i, section in enumerate(self.names):
points = [self.section_dict[section][0], self.section_dict[section][1]]
assert points[0] != points[
1], 'The start and end points of the section must not be identical.'
self.points.append(points)
self.resolution.append(self.section_dict[section][2])
self.length = np.append(self.length, self.section_dict[section][2][0] *
self.section_dict[section][2][1])
self.length = np.array(self.length).cumsum()
def calculate_all_distances(self):
self.coordinates = np.array(self.points).ravel().reshape(-1,
4)  # axes are x1, y1, x2, y2
self.dist = np.sqrt(np.diff(self.coordinates[:, [0, 2]]) ** 2 + np.diff(
self.coordinates[:, [1, 3]]) ** 2)
@staticmethod
def distance_2_points(p1, p2):
return np.sqrt(np.diff((p1[0], p2[0])) ** 2 + np.diff((p1[1], p2[1])) ** 2)
def compute_section_coordinates(self):
for i in range(len(self.names)):
xy = self.calculate_line_coordinates_2points(self.coordinates[i, :2],
self.coordinates[i, 2:],
self.resolution[i][0])
zaxis = np.linspace(self.z_ext[0], self.z_ext[1], self.resolution[i][1],
dtype="float64")
X, Z = np.meshgrid(xy[:, 0], zaxis, indexing='ij')
Y, _ = np.meshgrid(xy[:, 1], zaxis, indexing='ij')
xyz = np.vstack((X.flatten(), Y.flatten(), Z.flatten())).T
if i == 0:
self.values = xyz
else:
self.values = np.vstack((self.values, xyz))
def generate_axis_coord(self):
for i, name in enumerate(self.names):
xy = self.calculate_line_coordinates_2points(
self.coordinates[i, :2],
self.coordinates[i, 2:],
self.resolution[i][0]
)
yield name, xy
def calculate_line_coordinates_2points(self, p1, p2, res):
if isinstance(p1, list):
p1 = np.array(p1)
if isinstance(p2, list):
p2 = np.array(p2)
v = p2 - p1 # vector pointing from p1 to p2
u = v / np.linalg.norm(v) # normalize it
distance = self.distance_2_points(p1, p2)
steps = np.linspace(0, distance, res)
values = p1.reshape(2, 1) + u.reshape(2, 1) * steps.ravel()
return values.T
def get_section_args(self, section_name: str):
where = np.where(self.names == section_name)[0][0]
return self.length[where], self.length[where + 1]
def get_section_grid(self, section_name: str):
l0, l1 = self.get_section_args(section_name)
return self.values[l0:l1]
@staticmethod
def interpolate_zvals_at_xy(xy, topography, method='interp2d'):
"""
Interpolates DEM values on a defined section
Args:
xy: x (EW) and y (NS) coordinates of the profile
topography (:class:`gempy.core.grid_modules.topography.Topography`)
method: interpolation method, 'interp2d' for cubic scipy.interpolate.interp2d
'spline' for scipy.interpolate.RectBivariateSpline
Returns:
numpy.ndarray: z values, i.e. topography along the profile
"""
xj = topography.values_2d[:, 0, 0]
yj = topography.values_2d[0, :, 1]
zj = topography.values_2d[:, :, 2]
if method == 'interp2d':
f = interpolate.interp2d(xj, yj, zj.T, kind='cubic')
zi = f(xy[:, 0], xy[:, 1])
if xy[:, 0][0] <= xy[:, 0][-1] and xy[:, 1][0] <= xy[:, 1][-1]:
return np.diag(zi)
else:
return np.flipud(zi).diagonal()
else:
assert xy[:, 0][0] <= xy[:, 0][-1], \
'The x values of the first point must be smaller than those of the second. ' \
'Please use interp2d as the method argument instead. Will be fixed.'
assert xy[:, 1][0] <= xy[:, 1][-1], \
'The y values of the first point must be smaller than those of the second. ' \
'Please use interp2d as the method argument instead. Will be fixed.'
f = interpolate.RectBivariateSpline(xj, yj, zj)
zi = f(xy[:, 0], xy[:, 1])
return np.flipud(zi).diagonal()
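# Illustrative sketch (hypothetical section definition): each entry of
# section_dict maps a section name to a start point, an end point and a
# resolution; the resulting grid stacks the xyz values of every section.
#
#     section_dict = {'section1': ([0, 0], [1000, 1000], [100, 80])}
#     sections = Sections(z_ext=[0, 1000], section_dict=section_dict)
#     sections.get_section_args('section1')         # -> (0, 8000)
#     sections.get_section_grid('section1').shape   # -> (8000, 3)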
class CustomGrid:
"""Object that contains arbitrary XYZ coordinates.
Args:
custom_grid (numpy.ndarray like): XYZ (in columns) of the desired coordinates
Attributes:
values (np.ndarray): XYZ coordinates
"""
def __init__(self, custom_grid: np.ndarray):
self.values = np.zeros((0, 3))
self.set_custom_grid(custom_grid)
def set_custom_grid(self, custom_grid: np.ndarray):
"""
Give the coordinates of an external generated grid
Args:
custom_grid (numpy.ndarray like): XYZ (in columns) of the desired coordinates
Returns:
numpy.ndarray: Unraveled 3D numpy array where every row corresponds to the xyz coordinates of a regular
grid
"""
custom_grid = np.atleast_2d(custom_grid)
assert type(custom_grid) is np.ndarray and custom_grid.shape[1] == 3, \
'The shape of new grid must be (n,3) where n is the number of' \
' points of the grid'
self.values = custom_grid
self.length = self.values.shape[0]
return self.values
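# Illustrative sketch (made-up coordinates): a custom grid simply stores the
# user-supplied xyz points so the model can be evaluated at arbitrary locations.
#
#     cg = CustomGrid(np.array([[100., 100., -50.],
#                               [200., 200., -75.]]))
#     cg.length   # -> 2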
class CenteredGrid:
"""
Logarithmically spaced grid.
"""
def __init__(self, centers=None, radius=None, resolution=None):
self.grid_type = 'centered_grid'
self.values = np.empty((0, 3))
self.length = self.values.shape[0]
self.resolution = resolution
self.kernel_centers = np.empty((0, 3))
self.kernel_dxyz_left = np.empty((0, 3))
self.kernel_dxyz_right = np.empty((0, 3))
self.tz = np.empty(0)
if centers is not None and radius is not None:
if resolution is None:
resolution = [10, 10, 20]
self.set_centered_grid(centers=centers, radius=radius,
resolution=resolution)
@staticmethod
@_setdoc_pro(ds.resolution)
def create_irregular_grid_kernel(resolution, radius):
"""
Create an isometric grid kernel (centered at 0)
Args:
resolution: [s0]
radius (float): Maximum distance of the kernel
Returns:
tuple: center of the voxel, left edge of each voxel (for xyz), right edge of each voxel (for xyz).
"""
if not isinstance(radius, (list, np.ndarray)):
radius = np.repeat(radius, 3)
g_ = []
g_2 = []
d_ = []
for xyz in [0, 1, 2]:
if xyz == 2:
# Make the grid only negative for the z axis
g_.append(np.geomspace(0.01, 1, int(resolution[xyz])))
g_2.append(
(np.concatenate(([0], g_[xyz])) + 0.05) * - radius[xyz] * 1.2)
else:
g_.append(np.geomspace(0.01, 1, int(resolution[xyz] / 2)))
g_2.append(
np.concatenate((-g_[xyz][::-1], [0], g_[xyz])) * radius[xyz])
d_.append(np.diff(np.pad(g_2[xyz], 1, 'reflect', reflect_type='odd')))
g = np.meshgrid(*g_2)
d_left = np.meshgrid(d_[0][:-1] / 2, d_[1][:-1] / 2, d_[2][:-1] / 2)
d_right = np.meshgrid(d_[0][1:] / 2, d_[1][1:] / 2, d_[2][1:] / 2)
kernel_g = np.vstack(tuple(map(np.ravel, g))).T.astype("float64")
kernel_d_left = np.vstack(tuple(map(np.ravel, d_left))).T.astype("float64")
kernel_d_right = np.vstack(tuple(map(np.ravel, d_right))).T.astype("float64")
return kernel_g, kernel_d_left, kernel_d_right
@_setdoc_pro(ds.resolution)
def set_centered_kernel(self, resolution, radius):
"""
Set a centered kernel around the origin.
Args:
resolution: [s0]
radius (float): Maximum distance of the kernel
Returns:
"""
self.kernel_centers, self.kernel_dxyz_left, self.kernel_dxyz_right = self.create_irregular_grid_kernel(
resolution, radius)
return self.kernel_centers
@_setdoc_pro(ds.resolution)
def set_centered_grid(self, centers, kernel_centers=None, **kwargs):
"""
Main method of the class: sets the XYZ values around the centers using a kernel.
Args:
centers (np.array): XYZ array with the centers of where we want to create a grid around
kernel_centers (Optional[np.array]): center of the voxels of a desired kernel.
**kwargs:
* resolution: [s0]
* radius (float): Maximum distance of the kernel
Returns:
"""
self.values = np.empty((0, 3))
centers = np.atleast_2d(centers)
if kernel_centers is None:
kernel_centers = self.set_centered_kernel(**kwargs)
assert centers.shape[
1] == 3, 'Centers must be a numpy array that contains the coordinates XYZ'
for i in centers:
self.values = np.vstack((self.values, i + kernel_centers))
self.length = self.values.shape[0]
def set_tz_kernel(self, **kwargs):
if self.kernel_centers.size == 0:
self.set_centered_kernel(**kwargs)
grid_values = self.kernel_centers
s_gr_x = grid_values[:, 0]
s_gr_y = grid_values[:, 1]
s_gr_z = grid_values[:, 2]
# getting the coordinates of the corners of the voxel...
x_cor = np.stack((s_gr_x - self.kernel_dxyz_left[:, 0],
s_gr_x + self.kernel_dxyz_right[:, 0]), axis=1)
y_cor = np.stack((s_gr_y - self.kernel_dxyz_left[:, 1],
s_gr_y + self.kernel_dxyz_right[:, 1]), axis=1)
z_cor = np.stack((s_gr_z - self.kernel_dxyz_left[:, 2],
s_gr_z + self.kernel_dxyz_right[:, 2]), axis=1)
# ...and prepare them for a vectorial op
x_matrix = np.repeat(x_cor, 4, axis=1)
y_matrix = np.tile(np.repeat(y_cor, 2, axis=1), (1, 2))
z_matrix = np.tile(z_cor, (1, 4))
s_r = np.sqrt(x_matrix ** 2 + y_matrix ** 2 + z_matrix ** 2)
# This is the vector that determines the sign of the corner of the voxel
mu = np.array([1, -1, -1, 1, -1, 1, 1, -1])
self.tz = (
np.sum(- 1 *
G *
mu * (
x_matrix * np.log(y_matrix + s_r) +
y_matrix * np.log(x_matrix + s_r) -
z_matrix * np.arctan(
x_matrix * y_matrix / (z_matrix * s_r))),
axis=1))
return self.tz
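# Illustrative sketch (assumed centre, radius and resolution): a centered grid
# places a logarithmically spaced kernel of voxels around each device location,
# which is what the gravity response computed in set_tz_kernel operates on.
#
#     cg = CenteredGrid(centers=[[500., 500., 0.]], radius=1000.,
#                       resolution=[10, 10, 20])
#     cg.values.shape      # one xyz row per kernel voxel around the centre
#     tz = cg.set_tz_kernel()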
# class Topography:
# """
# Object to include topography in the model.
# """
# def __init__(self, regular_grid):
# self.regular_grid = regular_grid
# self.values = np.zeros((0, 3))
#
# self.topo = None
# self.values_3D = np.zeros((0, 0, 0))
# self.extent = None
# self.resolution = None
#
# self.type = None
#
# def load_from_gdal(self, filepath):
# self.topo = Load_DEM_GDAL(filepath, self.regular_grid)
# self._create_init()
# self._fit2model()
# self.type = 'real'
#
# def load_random_hills(self, **kwargs):
# self.topo = LoadDEMArtificial(self.regular_grid, **kwargs)
# self._create_init()
# self._fit2model()
# self.type = 'artificial'
#
# def load_from_saved(self, filepath):
# assert filepath[-4:] == '.npy', 'The file must end on .npy'
# topo = np.load(filepath, allow_pickle=True)
# self.values_3D = topo[0]
# self.extent = topo[1]
# self.resolution = topo[2]
# self._fit2model()
# self.type = 'real'
#
# def _create_init(self):
# self.values_3D = self.topo.values_3D
# self.extent = self.topo.extent
# self.resolution = self.topo.resolution
#
# # These two methods makes more sense in regular grid passing a topography
# # object.
# def _fit2model(self):
# self.values = np.vstack((
# self.values_3D[:, :, 0].ravel(), self.values_3D[:, :, 1].ravel(),
# self.values_3D[:, :, 2].ravel())).T.astype("float64")
#
# if np.any(self.regular_grid.extent[:4] - self.extent) != 0:
# print('obacht')
# # todo if grid extent bigger fill missing values with nans for chloe
# self._crop()
#
# if np.any(self.regular_grid.resolution[:2] - self.resolution) != 0:
# self._resize()
# else:
# self.values_3D_res = self.values_3D
#
# self.regular_grid.mask_topo = self._create_grid_mask()
#
# def _crop(self):
# pass
#
# def _resize(self):
# self.values_3D_res = skimage.transform.resize(self.values_3D,
# (self.regular_grid.resolution[0], self.regular_grid.resolution[1]),
# mode='constant',
# anti_aliasing=False, preserve_range=True)
#
# def show(self):
# from gempy.plot.helpers import add_colorbar
# if self.type == 'artificial':
# fig, ax = plt.subplots()
# CS= ax.contour(self.values_3D[:, :, 2], extent=(self.extent[:4]), colors='k', linestyles='solid')
# ax.clabel(CS, inline=1, fontsize=10, fmt='%d')
# CS2 = ax.contourf(self.values_3D[:, :, 2], extent=(self.extent[:4]), cmap='terrain')
# add_colorbar(axes=ax, label='elevation [m]', cs=CS2)
# else:
# im = plt.imshow(np.flipud(self.values_3D[:,:,2]), extent=(self.extent[:4]))
# add_colorbar(im=im, label='elevation [m]')
# plt.axis('scaled')
# plt.xlabel('X')
# plt.ylabel('Y')
# plt.title('Model topography')
#
# def save(self, filepath):
# """
# Save the topography file in a numpy array which can be loaded later, to avoid the gdal process.
# Args:
# filepath (str): path where the array should be stored.
#
# Returns:
#
# """
# np.save(filepath, np.array([self.values_3D, self.extent, self.resolution]))
# print('saved')
#
# def _create_grid_mask(self):
# ind = self._find_indices()
# gridz = self.regular_grid.values[:, 2].reshape(*self.regular_grid.resolution).copy()
# for x in range(self.regular_grid.resolution[0]):
# for y in range(self.regular_grid.resolution[1]):
# z = ind[x, y]
# gridz[x, y, z:] = 99999
# mask = (gridz == 99999)
# return mask# np.multiply(np.full(self.regular_grid.values.shape, True).T, mask.ravel()).T
#
# def _find_indices(self):
# zs = np.linspace(self.regular_grid.extent[4], self.regular_grid.extent[5], self.regular_grid.resolution[2])
# dz = (zs[-1] - zs[0]) / len(zs)
# return ((self.values_3D_res[:, :, 2] - zs[0]) / dz + 1).astype(int)
#
# def interpolate_zvals_at_xy(self, xy, method='interp2d'):
# """
# Interpolates DEM values on a defined section
#
# Args:
# :param xy: x (EW) and y (NS) coordinates of the profile
# :param method: interpolation method, 'interp2d' for cubic scipy.interpolate.interp2d
# 'spline' for scipy.interpolate.RectBivariateSpline
# Returns:
# :return: z values, i.e. topography along the profile
# """
# xj = self.values_3D[:, :, 0][0, :]
# yj = self.values_3D[:, :, 1][:, 0]
# zj = self.values_3D[:, :, 2]
#
# if method == 'interp2d':
# f = interpolate.interp2d(xj, yj, zj, kind='cubic')
# zi = f(xy[:, 0], xy[:, 1])
# if xy[:, 0][0] <= xy[:, 0][-1] and xy[:, 1][0] <= xy[:, 1][-1]:
# return np.diag(zi)
# else:
# return np.flipud(zi).diagonal()
# else:
# assert xy[:, 0][0] <= xy[:, 0][-1], 'The xy values of the first point must be smaller than second.' \
# 'Please use interp2d as method argument. Will be fixed.'
# assert xy[:, 1][0] <= xy[:, 1][-1], 'The xy values of the first point must be smaller than second.' \
# 'Please use interp2d as method argument. Will be fixed.'
# f = interpolate.RectBivariateSpline(xj, yj, zj)
# zi = f(xy[:, 0], xy[:, 1])
# return np.flipud(zi).diagonal()
| lgpl-3.0 |
elkingtonmcb/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 230 | 19795 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
# Misc tests on linkage
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
assert_raises(ValueError, linkage_tree, X, linkage='foo')
assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
# test hierarchical clustering on a precomputed distance matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
# test hierarchical clustering with a callable affinity (manhattan distances)
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
# Check that we obtain the correct solution for structured linkage trees.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError,
tree_builder, X.T, np.ones((4, 4)))
# Check that fitting with no samples raises an error
assert_raises(ValueError,
tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
# Check that we obtain the correct solution for unstructured linkage trees.
rng = np.random.RandomState(0)
X = rng.randn(50, 100)
for this_X in (X, X[0]):
# Specify a number of clusters just for the sake of
# raising a warning and testing the warning code
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, ward_tree, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, tree_builder, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
# Check that the height of the results of linkage tree is sorted.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering():
# Check that we obtain the correct number of clusters with
# agglomerative clustering.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
try:
tempdir = mkdtemp()
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=tempdir,
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
finally:
shutil.rmtree(tempdir)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
labels), 1)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
# Check that we raise a TypeError on dense matrices
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.toarray()[:10, :10]),
linkage=linkage)
assert_raises(ValueError, clustering.fit, X)
# Test that using ward with another metric than euclidean raises an
# exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
affinity="manhattan",
linkage="ward")
assert_raises(ValueError, clustering.fit, X)
# Test using another metric than euclidean works with linkage complete
for affinity in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
clustering.labels_),
1)
# Test that using a distance matrix (affinity = 'precomputed') has same
# results (with connectivity constraints)
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage="complete")
clustering.fit(X)
X_dist = pairwise_distances(X)
clustering2 = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
affinity='precomputed',
linkage="complete")
clustering2.fit(X_dist)
assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
# Check that we obtain the correct solution in a simplistic case
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert_true(np.size(np.unique(agglo.labels_)) == 5)
X_red = agglo.transform(X)
assert_true(X_red.shape[1] == 5)
X_full = agglo.inverse_transform(X_red)
assert_true(np.unique(X_full[0]).size == 5)
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
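# For instance (hypothetical labellings), [0, 0, 1] and [1, 1, 0] describe the
# same partition, give identical co-clustering matrices and would pass the
# check above:
#
#     assess_same_labelling(np.array([0, 0, 1]), np.array([1, 1, 0]))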
def test_scikit_vs_scipy():
# Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
n, p, k = 10, 5, 3
rng = np.random.RandomState(0)
# Not using a lil_matrix here, just to check that non-sparse
# matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
# Check that connectivity in the ward tree is propagated correctly during
# merging.
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144)])
connectivity = kneighbors_graph(X, 10, include_self=False)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage='ward')
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_ward_tree_children_order():
# Check that children are ordered in the same way for both structured and
# unstructured versions of ward_tree.
# test on five random datasets
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X)
out_structured = ward_tree(X, connectivity=connectivity)
assert_array_equal(out_unstructured[0], out_structured[0])
def test_ward_linkage_tree_return_distance():
# Test return_distance option on linkage and ward trees
# test that return_distance when set true, gives same
# output on both structured and unstructured clustering.
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X, return_distance=True)
out_structured = ward_tree(X, connectivity=connectivity,
return_distance=True)
# get children
children_unstructured = out_unstructured[0]
children_structured = out_structured[0]
# check if we got the same clusters
assert_array_equal(children_unstructured, children_structured)
# check if the distances are the same
dist_unstructured = out_unstructured[-1]
dist_structured = out_structured[-1]
assert_array_almost_equal(dist_unstructured, dist_structured)
for linkage in ['average', 'complete']:
structured_items = linkage_tree(
X, connectivity=connectivity, linkage=linkage,
return_distance=True)[-1]
unstructured_items = linkage_tree(
X, linkage=linkage, return_distance=True)[-1]
structured_dist = structured_items[-1]
unstructured_dist = unstructured_items[-1]
structured_children = structured_items[0]
unstructured_children = unstructured_items[0]
assert_array_almost_equal(structured_dist, unstructured_dist)
assert_array_almost_equal(
structured_children, unstructured_children)
# test on the following dataset where we know the truth
# taken from scipy/cluster/tests/hierarchy_test_data.py
X = np.array([[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355]])
# truth
linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
linkage_X_complete = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.96742194, 4.],
[7., 9., 18.77445997, 6.]])
linkage_X_average = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.55832839, 4.],
[7., 9., 15.44089605, 6.]])
n_samples, n_features = np.shape(X)
connectivity_X = np.ones((n_samples, n_samples))
out_X_unstructured = ward_tree(X, return_distance=True)
out_X_structured = ward_tree(X, connectivity=connectivity_X,
return_distance=True)
# check that the labels are the same
assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
linkage_options = ['complete', 'average']
X_linkage_truth = [linkage_X_complete, linkage_X_average]
for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
out_X_unstructured = linkage_tree(
X, return_distance=True, linkage=linkage)
out_X_structured = linkage_tree(
X, connectivity=connectivity_X, linkage=linkage,
return_distance=True)
# check that the labels are the same
assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
assert_array_equal(X_truth[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
# Check non regression of a bug if a non item assignable connectivity is
# provided with more than one component.
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage='ward')
assert_warns(UserWarning, w.fit, x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50).astype(np.intp)[::2]
other_values = 0.5 * np.ones(50)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(
connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False))
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_connectivity_ignores_diagonal():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
connectivity_include_self = kneighbors_graph(X, 3, include_self=True)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self)
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_compute_full_tree():
# Test that the full tree is computed if n_clusters is small
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
connectivity = kneighbors_graph(X, 5, include_self=False)
# When n_clusters is less, the full tree should be built
# that is the number of merges should be n_samples - 1
agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - 1)
# When n_clusters is large (greater than the max of 100 and 0.02 * n_samples),
# tree building should stop once n_clusters clusters remain.
n_clusters = 101
X = rng.randn(200, 2)
connectivity = kneighbors_graph(X, 10, include_self=False)
agc = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - n_clusters)
def test_n_components():
# Test n_components returned by linkage, average and ward tree
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Connectivity matrix having five components.
connectivity = np.eye(5)
for linkage_func in _TREE_BUILDERS.values():
assert_equal(ignore_warnings(linkage_func)(X, connectivity)[1], 5)
def test_agg_n_clusters():
# Test that an error is raised when n_clusters <= 0
rng = np.random.RandomState(0)
X = rng.rand(20, 10)
for n_clus in [-1, 0]:
agc = AgglomerativeClustering(n_clusters=n_clus)
msg = ("n_clusters should be an integer greater than 0."
" %s was provided." % str(agc.n_clusters))
assert_raise_message(ValueError, msg, agc.fit, X)
| bsd-3-clause |
bthirion/nistats | examples/02_first_level_models/plot_fiac_analysis.py | 1 | 6196 | """Simple example of two-session fMRI model fitting
================================================
Full step-by-step example of fitting a GLM to experimental data and visualizing
the results. This is done on two runs of one subject of the FIAC dataset.
For details on the data, please see:
Dehaene-Lambertz G, Dehaene S, Anton JL, Campagne A, Ciuciu P, Dehaene
G, Denghien I, Jobert A, LeBihan D, Sigman M, Pallier C, Poline
JB. Functional segregation of cortical language areas by sentence
repetition. Hum Brain Mapp. 2006: 27:360--371.
http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=2653076#R11
More specifically:
1. A sequence of fMRI volumes are loaded
2. A design matrix describing all the effects related to the data is computed
3. a mask of the useful brain volume is computed
4. A GLM is applied to the dataset (effect/covariance,
then contrast estimation)
Technically, this example shows how to handle two sessions that
contain the same experimental conditions. The model directly returns a
fixed effect of the statistics across the two sessions.
"""
###############################################################################
# Create a write directory to work
# it will be a 'results' subdirectory of the current directory.
from os import mkdir, path, getcwd
write_dir = path.join(getcwd(), 'results')
if not path.exists(write_dir):
mkdir(write_dir)
#########################################################################
# Prepare data and analysis parameters
# --------------------------------------
#
# Note that there are two sessions
from nistats import datasets
data = datasets.fetch_fiac_first_level()
fmri_img = [data['func1'], data['func2']]
#########################################################################
# Create a mean image for plotting purpose
from nilearn.image import mean_img
mean_img_ = mean_img(fmri_img[0])
#########################################################################
# The design matrices were pre-computed; we simply load them into a list of DataFrames
design_files = [data['design_matrix1'], data['design_matrix2']]
import pandas as pd
import numpy as np
design_matrices = [pd.DataFrame(np.load(df)['X']) for df in design_files]
#########################################################################
# GLM estimation
# ----------------------------------
# GLM specification. Note that the mask was provided in the dataset. So we use it.
from nistats.first_level_model import FirstLevelModel
fmri_glm = FirstLevelModel(mask_img=data['mask'], minimize_memory=True)
#########################################################################
# GLM fitting
fmri_glm = fmri_glm.fit(fmri_img, design_matrices=design_matrices)
#########################################################################
# Compute fixed effects of the two runs and compute related images
# For this, we first define the contrasts as we would do for a single session
n_columns = design_matrices[0].shape[1]
def pad_vector(contrast_, n_columns):
"""A small routine to append zeros in contrast vectors"""
return np.hstack((contrast_, np.zeros(n_columns - len(contrast_))))
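#########################################################################
# Quick illustration (hedged, with made-up values that are not part of the
# original analysis): padding a two-entry contrast to five columns simply
# appends three zeros.
assert np.array_equal(pad_vector([1, -1], 5), [1., -1., 0., 0., 0.])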
#########################################################################
# Contrast specification
contrasts = {'SStSSp_minus_DStDSp': pad_vector([1, 0, 0, -1], n_columns),
'DStDSp_minus_SStSSp': pad_vector([-1, 0, 0, 1], n_columns),
'DSt_minus_SSt': pad_vector([-1, -1, 1, 1], n_columns),
'DSp_minus_SSp': pad_vector([-1, 1, -1, 1], n_columns),
'DSt_minus_SSt_for_DSp': pad_vector([0, -1, 0, 1], n_columns),
'DSp_minus_SSp_for_DSt': pad_vector([0, 0, -1, 1], n_columns),
'Deactivation': pad_vector([-1, -1, -1, -1, 4], n_columns),
'Effects_of_interest': np.eye(n_columns)[:5]}
#########################################################################
# Compute and plot statistics
from nilearn import plotting
print('Computing contrasts...')
for index, (contrast_id, contrast_val) in enumerate(contrasts.items()):
print(' Contrast % 2i out of %i: %s' % (
index + 1, len(contrasts), contrast_id))
# estimate the contrasts
# note that the model implicitly computes a fixed effect across the two sessions
z_map = fmri_glm.compute_contrast(
contrast_val, output_type='z_score')
# Write the resulting stat images to file
z_image_path = path.join(write_dir, '%s_z_map.nii.gz' % contrast_id)
z_map.to_filename(z_image_path)
#########################################################################
# Comparing session-specific and fixed effects.
# Here, we compare the activation maps produced for each session separately and then the fixed effects version
contrast_id = 'Effects_of_interest'
#########################################################################
# Statistics for the first session
fmri_glm = fmri_glm.fit(fmri_img[0], design_matrices=design_matrices[0])
z_map = fmri_glm.compute_contrast(
contrasts[contrast_id], output_type='z_score')
plotting.plot_stat_map(
z_map, bg_img=mean_img_, threshold=3.0,
title='%s, first session' % contrast_id)
#########################################################################
# Statistics for the second session
fmri_glm = fmri_glm.fit(fmri_img[1], design_matrices=design_matrices[1])
z_map = fmri_glm.compute_contrast(
contrasts[contrast_id], output_type='z_score')
plotting.plot_stat_map(
z_map, bg_img=mean_img_, threshold=3.0,
title='%s, second session' % contrast_id)
#########################################################################
# Fixed effects statistics
fmri_glm = fmri_glm.fit(fmri_img, design_matrices=design_matrices)
z_map = fmri_glm.compute_contrast(
contrasts[contrast_id], output_type='z_score')
plotting.plot_stat_map(
z_map, bg_img=mean_img_, threshold=3.0,
title='%s, fixed effects' % contrast_id)
#########################################################################
# Not unexpectedly, the fixed effects version displays higher peaks than the individual sessions. Computing fixed effects enhances the signal-to-noise ratio of the resulting brain maps
plotting.show()
| bsd-3-clause |
pkruskal/scikit-learn | sklearn/covariance/tests/test_graph_lasso.py | 272 | 5245 | """ Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and sklearn do not match in a few places, these
# values are for the sklearn version
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
| bsd-3-clause |
kmike/scikit-learn | sklearn/metrics/__init__.py | 3 | 2839 | """
The :mod:`sklearn.metrics` module includes score functions, performance metrics
and pairwise metrics and distance computations.
"""
from .metrics import (accuracy_score,
average_precision_score,
auc,
auc_score,
classification_report,
confusion_matrix,
explained_variance_score,
f1_score,
hamming_loss,
fbeta_score,
hinge_loss,
matthews_corrcoef,
mean_squared_error,
mean_absolute_error,
precision_recall_curve,
precision_recall_fscore_support,
precision_score,
recall_score,
r2_score,
roc_curve,
zero_one_loss)
# Will be removed in 0.15
from .metrics import zero_one
from .metrics import zero_one_score
from .scorer import Scorer, SCORERS
from . import cluster
from .cluster import (adjusted_rand_score,
adjusted_mutual_info_score,
completeness_score,
homogeneity_completeness_v_measure,
homogeneity_score,
mutual_info_score,
normalized_mutual_info_score,
silhouette_score,
silhouette_samples,
v_measure_score)
from .pairwise import (euclidean_distances,
pairwise_distances,
pairwise_kernels)
__all__ = ['accuracy_score',
'adjusted_mutual_info_score',
'adjusted_rand_score',
'auc',
'auc_score',
'average_precision_score',
'classification_report',
'cluster',
'completeness_score',
'confusion_matrix',
'euclidean_distances',
'explained_variance_score',
'f1_score',
'fbeta_score',
'hamming_loss',
'hinge_loss',
'homogeneity_completeness_v_measure',
'homogeneity_score',
'matthews_corrcoef',
'mean_squared_error',
'mean_absolute_error',
'mutual_info_score',
'normalized_mutual_info_score',
'pairwise_distances',
'pairwise_kernels',
'precision_recall_curve',
'precision_recall_fscore_support',
'precision_score',
'r2_score',
'recall_score',
'roc_curve',
'silhouette_score',
'silhouette_samples',
'v_measure_score',
'zero_one_loss',
'Scorer',
'SCORERS']
| bsd-3-clause |
OshynSong/scikit-learn | sklearn/cross_decomposition/pls_.py | 187 | 28507 | """
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <[email protected]>
# License: BSD 3 clause
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
from ..utils.validation import check_is_fitted
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
similar to the Power method for determining the eigenvectors and
eigenvalues of X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
eps = np.finfo(X.dtype).eps
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
X_pinv = linalg.pinv(X) # compute once pinv(X)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv(Y) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
# 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
# y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
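# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the two helpers above
# estimate the same quantity -- the leading left/right singular vectors of
# X'Y.  The hypothetical function below compares both estimates on random
# data; it relies on the module-level numpy import and the helpers defined
# above, and is only defined here, never called at import time.
def _demo_weight_estimates(n_samples=50, p=4, q=3, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(n_samples, p)
    Y = rng.randn(n_samples, q)
    u_svd, v_svd = _svd_cross_product(X, Y)
    u_nip, v_nip, n_iter = _nipals_twoblocks_inner_loop(
        X, Y, mode="A", norm_y_weights=True)
    # Weights are defined up to a sign flip, so compare absolute cosines;
    # both should be close to 1 when the two methods agree.
    cos_u = abs(np.dot(u_svd.ravel(), u_nip.ravel()))
    cos_v = abs(np.dot(v_svd.ravel(), v_nip.ravel()))
    return cos_u, cos_v, n_iter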
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
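# Illustrative sketch (not part of the original module): after
# _center_scale_xy, both blocks have zero column means and, when scale=True,
# unit column standard deviations (ddof=1).  The hypothetical helper below is
# only defined, never executed, at import time.
def _demo_center_scale(seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(20, 3) * 5.0 + 2.0
    Y = rng.randn(20, 2) * 0.1 - 1.0
    Xc, Yc, x_mean, y_mean, x_std, y_std = _center_scale_xy(X.copy(), Y.copy())
    assert np.allclose(Xc.mean(axis=0), 0.0)
    assert np.allclose(Yc.std(axis=0, ddof=1), 1.0)
    # The original block can be recovered by undoing the transformation.
    assert np.allclose(Xc * x_std + x_mean, X)
    return x_mean, y_mean, x_std, y_std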
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
    This class implements the generic PLS algorithm; its constructor
    parameters allow one to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
    (i) The outer loop iterates over components.
    (ii) The inner loop estimates the weight vectors. This can be done with
    two algorithms: (a) the inner loop of the original NIPALS algorithm, or
    (b) an SVD of the residual cross-covariance matrices.
    Parameters
    ----------
    n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of predictors.
        Y : array-like of response, shape = [n_samples, n_targets]
            Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
        # copy since this will contain the residuals (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('Invalid number of components: %d' %
self.n_components)
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if self.deflation_mode not in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\
= _center_scale_xy(X, Y, self.scale)
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):
# Yk constant
warnings.warn('Y residual constant at iteration %s' % k)
break
# 1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
break
# 2) Deflation (in place)
# ----------------------
            # Possible memory footprint reduction may be done here: in order to
# avoid the allocation of a data chunk for the rank-one
# approximations matrix which is then subtracted to Xk, we suggest
# to perform a column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
        # U = Y C(Q'C)^-1 = YC* (C* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.pinv(np.dot(self.x_loadings_.T, self.x_weights_)))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.pinv(np.dot(self.y_loadings_.T, self.y_weights_)))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# FIXME what's with the if?
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *
self.y_std_)
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
            p is the number of predictors.
        Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
# Apply rotation
x_scores = np.dot(X, self.x_rotations_)
if Y is not None:
Y = check_array(Y, ensure_2d=False, copy=copy)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
Ypred = np.dot(X, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
            p is the number of predictors.
        Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
        # Note: no check_is_fitted here -- fit_transform must also work on an
        # estimator that has not been fitted yet.
        return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
    PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
    in the case of a one-dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real
Tolerance used in the iterative algorithm default 1e-06.
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find weights u, v that optimizes:
    ``max corr(Xk u, Yk v) * var(Xk u) var(Yk v)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
    This implementation provides the same results as three PLS packages
    available in the R language (R-project):
    - "mixOmics" with function pls(X, Y, mode = "regression")
    - "plspm" with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
    In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
algorithm [Tenenhaus 1998] p.204, referred as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
scale : boolean, scale data? (default True)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
n_components : int, number of components to keep. (default 2).
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
For each component k, find weights u, v that optimize::
        max corr(Xk u, Yk v) * var(Xk u) var(Yk v), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score. This performs a canonical symmetric version of the PLS
    regression, but slightly different from CCA. This is mostly used
    for modeling.
    This implementation provides the same results as the "plspm" package
    available in the R language (R-project), using the function plsca(X, Y).
    Results are equal or collinear with the function
    ``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
    lies in the fact that the mixOmics implementation does not exactly implement
the Wold algorithm since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
Simply perform a svd on the crosscovariance matrix: X'Y
There are no iterative deflation here.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, default 2
Number of components to keep.
scale : boolean, default True
Whether to scale X and Y.
copy : boolean, default True
Whether to copy X and Y, or perform in-place computations.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
        # copy since this will contain the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if self.n_components > max(Y.shape[1], X.shape[1]):
raise ValueError("Invalid number of components n_components=%d"
" with X of shape %s and Y of shape %s."
% (self.n_components, str(X.shape), str(Y.shape)))
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ =\
_center_scale_xy(X, Y, self.scale)
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
# all the components (C.shape[1]), we have to use another one. Else,
# let's use arpacks to compute only the interesting components.
if self.n_components >= np.min(C.shape):
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
check_is_fitted(self, 'x_mean_')
X = check_array(X, dtype=np.float64)
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
            p is the number of predictors.
        Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
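# Illustrative sketch (not part of the original module): PLSSVD has no doctest
# above, so the hypothetical helper below shows its intended use next to
# PLSCanonical on the toy data from the class docstrings.  It is only defined
# here for illustration and is not executed at import time.
def _demo_plssvd():
    X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]])
    Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])
    pls_svd = PLSSVD(n_components=2).fit(X, Y)
    x_scores, y_scores = pls_svd.transform(X, Y)
    plsca = PLSCanonical(n_components=2).fit(X, Y)
    # Both yield [n_samples, n_components] score matrices; PLSSVD uses a
    # single SVD of X'Y while PLSCanonical deflates iteratively.
    return x_scores.shape, y_scores.shape, plsca.x_scores_.shape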
| bsd-3-clause |
ElvisLouis/code | work/ML/tensorflow/separa/pyradbas/RBFN.py | 1 | 1611 | from scipy import *
from scipy.linalg import norm, pinv
import numpy as np
from matplotlib import pyplot as plt
class RBF:
def __init__(self, indim, numCenters, outdim):
self.indim = indim
self.outdim = outdim
self.numCenters = numCenters
        self.centers = [random.uniform(-1, 1, indim) for i in range(numCenters)]
self.beta = 8
self.W = random.random((self.numCenters, self.outdim))
def _basisfunc(self, c, d):
assert len(d) == self.indim
return exp(-self.beta * norm(c - d) ** 2)
def _calcAct(self, X):
# calculate activations of RBFs
G = zeros((X.shape[0], self.numCenters), float)
for ci, c in enumerate(self.centers):
for xi, x in enumerate(X):
G[xi, ci] = self._basisfunc(c, x)
return G
def train(self, X, Y):
""" X: matrix of dimensions n x indim
y: column vector of dimension n x 1 """
# choose random center vectors from training set
rnd_idx = random.permutation(X.shape[0])[:self.numCenters]
self.centers = [X[i, :] for i in rnd_idx]
# print "center", self.centers
# calculate activations of RBFs
G = self._calcAct(X)
# print G
# calculate output weights (pseudoinverse)
self.W = dot(pinv(G), Y)
def test(self, X):
""" X: matrix of dimensions n x indim """
G = self._calcAct(X)
Y = dot(G, self.W)
return Y
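# Illustrative sketch (not part of the original file): a hedged example of
# using the RBF network above with a 2-D input (indim=2) and a scalar output.
# The helper is hypothetical, reuses the module-level numpy import, and is
# only defined here; the __main__ block below remains the original demo.
def demo_rbf_2d(n_points=200, num_centers=15, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.uniform(-1.0, 1.0, size=(n_points, 2))
    # Target: a smooth function of the two inputs, shaped as a column vector.
    y = np.sin(3.0 * X[:, [0]]) * np.cos(2.0 * X[:, [1]])
    net = RBF(indim=2, numCenters=num_centers, outdim=1)
    net.train(X, y)
    y_hat = net.test(X)
    # In-sample mean squared error of the fit.
    return float(np.mean((y_hat - y) ** 2))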
if __name__ == '__main__':
n = 100
x = mgrid[-1:1:complex(0, n)].reshape(n, 1)
# set y and add random noise
y = sin(3 * (x + 0.5) ** 3 - 1)
# y += random.normal(0, 0.1, y.shape)
# rbf regression
rbf = RBF(1, 10, 1)
rbf.train(x, y)
z = rbf.test(x)
# print (z)
# print (y)
out = z - y
print (out.var())
| gpl-2.0 |
costypetrisor/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 299 | 1770 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
| bsd-3-clause |
chris-ch/omarket | python-lab/grid-johansen.py | 1 | 9270 | import csv
import logging
import argparse
import os
from datetime import timedelta
import pickle
import math
import itertools
import pandas
import numpy
import cointeg
_ETFS = (
'SCHF', 'AMJ', 'FEZ', 'EMB', 'EWC', 'UUP', 'EWY', 'IWF', 'HEDJ', 'VOO',
'BND', 'VEU', 'ITB', 'IWD', 'DBC', 'VTI', 'EWG', 'USMV', 'EWH', 'PGX',
'EPI', 'IEFA', 'AGG', 'KBE', 'VGK', 'DIA', 'IVV', 'PFF', 'EWW', 'VNQ',
'XME', 'XLB', 'BKLN', 'XLY', 'XRT', 'LQD', 'XBI', 'DXJ', 'IEMG', 'GLD',
'KRE', 'SLV', 'IYR', 'XLV', 'AMLP', 'VEA', 'XLK', 'IAU', 'RSX', 'XLI',
'JNK', 'HYG', 'XLE', 'XOP', 'VWO', 'XLP', 'XLU', 'FXI', 'EWZ', 'EFA',
'UNG', 'GDXJ', 'IWM', 'USO', 'EEM', 'GDX', 'SPY', 'XLF'
)
def load_security(path_db, exchange, symbol, excludes_func=None):
data_path = os.sep.join([path_db, exchange, symbol[0], symbol])
dataframes = list()
header = ('date', 'open', 'high', 'low', 'close', 'close_adj', 'volume')
date_parser = lambda x: pandas.datetime.strptime(x, '%Y%m%d')
description = open(os.sep.join([data_path, 'name.txt']), 'r').read()
if excludes_func:
if excludes_func(description):
logging.info('excluding: %s' % description)
return None
for root, dirs, files in os.walk(data_path):
for year_file in [f for f in files if f.endswith('.csv')]:
dataframe = pandas.read_csv(os.sep.join([root, year_file]),
names=header,
parse_dates=['date'],
date_parser=date_parser)
dataframe.set_index('date', inplace=True)
dataframes.append(dataframe)
return pandas.concat(dataframes, axis=0)
def list_securities(path):
for root, dirs, files in os.walk(path):
if len(path) >= 3 and len([f for f in files if f.endswith('.csv')]) > 0:
path = root.split(os.sep)
exchange = path[-3]
stock = path[-1]
yield exchange, stock
def prepare_data(eod_path):
"""
Filtering out unwanted ETFs (leveraged)
:param eod_path:
:return:
"""
securities = list()
for count, data in enumerate(list_securities(eod_path)):
exchange, stock = data
securities.append((exchange, stock))
close_df = pandas.DataFrame()
volume_df = pandas.DataFrame()
def excludes(name):
result = False
result |= 'X2' in name.upper()
result |= 'X3' in name.upper()
result |= '3X' in name.upper()
result |= '2X' in name.upper()
result |= 'ULTRA' in name.upper()
return result
for count, data in enumerate(securities):
exchange, stock = data
if not stock in _ETFS:
continue
eod_data = load_security(eod_path, exchange, stock, excludes_func=excludes)
if eod_data is not None:
close_df['%s/%s' % (exchange, stock)] = eod_data['close_adj']
volume_df['%s/%s' % (exchange, stock)] = eod_data['volume']
logging.info('processed: %s/%s %d/%d' % (exchange, stock, count + 1, len(securities)))
logging.info("result:\n%s" % close_df)
close_df.to_pickle(os.sep.join([eod_path, 'eod.pkl']))
volume_df.to_pickle(os.sep.join([eod_path, 'volume.pkl']))
def ncr(total, samples):
f = math.factorial
return f(total) // f(samples) // f(total - samples)
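# Hedged illustration (not part of the original script): ncr(total, samples)
# is the binomial coefficient "total choose samples", used below to report how
# many portfolios of `number_securities` ETFs can be formed from the columns
# kept after filtering.  The tiny check below is hypothetical and never called
# by the script itself.
def _ncr_examples():
    assert ncr(5, 2) == 10
    assert ncr(68, 3) == 50116  # e.g. 3-ETF baskets out of a 68-ETF universe
    return ncr(len(_ETFS), 3)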
def generate_data(eod_path, number_securities=3):
close_df = pandas.read_pickle(os.sep.join([eod_path, 'eod.pkl']))
volume_df = pandas.read_pickle(os.sep.join([eod_path, 'volume.pkl']))
recent_history = volume_df.index.max() - timedelta(days=60)
median_volumes = volume_df[volume_df.index > recent_history].fillna(0).median()
most_traded = median_volumes[median_volumes > 1E5].keys().values.tolist()
columns = [column for column in most_traded if column.startswith('PCX/')]
logging.info('combining %d series' % len(columns))
def to_key(symbols):
return ','.join(symbols)
total = ncr(len(columns), number_securities)
logging.info('Possible combinations: %d' % total)
results = dict()
last_completed = None
for count, combination in enumerate(itertools.combinations(columns, number_securities)):
# Almost identical leads to untrade-able discrepancies
        if 'PCX/IAU' in combination and 'PCX/GLD' in combination:
continue
if int((count / total) * 100) % 5 == 0 and int((count / total) * 100) != last_completed:
last_completed = int((count / total) * 100)
logging.info('%d%% completed' % last_completed)
if to_key(combination) not in results:
current_df = close_df[list(combination)].dropna()
try:
result = cointeg.cointegration_johansen(current_df)
keepers = ('eigenvectors', 'trace_statistic', 'eigenvalue_statistics', 'critical_values_trace',
'critical_values_max_eigenvalue')
results[to_key(combination)] = dict((k, result[k]) for k in keepers if k in result)
except Exception as err:
logging.error('failed for combination: %s' % str(combination))
with open(os.sep.join([eod_path, 'results.pkl']), 'wb') as handle:
pickle.dump(results, handle, protocol=pickle.HIGHEST_PROTOCOL)
def analyse(eod_path):
all_eigenvalue_stats = list()
all_trace_stats = list()
with open(os.sep.join([eod_path, 'results.pkl']), 'rb') as handle:
results = pickle.load(handle)
total = len(results.keys())
for count, combination in enumerate(results.keys()):
stats = results[combination]
eigenvalue_stats = stats['eigenvalue_statistics']
trace_stats = stats['trace_statistic']
if trace_stats[0] > stats['critical_values_trace'][0][0] and eigenvalue_stats[0] > stats['critical_values_max_eigenvalue'][0][0]:
all_eigenvalue_stats.append((eigenvalue_stats[0], combination, results[combination]))
all_trace_stats.append((eigenvalue_stats[0], combination, results[combination]))
if count % 10000 == 9999:
logging.info('processed %d / %d' % (count + 1, total))
all_trace_stats.sort(key=lambda x: -x[0])
trace_df = pandas.DataFrame(all_trace_stats)
logging.info(trace_df.describe())
result_file = os.sep.join(['..', '..', 'data', 'results.csv'])
logging.info('writing best results to: %s' % os.path.abspath(result_file))
with open(result_file, mode='w', newline='') as csvfile:
csvwriter = csv.writer(csvfile, quoting=csv.QUOTE_MINIMAL)
for count in range(len(all_trace_stats)):
stat, portfolio, result = all_trace_stats[count]
portfolio = [name.split('/')[1] for name in portfolio.replace("'", '').replace('(', '').replace(')', '').split(',')]
weights = result['eigenvectors'].transpose()[0]
norm_weights = [round(x, 2) for x in numpy.divide(weights, numpy.absolute(weights).min()).tolist()]
logging.debug('portfolio: %s' % str(portfolio))
logging.debug('normalized weights: %s' % norm_weights)
logging.debug('stat trace: %s' % result['trace_statistic'][0])
logging.debug('critical trace : %s' % result['critical_values_trace'][0][0])
logging.debug('stat eigenvalue: %s' % result['eigenvalue_statistics'][0])
logging.debug('critical eigenvalue : %s' % result['critical_values_max_eigenvalue'][0][0])
stat_fields = result['trace_statistic'][0], result['critical_values_trace'][0][0], result['eigenvalue_statistics'][0], result['critical_values_max_eigenvalue'][0][0]
csvwriter.writerow(portfolio + norm_weights + list(stat_fields))
def main(args):
eod_path = args.eod_path
logging.info('using eod path: %s' % os.path.abspath(eod_path))
if eod_path is None:
eod_path = '.'
if args.prepare:
prepare_data(eod_path)
if args.generate:
generate_data(eod_path, args.number_securities)
if args.analyse:
analyse(eod_path)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO, format='%(asctime)s:%(name)s:%(levelname)s:%(message)s')
file_handler = logging.FileHandler('grid-johansen.log', mode='w')
formatter = logging.Formatter('%(asctime)s:%(name)s:%(levelname)s:%(message)s')
file_handler.setFormatter(formatter)
logging.getLogger().addHandler(file_handler)
logging.info('starting script')
parser = argparse.ArgumentParser(description='Grid using johansen test.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--eod-path', type=str, help='path to eod data')
parser.add_argument('--number-securities', type=int, help='number of securities', default=3)
parser.add_argument('--prepare', dest='prepare', action='store_true', help='preparing data')
parser.add_argument('--generate', dest='generate', action='store_true', help='generating results')
parser.add_argument('--analyse', dest='analyse', action='store_true', help='analysing results')
args = parser.parse_args()
main(args)
| apache-2.0 |
levibostian/myBlanky | googleAppEngine/google/appengine/api/appinfo.py | 1 | 64057 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""AppInfo tools.
Library for working with AppInfo records in memory, and for storing them to
and loading them from configuration files.
"""
import logging
import os
import re
import string
import sys
import wsgiref.util
if os.environ.get('APPENGINE_RUNTIME') == 'python27':
from google.appengine.api import pagespeedinfo
from google.appengine.api import validation
from google.appengine.api import yaml_builder
from google.appengine.api import yaml_listener
from google.appengine.api import yaml_object
else:
from google.appengine.api import pagespeedinfo
from google.appengine.api import validation
from google.appengine.api import yaml_builder
from google.appengine.api import yaml_listener
from google.appengine.api import yaml_object
from google.appengine.api import appinfo_errors
from google.appengine.api import backendinfo
_URL_REGEX = r'(?!\^)/.*|\..*|(\(.).*(?!\$).'
_FILES_REGEX = r'.+'
_URL_ROOT_REGEX = r'/.*'
_DELTA_REGEX = r'([0-9]+)([DdHhMm]|[sS]?)'
_EXPIRATION_REGEX = r'\s*(%s)(\s+%s)*\s*' % (_DELTA_REGEX, _DELTA_REGEX)
_START_PATH = '/_ah/start'
_ALLOWED_SERVICES = ['mail', 'mail_bounce', 'xmpp_message', 'xmpp_subscribe',
'xmpp_presence', 'xmpp_error', 'channel_presence', 'rest',
'warmup']
_SERVICE_RE_STRING = '(' + '|'.join(_ALLOWED_SERVICES) + ')'
_PAGE_NAME_REGEX = r'^.+$'
_EXPIRATION_CONVERSIONS = {
'd': 60 * 60 * 24,
'h': 60 * 60,
'm': 60,
's': 1,
}
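# Hedged sketch (not part of the original module): the conversion table above
# defines how expiration strings such as '1d 6h 30m' are interpreted.  The
# hypothetical helper below shows one way to turn such a string into seconds
# using _DELTA_REGEX; the parsing actually used by the SDK may differ.
def _example_parse_expiration(expiration):
  """Returns the total number of seconds described by an expiration string."""
  total = 0
  for amount, unit in re.findall(_DELTA_REGEX, expiration):
    total += int(amount) * _EXPIRATION_CONVERSIONS.get(unit.lower() or 's', 1)
  return total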
APP_ID_MAX_LEN = 100
MODULE_ID_MAX_LEN = 63
MODULE_VERSION_ID_MAX_LEN = 63
MAX_URL_MAPS = 100
PARTITION_SEPARATOR = '~'
DOMAIN_SEPARATOR = ':'
VERSION_SEPARATOR = '.'
MODULE_SEPARATOR = ':'
DEFAULT_MODULE = 'default'
PARTITION_RE_STRING_WITHOUT_SEPARATOR = (r'[a-z\d\-]{1,%d}' % APP_ID_MAX_LEN)
PARTITION_RE_STRING = (r'%s\%s' %
(PARTITION_RE_STRING_WITHOUT_SEPARATOR,
PARTITION_SEPARATOR))
DOMAIN_RE_STRING_WITHOUT_SEPARATOR = (r'(?!\-)[a-z\d\-\.]{1,%d}' %
APP_ID_MAX_LEN)
DOMAIN_RE_STRING = (r'%s%s' %
(DOMAIN_RE_STRING_WITHOUT_SEPARATOR, DOMAIN_SEPARATOR))
DISPLAY_APP_ID_RE_STRING = r'(?!-)[a-z\d\-]{0,%d}[a-z\d]' % (APP_ID_MAX_LEN - 1)
APPLICATION_RE_STRING = (r'(?:%s)?(?:%s)?%s' %
(PARTITION_RE_STRING,
DOMAIN_RE_STRING,
DISPLAY_APP_ID_RE_STRING))
MODULE_ID_RE_STRING = r'^(?!-)[a-z\d\-]{0,%d}[a-z\d]$' % (MODULE_ID_MAX_LEN - 1)
MODULE_VERSION_ID_RE_STRING = (r'^(?!-)[a-z\d\-]{0,%d}[a-z\d]$' %
(MODULE_VERSION_ID_MAX_LEN - 1))
_IDLE_INSTANCES_REGEX = r'^([\d]+|automatic)$'
_INSTANCES_REGEX = r'^[1-9][\d]*$'
_INSTANCE_CLASS_REGEX = r'^([fF](1|2|4|4_1G)|[bB](1|2|4|8|4_1G))$'
_CONCURRENT_REQUESTS_REGEX = r'^([1-9]\d*)$'
_PENDING_LATENCY_REGEX = r'^(\d+((\.\d{1,3})?s|ms)|automatic)$'
_IDLE_TIMEOUT_REGEX = r'^[\d]+(s|m)$'
ALTERNATE_HOSTNAME_SEPARATOR = '-dot-'
BUILTIN_NAME_PREFIX = 'ah-builtin'
RUNTIME_RE_STRING = r'[a-z][a-z0-9]{0,29}'
API_VERSION_RE_STRING = r'[\w.]{1,32}'
SOURCE_LANGUAGE_RE_STRING = r'[\w.\-]{1,32}'
HANDLER_STATIC_FILES = 'static_files'
HANDLER_STATIC_DIR = 'static_dir'
HANDLER_SCRIPT = 'script'
HANDLER_API_ENDPOINT = 'api_endpoint'
LOGIN_OPTIONAL = 'optional'
LOGIN_REQUIRED = 'required'
LOGIN_ADMIN = 'admin'
AUTH_FAIL_ACTION_REDIRECT = 'redirect'
AUTH_FAIL_ACTION_UNAUTHORIZED = 'unauthorized'
DATASTORE_ID_POLICY_LEGACY = 'legacy'
DATASTORE_ID_POLICY_DEFAULT = 'default'
SECURE_HTTP = 'never'
SECURE_HTTPS = 'always'
SECURE_HTTP_OR_HTTPS = 'optional'
SECURE_DEFAULT = 'default'
REQUIRE_MATCHING_FILE = 'require_matching_file'
DEFAULT_SKIP_FILES = (r'^(.*/)?('
r'(#.*#)|'
r'(.*~)|'
r'(.*\.py[co])|'
r'(.*/RCS/.*)|'
r'(\..*)|'
r')$')
SKIP_NO_FILES = r'(?!)'
DEFAULT_NOBUILD_FILES = (r'^$')
LOGIN = 'login'
AUTH_FAIL_ACTION = 'auth_fail_action'
SECURE = 'secure'
URL = 'url'
POSITION = 'position'
POSITION_HEAD = 'head'
POSITION_TAIL = 'tail'
STATIC_FILES = 'static_files'
UPLOAD = 'upload'
STATIC_DIR = 'static_dir'
MIME_TYPE = 'mime_type'
SCRIPT = 'script'
EXPIRATION = 'expiration'
API_ENDPOINT = 'api_endpoint'
HTTP_HEADERS = 'http_headers'
APPLICATION_READABLE = 'application_readable'
REDIRECT_HTTP_RESPONSE_CODE = 'redirect_http_response_code'
APPLICATION = 'application'
MODULE = 'module'
AUTOMATIC_SCALING = 'automatic_scaling'
MANUAL_SCALING = 'manual_scaling'
BASIC_SCALING = 'basic_scaling'
VM = 'vm'
VM_SETTINGS = 'vm_settings'
VM_HEALTH_CHECK = 'vm_health_check'
VERSION = 'version'
MAJOR_VERSION = 'major_version'
MINOR_VERSION = 'minor_version'
RUNTIME = 'runtime'
API_VERSION = 'api_version'
SOURCE_LANGUAGE = 'source_language'
BUILTINS = 'builtins'
INCLUDES = 'includes'
HANDLERS = 'handlers'
LIBRARIES = 'libraries'
DEFAULT_EXPIRATION = 'default_expiration'
SKIP_FILES = 'skip_files'
NOBUILD_FILES = 'nobuild_files'
SERVICES = 'inbound_services'
DERIVED_FILE_TYPE = 'derived_file_type'
JAVA_PRECOMPILED = 'java_precompiled'
PYTHON_PRECOMPILED = 'python_precompiled'
ADMIN_CONSOLE = 'admin_console'
ERROR_HANDLERS = 'error_handlers'
BACKENDS = 'backends'
THREADSAFE = 'threadsafe'
DATASTORE_AUTO_ID_POLICY = 'auto_id_policy'
API_CONFIG = 'api_config'
CODE_LOCK = 'code_lock'
ENV_VARIABLES = 'env_variables'
PAGESPEED = 'pagespeed'
INSTANCE_CLASS = 'instance_class'
MINIMUM_PENDING_LATENCY = 'min_pending_latency'
MAXIMUM_PENDING_LATENCY = 'max_pending_latency'
MINIMUM_IDLE_INSTANCES = 'min_idle_instances'
MAXIMUM_IDLE_INSTANCES = 'max_idle_instances'
MAXIMUM_CONCURRENT_REQUEST = 'max_concurrent_requests'
MIN_NUM_INSTANCES = 'min_num_instances'
MAX_NUM_INSTANCES = 'max_num_instances'
COOL_DOWN_PERIOD_SEC = 'cool_down_period_sec'
CPU_UTILIZATION = 'cpu_utilization'
CPU_UTILIZATION_UTILIZATION = 'target_utilization'
CPU_UTILIZATION_AGGREGATION_WINDOW_LENGTH_SEC = 'aggregation_window_length_sec'
INSTANCES = 'instances'
MAX_INSTANCES = 'max_instances'
IDLE_TIMEOUT = 'idle_timeout'
PAGES = 'pages'
NAME = 'name'
ERROR_CODE = 'error_code'
FILE = 'file'
_ERROR_CODE_REGEX = r'(default|over_quota|dos_api_denial|timeout)'
ON = 'on'
ON_ALIASES = ['yes', 'y', 'True', 't', '1', 'true']
OFF = 'off'
OFF_ALIASES = ['no', 'n', 'False', 'f', '0', 'false']
ENABLE_HEALTH_CHECK = 'enable_health_check'
CHECK_INTERVAL_SEC = 'check_interval_sec'
TIMEOUT_SEC = 'timeout_sec'
UNHEALTHY_THRESHOLD = 'unhealthy_threshold'
HEALTHY_THRESHOLD = 'healthy_threshold'
RESTART_THRESHOLD = 'restart_threshold'
HOST = 'host'
class _VersionedLibrary(object):
"""A versioned library supported by App Engine."""
def __init__(self,
name,
url,
description,
supported_versions,
default_version=None,
deprecated_versions=None,
experimental_versions=None):
"""Initializer for _VersionedLibrary.
Args:
name: The name of the library e.g. "django".
url: The URL for the library's project page e.g.
"http://www.djangoproject.com/".
description: A short description of the library e.g. "A framework...".
supported_versions: A list of supported version names ordered by release
date e.g. ["v1", "v2", "v3"].
default_version: The version of the library that is enabled by default
in the Python 2.7 runtime or None if the library is not available by
default e.g. "v1".
deprecated_versions: A list of the versions of the library that have been
deprecated e.g. ["v1", "v2"].
experimental_versions: A list of the versions of the library that are
current experimental e.g. ["v1"].
"""
self.name = name
self.url = url
self.description = description
self.supported_versions = supported_versions
self.default_version = default_version
self.deprecated_versions = deprecated_versions or []
self.experimental_versions = experimental_versions or []
@property
def non_deprecated_versions(self):
return [version for version in self.supported_versions
if version not in self.deprecated_versions]
_SUPPORTED_LIBRARIES = [
_VersionedLibrary(
'django',
'http://www.djangoproject.com/',
'A full-featured web application framework for Python.',
['1.2', '1.3', '1.4', '1.5'],
experimental_versions=['1.5'],
),
_VersionedLibrary(
'endpoints',
'https://developers.google.com/appengine/docs/python/endpoints/',
'Libraries for building APIs in an App Engine application.',
['1.0']),
_VersionedLibrary(
'jinja2',
'http://jinja.pocoo.org/docs/',
'A modern and designer friendly templating language for Python.',
['2.6']),
_VersionedLibrary(
'lxml',
'http://lxml.de/',
'A Pythonic binding for the C libraries libxml2 and libxslt.',
['2.3', '2.3.5'],
experimental_versions=['2.3.5'],
),
_VersionedLibrary(
'markupsafe',
'http://pypi.python.org/pypi/MarkupSafe',
'A XML/HTML/XHTML markup safe string for Python.',
['0.15']),
_VersionedLibrary(
'matplotlib',
'http://matplotlib.org/',
'A 2D plotting library which produces publication-quality figures.',
['1.2.0'],
experimental_versions=['1.2.0'],
),
_VersionedLibrary(
'MySQLdb',
'http://mysql-python.sourceforge.net/',
'A Python DB API v2.0 compatible interface to MySQL.',
['1.2.4b4'],
experimental_versions=['1.2.4b4']
),
_VersionedLibrary(
'numpy',
'http://numpy.scipy.org/',
'A general-purpose library for array-processing.',
['1.6.1']),
_VersionedLibrary(
'PIL',
'http://www.pythonware.com/library/pil/handbook/',
'A library for creating and transforming images.',
['1.1.7']),
_VersionedLibrary(
'protorpc',
'https://code.google.com/p/google-protorpc/',
'A framework for implementing HTTP-based remote procedure call (RPC) '
'services.',
['1.0'],
default_version='1.0',
),
_VersionedLibrary(
'PyAMF',
'http://www.pyamf.org/',
'A library that provides (AMF) Action Message Format functionality.',
['0.6.1']),
_VersionedLibrary(
'pycrypto',
'https://www.dlitz.net/software/pycrypto/',
        'A library of cryptography functions such as random number generation.',
['2.3', '2.6'],
),
_VersionedLibrary(
'setuptools',
'http://pypi.python.org/pypi/setuptools',
'A library that provides package and module discovery capabilities.',
['0.6c11']),
_VersionedLibrary(
'ssl',
'http://docs.python.org/dev/library/ssl.html',
'The SSL socket wrapper built-in module.',
['2.7'],
experimental_versions=['2.7']),
_VersionedLibrary(
'webapp2',
'http://webapp-improved.appspot.com/',
'A lightweight Python web framework.',
['2.3', '2.5.1', '2.5.2'],
default_version='2.3',
deprecated_versions=['2.3']
),
_VersionedLibrary(
'webob',
'http://www.webob.org/',
'A library that provides wrappers around the WSGI request environment.',
['1.1.1', '1.2.3'],
default_version='1.1.1',
),
_VersionedLibrary(
'yaml',
'http://www.yaml.org/',
'A library for YAML serialization and deserialization.',
['3.10'],
default_version='3.10'
),
]
_NAME_TO_SUPPORTED_LIBRARY = dict((library.name, library)
for library in _SUPPORTED_LIBRARIES)
REQUIRED_LIBRARIES = {
('jinja2', '2.6'): [('markupsafe', '0.15'), ('setuptools', '0.6c11')],
('jinja2', 'latest'): [('markupsafe', 'latest'), ('setuptools', 'latest')],
('matplotlib', '1.1.1'): [('numpy', '1.6.1')],
('matplotlib', '1.2.0'): [('numpy', '1.6.1')],
('matplotlib', 'latest'): [('numpy', 'latest')],
}
_USE_VERSION_FORMAT = ('use one of: "%s" or "latest" '
'("latest" recommended for development only)')
_HTTP_SEPARATOR_CHARS = frozenset('()<>@,;:\\"/[]?={} \t')
_HTTP_TOKEN_CHARS = frozenset(string.printable[:-5]) - _HTTP_SEPARATOR_CHARS
_HTTP_TOKEN_RE = re.compile('[%s]+$' % re.escape(''.join(_HTTP_TOKEN_CHARS)))
_HTTP_REQUEST_HEADERS = frozenset([
'accept',
'accept-charset',
'accept-encoding',
'accept-language',
'authorization',
'expect',
'from',
'host',
'if-match',
'if-modified-since',
'if-none-match',
'if-range',
'if-unmodified-since',
'max-forwards',
'proxy-authorization',
'range',
'referer',
'te',
'user-agent',
])
_MAX_COOKIE_LENGTH = 4096
_MAX_URL_LENGTH = 2047
class HandlerBase(validation.Validated):
"""Base class for URLMap and ApiConfigHandler."""
ATTRIBUTES = {
URL: validation.Optional(_URL_REGEX),
LOGIN: validation.Options(LOGIN_OPTIONAL,
LOGIN_REQUIRED,
LOGIN_ADMIN,
default=LOGIN_OPTIONAL),
AUTH_FAIL_ACTION: validation.Options(AUTH_FAIL_ACTION_REDIRECT,
AUTH_FAIL_ACTION_UNAUTHORIZED,
default=AUTH_FAIL_ACTION_REDIRECT),
SECURE: validation.Options(SECURE_HTTP,
SECURE_HTTPS,
SECURE_HTTP_OR_HTTPS,
SECURE_DEFAULT,
default=SECURE_DEFAULT),
HANDLER_SCRIPT: validation.Optional(_FILES_REGEX)
}
class HttpHeadersDict(validation.ValidatedDict):
"""A dict that limits keys and values what http_headers allows.
http_headers is an static handler key i.e. it applies to handlers with
static_dir or static_files keys. An example of how http_headers is used is
handlers:
- url: /static
static_dir: static
http_headers:
X-Foo-Header: foo value
X-Bar-Header: bar value
"""
DISALLOWED_HEADERS = frozenset([
'content-encoding',
'content-length',
'date',
'server'
])
MAX_HEADER_LENGTH = 500
MAX_HEADER_VALUE_LENGTHS = {
'set-cookie': _MAX_COOKIE_LENGTH,
'set-cookie2': _MAX_COOKIE_LENGTH,
'location': _MAX_URL_LENGTH}
MAX_LEN = 500
class KeyValidator(validation.Validator):
"""Ensures that keys in HttpHeadersDict i.e. header names are valid.
An instance is used as HttpHeadersDict's KEY_VALIDATOR.
"""
def Validate(self, name, unused_key=None):
"""Returns argument, or raises an exception if it is invalid.
HTTP header names are defined by RFC 2616 section 4.2.
Args:
name: HTTP header field value.
unused_key: Unused.
Returns:
name argument, unchanged.
Raises:
appinfo_errors.InvalidHttpHeaderName: argument cannot be used as an HTTP
header name.
"""
original_name = name
if isinstance(name, unicode):
try:
name = name.encode('ascii')
except UnicodeEncodeError:
raise appinfo_errors.InvalidHttpHeaderName(
'HTTP header values must not contain non-ASCII data')
name = name.lower()
if not _HTTP_TOKEN_RE.match(name):
raise appinfo_errors.InvalidHttpHeaderName(
'An HTTP header must be a non-empty RFC 2616 token.')
if name in _HTTP_REQUEST_HEADERS:
raise appinfo_errors.InvalidHttpHeaderName(
'%r can only be used in HTTP requests, not responses.'
% original_name)
if name.startswith('x-appengine'):
raise appinfo_errors.InvalidHttpHeaderName(
'HTTP header names that begin with X-Appengine are reserved.')
if wsgiref.util.is_hop_by_hop(name):
raise appinfo_errors.InvalidHttpHeaderName(
          'Only end-to-end headers may be used. See RFC 2616 section'
' 13.5.1.')
if name in HttpHeadersDict.DISALLOWED_HEADERS:
raise appinfo_errors.InvalidHttpHeaderName(
'%s is a disallowed header.' % name)
return original_name
class ValueValidator(validation.Validator):
"""Ensures that values in HttpHeadersDict i.e. header values are valid.
An instance is used as HttpHeadersDict's VALUE_VALIDATOR.
"""
def Validate(self, value, key=None):
"""Returns value, or raises an exception if it is invalid.
According to RFC 2616 section 4.2, header field values must consist "of
either *TEXT or combinations of token, separators, and quoted-string".
TEXT = <any OCTET except CTLs, but including LWS>
Args:
value: HTTP header field value.
key: HTTP header field name.
Returns:
value argument.
Raises:
appinfo_errors.InvalidHttpHeaderValue: argument cannot be used as an
HTTP header value.
"""
if isinstance(value, unicode):
try:
value = value.encode('ascii')
except UnicodeEncodeError:
raise appinfo_errors.InvalidHttpHeaderValue(
'HTTP header values must not contain non-ASCII data')
key = key.lower()
printable = set(string.printable[:-5])
if not all(char in printable for char in value):
raise appinfo_errors.InvalidHttpHeaderValue(
'HTTP header field values must consist of printable characters.')
HttpHeadersDict.ValueValidator.AssertHeaderNotTooLong(key, value)
return value
@staticmethod
def AssertHeaderNotTooLong(name, value):
header_length = len('%s: %s\r\n' % (name, value))
if header_length >= HttpHeadersDict.MAX_HEADER_LENGTH:
try:
max_len = HttpHeadersDict.MAX_HEADER_VALUE_LENGTHS[name]
except KeyError:
raise appinfo_errors.InvalidHttpHeaderValue(
'HTTP header (name + value) is too long.')
if len(value) > max_len:
insert = name, len(value), max_len
raise appinfo_errors.InvalidHttpHeaderValue(
              '%r header value has length %d, which exceeds the maximum allowed,'
' %d.' % insert)
KEY_VALIDATOR = KeyValidator()
VALUE_VALIDATOR = ValueValidator()
def Get(self, header_name):
"""Gets a header value.
Args:
header_name: HTTP header name to look for.
Returns:
A header value that corresponds to header_name. If more than one such
value is in self, one of the values is selected arbitrarily, and
returned. The selection is not deterministic.
"""
for name in self:
if name.lower() == header_name.lower():
return self[name]
def __setitem__(self, key, value):
is_addition = self.Get(key) is None
if is_addition and len(self) >= self.MAX_LEN:
raise appinfo_errors.TooManyHttpHeaders(
'Tried to add another header when the current set of HTTP headers'
' already has the maximum allowed number of headers, %d.'
% HttpHeadersDict.MAX_LEN)
super(HttpHeadersDict, self).__setitem__(key, value)
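# Hedged sketch (not part of the original module): the key validator above can
# be exercised directly through the class-level KEY_VALIDATOR instance.  The
# hypothetical helper below only illustrates which header names it accepts and
# rejects; it is not used anywhere else.
def _example_header_name_validation():
  checker = HttpHeadersDict.KEY_VALIDATOR
  checker.Validate('X-Foo-Header')            # allowed custom response header
  for bad_name in ('X-AppEngine-Custom',      # reserved prefix
                   'Content-Length',          # disallowed response header
                   'Accept'):                 # request-only header
    try:
      checker.Validate(bad_name)
    except appinfo_errors.InvalidHttpHeaderName:
      pass
  return True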
class URLMap(HandlerBase):
"""Mapping from URLs to handlers.
This class acts like something of a union type. Its purpose is to
describe a mapping between a set of URLs and their handlers. What
handler type a given instance has is determined by which handler-id
attribute is used.
Each mapping can have one and only one handler type. Attempting to
use more than one handler-id attribute will cause an UnknownHandlerType
to be raised during validation. Failure to provide any handler-id
attributes will cause MissingHandlerType to be raised during validation.
The regular expression used by the url field will be used to match against
the entire URL path and query string of the request. This means that
partial maps will not be matched. Specifying a url, say /admin, is the
same as matching against the regular expression '^/admin$'. Don't begin
your matching url with ^ or end them with $. These regular expressions
won't be accepted and will raise ValueError.
Attributes:
login: Whether or not login is required to access URL. Defaults to
'optional'.
secure: Restriction on the protocol which can be used to serve
this URL/handler (HTTP, HTTPS or either).
url: Regular expression used to fully match against the request URLs path.
See Special Cases for using static_dir.
static_files: Handler id attribute that maps URL to the appropriate
file. Can use back regex references to the string matched to url.
upload: Regular expression used by the application configuration
program to know which files are uploaded as blobs. It's very
difficult to determine this using just the url and static_files
so this attribute must be included. Required when defining a
static_files mapping.
A matching file name must fully match against the upload regex, similar
to how url is matched against the request path. Do not begin upload
with ^ or end it with $.
static_dir: Handler id that maps the provided url to a sub-directory
within the application directory. See Special Cases.
mime_type: When used with static_files and static_dir the mime-type
of files served from those directories are overridden with this
value.
    script: Handler id that maps URLs to a script handler within the application
directory that will run using CGI.
position: Used in AppInclude objects to specify whether a handler
should be inserted at the beginning of the primary handler list or at the
end. If 'tail' is specified, the handler is inserted at the end,
otherwise, the handler is inserted at the beginning. This means that
'head' is the effective default.
expiration: When used with static files and directories, the time delta to
use for cache expiration. Has the form '4d 5h 30m 15s', where each letter
signifies days, hours, minutes, and seconds, respectively. The 's' for
seconds may be omitted. Only one amount must be specified, combining
multiple amounts is optional. Example good values: '10', '1d 6h',
'1h 30m', '7d 7d 7d', '5m 30'.
api_endpoint: Handler id that identifies endpoint as an API endpoint,
calls that terminate here will be handled by the api serving framework.
Special cases:
When defining a static_dir handler, do not use a regular expression
in the url attribute. Both the url and static_dir attributes are
automatically mapped to these equivalents:
<url>/(.*)
<static_dir>/\1
For example:
url: /images
static_dir: images_folder
Is the same as this static_files declaration:
url: /images/(.*)
static_files: images_folder/\1
upload: images_folder/(.*)
"""
ATTRIBUTES = {
HANDLER_STATIC_FILES: validation.Optional(_FILES_REGEX),
UPLOAD: validation.Optional(_FILES_REGEX),
APPLICATION_READABLE: validation.Optional(bool),
HANDLER_STATIC_DIR: validation.Optional(_FILES_REGEX),
MIME_TYPE: validation.Optional(str),
EXPIRATION: validation.Optional(_EXPIRATION_REGEX),
REQUIRE_MATCHING_FILE: validation.Optional(bool),
HTTP_HEADERS: validation.Optional(HttpHeadersDict),
POSITION: validation.Optional(validation.Options(POSITION_HEAD,
POSITION_TAIL)),
HANDLER_API_ENDPOINT: validation.Optional(validation.Options(
(ON, ON_ALIASES),
(OFF, OFF_ALIASES))),
REDIRECT_HTTP_RESPONSE_CODE: validation.Optional(validation.Options(
'301', '302', '303', '307')),
}
ATTRIBUTES.update(HandlerBase.ATTRIBUTES)
COMMON_FIELDS = set([
URL, LOGIN, AUTH_FAIL_ACTION, SECURE, REDIRECT_HTTP_RESPONSE_CODE])
ALLOWED_FIELDS = {
HANDLER_STATIC_FILES: (MIME_TYPE, UPLOAD, EXPIRATION,
REQUIRE_MATCHING_FILE, HTTP_HEADERS,
APPLICATION_READABLE),
HANDLER_STATIC_DIR: (MIME_TYPE, EXPIRATION, REQUIRE_MATCHING_FILE,
HTTP_HEADERS, APPLICATION_READABLE),
HANDLER_SCRIPT: (POSITION),
HANDLER_API_ENDPOINT: (POSITION, SCRIPT),
}
def GetHandler(self):
"""Get handler for mapping.
Returns:
Value of the handler (determined by handler id attribute).
"""
return getattr(self, self.GetHandlerType())
def GetHandlerType(self):
"""Get handler type of mapping.
Returns:
Handler type determined by which handler id attribute is set.
Raises:
UnknownHandlerType: when none of the no handler id attributes are set.
UnexpectedHandlerAttribute: when an unexpected attribute is set for the
discovered handler type.
HandlerTypeMissingAttribute: when the handler is missing a
required attribute for its handler type.
MissingHandlerAttribute: when a URL handler is missing an attribute
"""
if getattr(self, HANDLER_API_ENDPOINT) is not None:
mapping_type = HANDLER_API_ENDPOINT
else:
for id_field in URLMap.ALLOWED_FIELDS.iterkeys():
if getattr(self, id_field) is not None:
mapping_type = id_field
break
else:
raise appinfo_errors.UnknownHandlerType(
'Unknown url handler type.\n%s' % str(self))
allowed_fields = URLMap.ALLOWED_FIELDS[mapping_type]
for attribute in self.ATTRIBUTES.iterkeys():
if (getattr(self, attribute) is not None and
not (attribute in allowed_fields or
attribute in URLMap.COMMON_FIELDS or
attribute == mapping_type)):
raise appinfo_errors.UnexpectedHandlerAttribute(
'Unexpected attribute "%s" for mapping type %s.' %
(attribute, mapping_type))
if mapping_type == HANDLER_STATIC_FILES and not self.upload:
raise appinfo_errors.MissingHandlerAttribute(
'Missing "%s" attribute for URL "%s".' % (UPLOAD, self.url))
return mapping_type
def CheckInitialized(self):
"""Adds additional checking to make sure handler has correct fields.
In addition to the normal Validated check, this calls GetHandlerType,
which validates that all the handler fields are configured
properly.
Raises:
UnknownHandlerType: when none of the handler id attributes are set.
UnexpectedHandlerAttribute: when an unexpected attribute is set for the
discovered handler type.
HandlerTypeMissingAttribute: when the handler is missing a required
attribute for its handler type.
ContentTypeSpecifiedMultipleTimes: when mime_type is inconsistent with
http_headers.
"""
super(URLMap, self).CheckInitialized()
if self.GetHandlerType() in (STATIC_DIR, STATIC_FILES):
self.AssertUniqueContentType()
def AssertUniqueContentType(self):
"""Makes sure that self.http_headers is consistent with self.mime_type.
Assumes self is a static handler, i.e. either self.static_dir or
self.static_files is set (i.e. not None).
Raises:
appinfo_errors.ContentTypeSpecifiedMultipleTimes: Raised when
self.http_headers contains a Content-Type header, and self.mime_type is
set. For example, the following configuration would be rejected:
handlers:
- url: /static
static_dir: static
mime_type: text/html
http_headers:
content-type: text/html
As this example shows, a configuration will be rejected when
http_headers and mime_type specify a content type, even when they
specify the same content type.
"""
used_both_fields = self.mime_type and self.http_headers
if not used_both_fields:
return
content_type = self.http_headers.Get('Content-Type')
if content_type is not None:
raise appinfo_errors.ContentTypeSpecifiedMultipleTimes(
'http_header specified a Content-Type header of %r in a handler that'
' also specified a mime_type of %r.' % (content_type, self.mime_type))
def FixSecureDefaults(self):
"""Force omitted 'secure: ...' handler fields to 'secure: optional'.
The effect is that handler.secure is never equal to the (nominal)
default.
See http://b/issue?id=2073962.
"""
if self.secure == SECURE_DEFAULT:
self.secure = SECURE_HTTP_OR_HTTPS
def WarnReservedURLs(self):
"""Generates a warning for reserved URLs.
See:
https://developers.google.com/appengine/docs/python/config/appconfig#Reserved_URLs
"""
if self.url == '/form':
logging.warning(
'The URL path "/form" is reserved and will not be matched.')
def ErrorOnPositionForAppInfo(self):
"""Raises an error if position is specified outside of AppInclude objects.
Raises:
PositionUsedInAppYamlHandler: when position attribute is specified for an
app.yaml file instead of an include.yaml file.
"""
if self.position:
raise appinfo_errors.PositionUsedInAppYamlHandler(
'The position attribute was specified for this handler, but this is '
'an app.yaml file. Position attribute is only valid for '
'include.yaml files.')
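# Illustrative sketch only (not part of the original SDK module): builds the
# static_files mapping from the URLMap docstring above and asks for its
# handler type. The url/static_files/upload values are hypothetical examples.
def _ExampleStaticFilesUrlMap():
  """Returns the handler type of a sample static_files URLMap."""
  handler = URLMap(url='/images/(.*)',
                   static_files=r'images_folder/\1',
                   upload='images_folder/(.*)')
  return handler.GetHandlerType()  # expected to equal HANDLER_STATIC_FILES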
class AdminConsolePage(validation.Validated):
"""Class representing admin console page in AdminConsole object.
"""
ATTRIBUTES = {
URL: _URL_REGEX,
NAME: _PAGE_NAME_REGEX,
}
class AdminConsole(validation.Validated):
"""Class representing admin console directives in application info.
"""
ATTRIBUTES = {
PAGES: validation.Optional(validation.Repeated(AdminConsolePage)),
}
@classmethod
def Merge(cls, adminconsole_one, adminconsole_two):
"""Return the result of merging two AdminConsole objects."""
if not adminconsole_one or not adminconsole_two:
return adminconsole_one or adminconsole_two
if adminconsole_one.pages:
if adminconsole_two.pages:
adminconsole_one.pages.extend(adminconsole_two.pages)
else:
adminconsole_one.pages = adminconsole_two.pages
return adminconsole_one
class ErrorHandlers(validation.Validated):
"""Class representing error handler directives in application info.
"""
ATTRIBUTES = {
ERROR_CODE: validation.Optional(_ERROR_CODE_REGEX),
FILE: _FILES_REGEX,
MIME_TYPE: validation.Optional(str),
}
class BuiltinHandler(validation.Validated):
"""Class representing builtin handler directives in application info.
Permits arbitrary keys but their values must be described by the
validation.Options object returned by ATTRIBUTES.
"""
class DynamicAttributes(dict):
"""Provide a dictionary object that will always claim to have a key.
This dictionary returns a fixed value for any get operation. The fixed
value passed in as a constructor parameter should be a
validation.Validated object.
"""
def __init__(self, return_value, **parameters):
self.__return_value = return_value
dict.__init__(self, parameters)
def __contains__(self, _):
return True
def __getitem__(self, _):
return self.__return_value
ATTRIBUTES = DynamicAttributes(
validation.Optional(validation.Options((ON, ON_ALIASES),
(OFF, OFF_ALIASES))))
def __init__(self, **attributes):
"""Ensure that all BuiltinHandler objects at least have attribute 'default'.
"""
self.builtin_name = ''
super(BuiltinHandler, self).__init__(**attributes)
def __setattr__(self, key, value):
"""Permit ATTRIBUTES.iteritems() to return set of items that have values.
Whenever validate calls iteritems(), it is always called on ATTRIBUTES,
not on __dict__, so this override is important to ensure that functions
such as ToYAML() return the correct set of keys.
Raises:
MultipleBuiltinsSpecified: when more than one builtin is defined in a list
element.
"""
if key == 'builtin_name':
object.__setattr__(self, key, value)
elif not self.builtin_name:
self.ATTRIBUTES[key] = ''
self.builtin_name = key
super(BuiltinHandler, self).__setattr__(key, value)
else:
raise appinfo_errors.MultipleBuiltinsSpecified(
'More than one builtin defined in list element. Each new builtin '
'should be prefixed by "-".')
def __getattr__(self, key):
if key.startswith('_'):
raise AttributeError
return None
def ToDict(self):
"""Convert BuiltinHander object to a dictionary.
Returns:
dictionary of the form: {builtin_handler_name: on/off}
"""
return {self.builtin_name: getattr(self, self.builtin_name)}
@classmethod
def IsDefined(cls, builtins_list, builtin_name):
"""Find if a builtin is defined in a given list of builtin handler objects.
Args:
builtins_list: list of BuiltinHandler objects (typically yaml.builtins)
builtin_name: name of the builtin to look for in the list
Returns:
True if builtin_name is defined by a member of builtins_list,
False otherwise
"""
for b in builtins_list:
if b.builtin_name == builtin_name:
return True
return False
@classmethod
def ListToTuples(cls, builtins_list):
"""Converts a list of BuiltinHandler objects to a list of (name, status)."""
return [(b.builtin_name, getattr(b, b.builtin_name)) for b in builtins_list]
@classmethod
def Validate(cls, builtins_list, runtime=None):
"""Verify that all BuiltinHandler objects are valid and not repeated.
Args:
builtins_list: list of BuiltinHandler objects to validate.
runtime: if set then warnings are generated for builtins that have been
deprecated in the given runtime.
Raises:
InvalidBuiltinFormat: if the name of a BuiltinHandler object
cannot be determined.
DuplicateBuiltinsSpecified: if a builtin handler name is used
more than once in the list.
"""
seen = set()
for b in builtins_list:
if not b.builtin_name:
raise appinfo_errors.InvalidBuiltinFormat(
'Name of builtin for list object %s could not be determined.'
% b)
if b.builtin_name in seen:
raise appinfo_errors.DuplicateBuiltinsSpecified(
'Builtin %s was specified more than once in one yaml file.'
% b.builtin_name)
if b.builtin_name == 'datastore_admin' and runtime == 'python':
logging.warning(
'The datastore_admin builtin is deprecated. You can find '
'information on how to enable it through the Administrative '
'Console here: '
'http://developers.google.com/appengine/docs/adminconsole/'
'datastoreadmin.html')
elif b.builtin_name == 'mapreduce' and runtime == 'python':
logging.warning(
'The mapreduce builtin is deprecated. You can find more '
'information on how to configure and use it here: '
'http://developers.google.com/appengine/docs/python/dataprocessing/'
'overview.html')
seen.add(b.builtin_name)
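# Illustrative sketch only (not part of the original SDK module): shows how a
# single `- remote_api: on` entry in a builtins list is represented.
# 'remote_api' is just an example builtin name; 'on' is assumed to be an
# accepted alias of ON.
def _ExampleBuiltinHandler():
  """Returns the name and dict form of a sample BuiltinHandler."""
  handler = BuiltinHandler(remote_api='on')
  # builtin_name is 'remote_api'; ToDict() is expected to give
  # {'remote_api': 'on'}.
  return handler.builtin_name, handler.ToDict()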
class ApiConfigHandler(HandlerBase):
"""Class representing api_config handler directives in application info."""
ATTRIBUTES = HandlerBase.ATTRIBUTES
ATTRIBUTES.update({
URL: validation.Regex(_URL_REGEX),
HANDLER_SCRIPT: validation.Regex(_FILES_REGEX)
})
class Library(validation.Validated):
"""Class representing the configuration of a single library."""
ATTRIBUTES = {'name': validation.Type(str),
'version': validation.Type(str)}
def CheckInitialized(self):
"""Raises if the library configuration is not valid."""
super(Library, self).CheckInitialized()
if self.name not in _NAME_TO_SUPPORTED_LIBRARY:
raise appinfo_errors.InvalidLibraryName(
'the library "%s" is not supported' % self.name)
supported_library = _NAME_TO_SUPPORTED_LIBRARY[self.name]
if self.version != 'latest':
if self.version not in supported_library.supported_versions:
raise appinfo_errors.InvalidLibraryVersion(
('%s version "%s" is not supported, ' + _USE_VERSION_FORMAT) % (
self.name,
self.version,
'", "'.join(supported_library.non_deprecated_versions)))
elif self.version in supported_library.deprecated_versions:
logging.warning(
('%s version "%s" is deprecated, ' + _USE_VERSION_FORMAT) % (
self.name,
self.version,
'", "'.join(supported_library.non_deprecated_versions)))
class CpuUtilization(validation.Validated):
"""Class representing the configuration of VM CPU utilization."""
ATTRIBUTES = {
CPU_UTILIZATION_UTILIZATION: validation.Optional(
validation.Range(1e-6, 1.0, float)),
CPU_UTILIZATION_AGGREGATION_WINDOW_LENGTH_SEC: validation.Optional(
validation.Range(1, sys.maxint)),
}
class AutomaticScaling(validation.Validated):
"""Class representing automatic scaling settings in the AppInfoExternal."""
ATTRIBUTES = {
MINIMUM_IDLE_INSTANCES: validation.Optional(_IDLE_INSTANCES_REGEX),
MAXIMUM_IDLE_INSTANCES: validation.Optional(_IDLE_INSTANCES_REGEX),
MINIMUM_PENDING_LATENCY: validation.Optional(_PENDING_LATENCY_REGEX),
MAXIMUM_PENDING_LATENCY: validation.Optional(_PENDING_LATENCY_REGEX),
MAXIMUM_CONCURRENT_REQUEST: validation.Optional(
_CONCURRENT_REQUESTS_REGEX),
MIN_NUM_INSTANCES: validation.Optional(validation.Range(1, sys.maxint)),
MAX_NUM_INSTANCES: validation.Optional(validation.Range(1, sys.maxint)),
COOL_DOWN_PERIOD_SEC: validation.Optional(
validation.Range(60, sys.maxint, int)),
CPU_UTILIZATION: validation.Optional(CpuUtilization),
}
class ManualScaling(validation.Validated):
"""Class representing manual scaling settings in the AppInfoExternal."""
ATTRIBUTES = {
INSTANCES: validation.Regex(_INSTANCES_REGEX),
}
class BasicScaling(validation.Validated):
"""Class representing basic scaling settings in the AppInfoExternal."""
ATTRIBUTES = {
MAX_INSTANCES: validation.Regex(_INSTANCES_REGEX),
IDLE_TIMEOUT: validation.Optional(_IDLE_TIMEOUT_REGEX),
}
class VmSettings(validation.ValidatedDict):
"""Class for VM settings.
We don't validate these further because the feature is in flux.
"""
KEY_VALIDATOR = validation.Regex('[a-zA-Z_][a-zA-Z0-9_]*')
VALUE_VALIDATOR = str
@classmethod
def Merge(cls, vm_settings_one, vm_settings_two):
result_vm_settings = (vm_settings_two or {}).copy()
result_vm_settings.update(vm_settings_one or {})
return VmSettings(**result_vm_settings) if result_vm_settings else None
class EnvironmentVariables(validation.ValidatedDict):
"""Class representing a mapping of environment variable key value pairs."""
KEY_VALIDATOR = validation.Regex('[a-zA-Z_][a-zA-Z0-9_]*')
VALUE_VALIDATOR = str
@classmethod
def Merge(cls, env_variables_one, env_variables_two):
"""Merges to EnvironmentVariables instances.
Args:
env_variables_one: The first EnvironmentVariables instance or None.
env_variables_two: The second EnvironmentVariables instance or None.
Returns:
The merged EnvironmentVariables instance, or None if both input instances
are None or empty.
If a variable is specified by both instances, the value from
env_variables_two is used.
"""
result_env_variables = (env_variables_one or {}).copy()
result_env_variables.update(env_variables_two or {})
return (EnvironmentVariables(**result_env_variables)
if result_env_variables else None)
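# Illustrative sketch only (not part of the original SDK module): when both
# instances define the same variable, Merge keeps the value from the second
# instance, as documented above. The variable names are hypothetical.
def _ExampleEnvVariablesMerge():
  """Returns the merge of two sample EnvironmentVariables instances."""
  one = EnvironmentVariables(FOO='from_one', BAR='kept')
  two = EnvironmentVariables(FOO='from_two')
  # Expected result: {'FOO': 'from_two', 'BAR': 'kept'}
  return EnvironmentVariables.Merge(one, two)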
def VmSafeSetRuntime(appyaml, runtime):
"""Sets the runtime while respecting vm runtimes rules for runtime settings.
Args:
appyaml: AppInfoExternal instance, which will be modified.
runtime: The runtime to use.
Returns:
The passed in appyaml (which has been modified).
"""
if appyaml.vm:
if not appyaml.vm_settings:
appyaml.vm_settings = VmSettings()
appyaml.vm_settings['vm_runtime'] = runtime
appyaml.runtime = 'vm'
else:
appyaml.runtime = runtime
return appyaml
def NormalizeVmSettings(appyaml):
"""Normalize Vm settings.
Args:
appyaml: AppInfoExternal instance.
Returns:
Normalized app yaml.
"""
if appyaml.vm:
if not appyaml.vm_settings:
appyaml.vm_settings = VmSettings()
if 'vm_runtime' not in appyaml.vm_settings:
appyaml = VmSafeSetRuntime(appyaml, appyaml.runtime)
return appyaml
class VmHealthCheck(validation.Validated):
"""Class representing the configuration of VM health check."""
ATTRIBUTES = {
ENABLE_HEALTH_CHECK: validation.Optional(validation.TYPE_BOOL),
CHECK_INTERVAL_SEC: validation.Optional(validation.Range(0, sys.maxint)),
TIMEOUT_SEC: validation.Optional(validation.Range(0, sys.maxint)),
UNHEALTHY_THRESHOLD: validation.Optional(validation.Range(0, sys.maxint)),
HEALTHY_THRESHOLD: validation.Optional(validation.Range(0, sys.maxint)),
RESTART_THRESHOLD: validation.Optional(validation.Range(0, sys.maxint)),
HOST: validation.Optional(validation.TYPE_STR)}
class AppInclude(validation.Validated):
"""Class representing the contents of an included app.yaml file.
Used for both builtins and includes directives.
"""
ATTRIBUTES = {
BUILTINS: validation.Optional(validation.Repeated(BuiltinHandler)),
INCLUDES: validation.Optional(validation.Type(list)),
HANDLERS: validation.Optional(validation.Repeated(URLMap)),
ADMIN_CONSOLE: validation.Optional(AdminConsole),
MANUAL_SCALING: validation.Optional(ManualScaling),
VM: validation.Optional(bool),
VM_SETTINGS: validation.Optional(VmSettings),
ENV_VARIABLES: validation.Optional(EnvironmentVariables),
SKIP_FILES: validation.RegexStr(default=SKIP_NO_FILES),
}
@classmethod
def MergeManualScaling(cls, appinclude_one, appinclude_two):
"""Takes the greater of <manual_scaling.instances> from the args.
Note that appinclude_one is mutated to be the merged result in this process.
Also, this function needs to be updated if ManualScaling gets additional
fields.
Args:
appinclude_one: object one to merge. Must have a "manual_scaling" field
which contains a ManualScaling().
appinclude_two: object two to merge. Must have a "manual_scaling" field
which contains a ManualScaling().
Returns:
Object that is the result of merging
appinclude_one.manual_scaling.instances and
appinclude_two.manual_scaling.instances. I.e., <appinclude_one>
after the mutations are complete.
"""
def _Instances(appinclude):
if appinclude.manual_scaling:
if appinclude.manual_scaling.instances:
return int(appinclude.manual_scaling.instances)
return None
instances = max(_Instances(appinclude_one), _Instances(appinclude_two))
if instances is not None:
appinclude_one.manual_scaling = ManualScaling(instances=str(instances))
return appinclude_one
@classmethod
def _CommonMergeOps(cls, one, two):
"""This function performs common merge operations."""
AppInclude.MergeManualScaling(one, two)
one.admin_console = AdminConsole.Merge(one.admin_console,
two.admin_console)
one.vm = two.vm or one.vm
one.vm_settings = VmSettings.Merge(one.vm_settings,
two.vm_settings)
one.env_variables = EnvironmentVariables.Merge(one.env_variables,
two.env_variables)
one.skip_files = cls.MergeSkipFiles(one.skip_files, two.skip_files)
return one
@classmethod
def MergeAppYamlAppInclude(cls, appyaml, appinclude):
"""This function merges an app.yaml file with referenced builtins/includes.
"""
if not appinclude:
return appyaml
if appinclude.handlers:
tail = appyaml.handlers or []
appyaml.handlers = []
for h in appinclude.handlers:
if not h.position or h.position == 'head':
appyaml.handlers.append(h)
else:
tail.append(h)
h.position = None
appyaml.handlers.extend(tail)
appyaml = cls._CommonMergeOps(appyaml, appinclude)
return NormalizeVmSettings(appyaml)
@classmethod
def MergeAppIncludes(cls, appinclude_one, appinclude_two):
"""This function merges the non-referential state of the provided AppInclude
objects. That is, builtins and includes directives are not preserved, but
any static objects are copied into an aggregate AppInclude object that
preserves the directives of both provided AppInclude objects.
Note that appinclude_one is mutated to be the merged result in this process.
Args:
appinclude_one: object one to merge
appinclude_two: object two to merge
Returns:
AppInclude object that is the result of merging the static directives of
appinclude_one and appinclude_two. I.e., <appinclude_one> after the
mutations are complete.
"""
if not appinclude_one or not appinclude_two:
return appinclude_one or appinclude_two
if appinclude_one.handlers:
if appinclude_two.handlers:
appinclude_one.handlers.extend(appinclude_two.handlers)
else:
appinclude_one.handlers = appinclude_two.handlers
return cls._CommonMergeOps(appinclude_one, appinclude_two)
@staticmethod
def MergeSkipFiles(skip_files_one, skip_files_two):
if skip_files_one == SKIP_NO_FILES:
return skip_files_two
if skip_files_two == SKIP_NO_FILES:
return skip_files_one
return validation.RegexStr().Validate(
[skip_files_one, skip_files_two], SKIP_FILES)
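# Illustrative sketch only (not part of the original SDK module): SKIP_NO_FILES
# acts as the identity value for MergeSkipFiles, so a real pattern on either
# side is returned unchanged, while two real patterns are combined into one
# validated skip_files value. The patterns below are hypothetical examples.
def _ExampleMergeSkipFiles():
  """Returns the combination of two sample skip_files patterns."""
  assert AppInclude.MergeSkipFiles(SKIP_NO_FILES, r'^a\.txt$') == r'^a\.txt$'
  assert AppInclude.MergeSkipFiles(r'^a\.txt$', SKIP_NO_FILES) == r'^a\.txt$'
  return AppInclude.MergeSkipFiles(r'^a\.txt$', r'^b\.txt$')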
class AppInfoExternal(validation.Validated):
"""Class representing users application info.
This class is passed to a yaml_object builder to provide the validation
for the application information file format parser.
Attributes:
application: Unique identifier for application.
version: Application's major version.
runtime: Runtime used by application.
api_version: Which version of APIs to use.
source_language: Optional specification of the source language.
For example we specify "php-quercus" if this is a Java app
that was generated from PHP source using Quercus
handlers: List of URL handlers.
default_expiration: Default time delta to use for cache expiration for
all static files, unless they have their own specific 'expiration' set.
See the URLMap.expiration field's documentation for more information.
skip_files: An re object. Files that match this regular expression will
not be uploaded by appcfg.py. For example:
skip_files: |
.svn.*|
#.*#
nobuild_files: An re object. Files that match this regular expression will
not be built into the app. Go only.
api_config: URL root and script/servlet path for enhanced api serving
"""
ATTRIBUTES = {
APPLICATION: validation.Optional(APPLICATION_RE_STRING),
MODULE: validation.Optional(MODULE_ID_RE_STRING),
VERSION: validation.Optional(MODULE_VERSION_ID_RE_STRING),
RUNTIME: RUNTIME_RE_STRING,
API_VERSION: API_VERSION_RE_STRING,
INSTANCE_CLASS: validation.Optional(_INSTANCE_CLASS_REGEX),
SOURCE_LANGUAGE: validation.Optional(
validation.Regex(SOURCE_LANGUAGE_RE_STRING)),
AUTOMATIC_SCALING: validation.Optional(AutomaticScaling),
MANUAL_SCALING: validation.Optional(ManualScaling),
BASIC_SCALING: validation.Optional(BasicScaling),
VM: validation.Optional(bool),
VM_SETTINGS: validation.Optional(VmSettings),
VM_HEALTH_CHECK: validation.Optional(VmHealthCheck),
BUILTINS: validation.Optional(validation.Repeated(BuiltinHandler)),
INCLUDES: validation.Optional(validation.Type(list)),
HANDLERS: validation.Optional(validation.Repeated(URLMap)),
LIBRARIES: validation.Optional(validation.Repeated(Library)),
SERVICES: validation.Optional(validation.Repeated(
validation.Regex(_SERVICE_RE_STRING))),
DEFAULT_EXPIRATION: validation.Optional(_EXPIRATION_REGEX),
SKIP_FILES: validation.RegexStr(default=DEFAULT_SKIP_FILES),
NOBUILD_FILES: validation.RegexStr(default=DEFAULT_NOBUILD_FILES),
DERIVED_FILE_TYPE: validation.Optional(validation.Repeated(
validation.Options(JAVA_PRECOMPILED, PYTHON_PRECOMPILED))),
ADMIN_CONSOLE: validation.Optional(AdminConsole),
ERROR_HANDLERS: validation.Optional(validation.Repeated(ErrorHandlers)),
BACKENDS: validation.Optional(validation.Repeated(
backendinfo.BackendEntry)),
THREADSAFE: validation.Optional(bool),
DATASTORE_AUTO_ID_POLICY: validation.Optional(
validation.Options(DATASTORE_ID_POLICY_LEGACY,
DATASTORE_ID_POLICY_DEFAULT)),
API_CONFIG: validation.Optional(ApiConfigHandler),
CODE_LOCK: validation.Optional(bool),
ENV_VARIABLES: validation.Optional(EnvironmentVariables),
PAGESPEED: validation.Optional(pagespeedinfo.PagespeedEntry),
}
_skip_runtime_checks = False
def CheckInitialized(self):
"""Performs non-regex-based validation.
The following are verified:
- At least one url mapping is provided in the URL mappers.
- Number of url mappers doesn't exceed MAX_URL_MAPS.
- Major version does not contain the string -dot-.
- If api_endpoints are defined, an api_config stanza must be defined.
- If the runtime is python27 and threadsafe is set, then no CGI handlers
can be used.
- That the version name doesn't start with BUILTIN_NAME_PREFIX
- If redirect_http_response_code exists, it is in the list of valid 300s.
Raises:
DuplicateLibrary: if the same library name is specified more than once.
MissingURLMapping: if no URLMap object is present in the object.
TooManyURLMappings: if there are too many URLMap entries.
MissingApiConfig: if api_endpoints exist without an api_config.
MissingThreadsafe: if threadsafe is not set but the runtime requires it.
ThreadsafeWithCgiHandler: if the runtime is python27, threadsafe is set
and CGI handlers are specified.
TooManyScalingSettingsError: if more than one scaling settings block is
present.
RuntimeDoesNotSupportLibraries: if libraries clause is used for a runtime
that does not support it (e.g. python25).
"""
super(AppInfoExternal, self).CheckInitialized()
if not self.handlers and not self.builtins and not self.includes:
raise appinfo_errors.MissingURLMapping(
'No URLMap entries found in application configuration')
if self.handlers and len(self.handlers) > MAX_URL_MAPS:
raise appinfo_errors.TooManyURLMappings(
'Found more than %d URLMap entries in application configuration' %
MAX_URL_MAPS)
if (self.threadsafe is None and
self.runtime == 'python27' and
not self._skip_runtime_checks):
raise appinfo_errors.MissingThreadsafe(
'threadsafe must be present and set to either "yes" or "no"')
if self.auto_id_policy == DATASTORE_ID_POLICY_LEGACY:
datastore_auto_ids_url = ('http://developers.google.com/'
'appengine/docs/python/datastore/'
'entities#Kinds_and_Identifiers')
appcfg_auto_ids_url = ('http://developers.google.com/appengine/docs/'
'python/config/appconfig#auto_id_policy')
logging.warning(
"You have set the datastore auto_id_policy to 'legacy'. It is "
"recommended that you select 'default' instead.\n"
"Legacy auto ids are deprecated. You can continue to allocate\n"
"legacy ids manually using the allocate_ids() API functions.\n"
"For more information see:\n"
+ datastore_auto_ids_url + '\n' + appcfg_auto_ids_url + '\n')
if self.libraries:
vm_runtime_python27 = (
self.runtime == 'vm' and
hasattr(self, 'vm_settings') and
self.vm_settings['vm_runtime'] == 'python27')
if not self._skip_runtime_checks and not (
vm_runtime_python27 or self.runtime == 'python27'):
raise appinfo_errors.RuntimeDoesNotSupportLibraries(
'libraries entries are only supported by the "python27" runtime')
library_names = [library.name for library in self.libraries]
for library_name in library_names:
if library_names.count(library_name) > 1:
raise appinfo_errors.DuplicateLibrary(
'Duplicate library entry for %s' % library_name)
if self.version and self.version.find(ALTERNATE_HOSTNAME_SEPARATOR) != -1:
raise validation.ValidationError(
'Version "%s" cannot contain the string "%s"' % (
self.version, ALTERNATE_HOSTNAME_SEPARATOR))
if self.version and self.version.startswith(BUILTIN_NAME_PREFIX):
raise validation.ValidationError(
('Version "%s" cannot start with "%s" because it is a '
'reserved version name prefix.') % (self.version,
BUILTIN_NAME_PREFIX))
if self.handlers:
api_endpoints = [handler.url for handler in self.handlers
if handler.GetHandlerType() == HANDLER_API_ENDPOINT]
if api_endpoints and not self.api_config:
raise appinfo_errors.MissingApiConfig(
'An api_endpoint handler was specified, but the required '
'api_config stanza was not configured.')
if (self.threadsafe and
self.runtime == 'python27' and
not self._skip_runtime_checks):
for handler in self.handlers:
if (handler.script and (handler.script.endswith('.py') or
'/' in handler.script)):
raise appinfo_errors.ThreadsafeWithCgiHandler(
'threadsafe cannot be enabled with CGI handler: %s' %
handler.script)
if sum([bool(self.automatic_scaling),
bool(self.manual_scaling),
bool(self.basic_scaling)]) > 1:
raise appinfo_errors.TooManyScalingSettingsError(
"There may be only one of 'automatic_scaling', 'manual_scaling', "
"or 'basic_scaling'.")
def GetAllLibraries(self):
"""Returns a list of all Library instances active for this configuration.
Returns:
The list of active Library instances for this configuration. This includes
directly-specified libraries as well as any required dependencies.
"""
if not self.libraries:
return []
library_names = set(library.name for library in self.libraries)
required_libraries = []
for library in self.libraries:
for required_name, required_version in REQUIRED_LIBRARIES.get(
(library.name, library.version), []):
if required_name not in library_names:
required_libraries.append(Library(name=required_name,
version=required_version))
return [Library(**library.ToDict())
for library in self.libraries + required_libraries]
def GetNormalizedLibraries(self):
"""Returns a list of normalized Library instances for this configuration.
Returns:
The list of active Library instances for this configuration. This includes
directly-specified libraries, their required dependencies as well as any
libraries enabled by default. Any libraries with "latest" as their version
will be replaced with the latest available version.
"""
libraries = self.GetAllLibraries()
enabled_libraries = set(library.name for library in libraries)
for library in _SUPPORTED_LIBRARIES:
if library.default_version and library.name not in enabled_libraries:
libraries.append(Library(name=library.name,
version=library.default_version))
for library in libraries:
if library.version == 'latest':
library.version = _NAME_TO_SUPPORTED_LIBRARY[
library.name].supported_versions[-1]
return libraries
def ApplyBackendSettings(self, backend_name):
"""Applies settings from the indicated backend to the AppInfoExternal.
Backend entries may contain directives that modify other parts of the
app.yaml, such as the 'start' directive, which adds a handler for the start
request. This method performs those modifications.
Args:
backend_name: The name of a backend defined in 'backends'.
Raises:
BackendNotFound: if the indicated backend was not listed in 'backends'.
DuplicateBackend: if backend is found more than once in 'backends'.
"""
if backend_name is None:
return
if self.backends is None:
raise appinfo_errors.BackendNotFound
self.version = backend_name
match = None
for backend in self.backends:
if backend.name != backend_name:
continue
if match:
raise appinfo_errors.DuplicateBackend
else:
match = backend
if match is None:
raise appinfo_errors.BackendNotFound
if match.start is None:
return
start_handler = URLMap(url=_START_PATH, script=match.start)
self.handlers.insert(0, start_handler)
def GetEffectiveRuntime(self):
"""Returns the app's runtime, resolving VMs to the underlying vm_runtime.
Returns:
The effective runtime: the value of vm_settings.vm_runtime if runtime is
"vm", or runtime otherwise.
"""
if self.runtime == 'vm' and hasattr(self, 'vm_settings'):
return self.vm_settings.get('vm_runtime')
return self.runtime
def ValidateHandlers(handlers, is_include_file=False):
"""Validates a list of handler (URLMap) objects.
Args:
handlers: A list of a handler (URLMap) objects.
is_include_file: If true, indicates that we are performing validation
for handlers in an AppInclude file, which may contain special directives.
"""
if not handlers:
return
for handler in handlers:
handler.FixSecureDefaults()
handler.WarnReservedURLs()
if not is_include_file:
handler.ErrorOnPositionForAppInfo()
def LoadSingleAppInfo(app_info):
"""Load a single AppInfo object where one and only one is expected.
Args:
app_info: A file-like object or string. If it is a string, parse it as
a configuration file. If it is a file-like object, read in data and
parse.
Returns:
An instance of AppInfoExternal as loaded from a YAML file.
Raises:
ValueError: if a specified service is not valid.
EmptyConfigurationFile: when there are no documents in YAML file.
MultipleConfigurationFile: when there is more than one document in YAML
file.
DuplicateBackend: if backend is found more than once in 'backends'.
"""
builder = yaml_object.ObjectBuilder(AppInfoExternal)
handler = yaml_builder.BuilderHandler(builder)
listener = yaml_listener.EventListener(handler)
listener.Parse(app_info)
app_infos = handler.GetResults()
if len(app_infos) < 1:
raise appinfo_errors.EmptyConfigurationFile()
if len(app_infos) > 1:
raise appinfo_errors.MultipleConfigurationFile()
appyaml = app_infos[0]
ValidateHandlers(appyaml.handlers)
if appyaml.builtins:
BuiltinHandler.Validate(appyaml.builtins, appyaml.runtime)
return NormalizeVmSettings(appyaml)
class AppInfoSummary(validation.Validated):
"""This class contains only basic summary information about an app.
It is used to pass back information about the newly created app to users
after a new version has been created.
"""
ATTRIBUTES = {
APPLICATION: APPLICATION_RE_STRING,
MAJOR_VERSION: MODULE_VERSION_ID_RE_STRING,
MINOR_VERSION: validation.TYPE_LONG
}
def LoadAppInclude(app_include):
"""Load a single AppInclude object where one and only one is expected.
Args:
app_include: A file-like object or string. If it is a string, parse it as
a configuration file. If it is a file-like object, read in data and
parse.
Returns:
An instance of AppInclude as loaded from a YAML file.
Raises:
EmptyConfigurationFile: when there are no documents in YAML file.
MultipleConfigurationFile: when there is more than one document in YAML
file.
"""
builder = yaml_object.ObjectBuilder(AppInclude)
handler = yaml_builder.BuilderHandler(builder)
listener = yaml_listener.EventListener(handler)
listener.Parse(app_include)
includes = handler.GetResults()
if len(includes) < 1:
raise appinfo_errors.EmptyConfigurationFile()
if len(includes) > 1:
raise appinfo_errors.MultipleConfigurationFile()
includeyaml = includes[0]
if includeyaml.handlers:
for handler in includeyaml.handlers:
handler.FixSecureDefaults()
handler.WarnReservedURLs()
if includeyaml.builtins:
BuiltinHandler.Validate(includeyaml.builtins)
return includeyaml
def ParseExpiration(expiration):
"""Parses an expiration delta string.
Args:
expiration: String that matches _DELTA_REGEX.
Returns:
Time delta in seconds.
"""
delta = 0
for match in re.finditer(_DELTA_REGEX, expiration):
amount = int(match.group(1))
units = _EXPIRATION_CONVERSIONS.get(match.group(2).lower(), 1)
delta += amount * units
return delta
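# Illustrative sketch only (not part of the original SDK module): assuming the
# usual conversions of 86400/3600/60 seconds for 'd'/'h'/'m' and a default of
# seconds, '1d 6h' parses to 86400 + 21600 = 108000 seconds.
def _ExampleParseExpiration():
  """Returns the number of seconds represented by a sample expiration."""
  return ParseExpiration('1d 6h')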
_file_path_positive_re = re.compile(r'^[ 0-9a-zA-Z\._\+/@\$-]{1,256}$')
_file_path_negative_1_re = re.compile(r'\.\.|^\./|\.$|/\./|^-|^_ah/|^/')
_file_path_negative_2_re = re.compile(r'//|/$')
_file_path_negative_3_re = re.compile(r'^ | $|/ | /')
def ValidFilename(filename):
"""Determines if filename is valid.
filename must be a valid pathname.
- It must contain only letters, numbers, @, _, +, /, $, ., and -.
- It must be no more than 256 characters.
- It must not contain "/./", "/../", or "//".
- It must not end in "/".
- All spaces must be in the middle of a directory or file name.
Args:
filename: The filename to validate.
Returns:
An error string if the filename is invalid. Returns '' if the filename
is valid.
"""
if _file_path_positive_re.match(filename) is None:
return 'Invalid character in filename: %s' % filename
if _file_path_negative_1_re.search(filename) is not None:
return ('Filename cannot contain "." or ".." '
'or start with "-" or "_ah/": %s' %
filename)
if _file_path_negative_2_re.search(filename) is not None:
return 'Filename cannot have trailing / or contain //: %s' % filename
if _file_path_negative_3_re.search(filename) is not None:
return 'Any spaces must be in the middle of a filename: %s' % filename
return ''
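# Illustrative sketch only (not part of the original SDK module): ValidFilename
# returns '' for acceptable paths and a descriptive error string otherwise.
# The file names below are hypothetical examples.
def _ExampleValidFilename():
  """Checks a few sample file names against ValidFilename."""
  assert ValidFilename('static/css/main.css') == ''
  assert ValidFilename('static/css/') != ''  # trailing slash is rejected
  assert ValidFilename('../secrets.txt') != ''  # parent references are rejected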
| mit |
pbrod/scipy | scipy/interpolate/tests/test_rbf.py | 14 | 4604 | # Created by John Travers, Robert Hetland, 2007
""" Test functions for rbf module """
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_, assert_array_almost_equal,
assert_almost_equal, run_module_suite)
from numpy import linspace, sin, random, exp, allclose
from scipy.interpolate.rbf import Rbf
FUNCTIONS = ('multiquadric', 'inverse multiquadric', 'gaussian',
'cubic', 'quintic', 'thin-plate', 'linear')
def check_rbf1d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (1D)
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y, function=function)
yi = rbf(x)
assert_array_almost_equal(y, yi)
assert_almost_equal(rbf(float(x[0])), y[0])
def check_rbf2d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (2D).
x = random.rand(50,1)*4-2
y = random.rand(50,1)*4-2
z = x*exp(-x**2-1j*y**2)
rbf = Rbf(x, y, z, epsilon=2, function=function)
zi = rbf(x, y)
zi.shape = x.shape
assert_array_almost_equal(z, zi)
def check_rbf3d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (3D).
x = random.rand(50, 1)*4 - 2
y = random.rand(50, 1)*4 - 2
z = random.rand(50, 1)*4 - 2
d = x*exp(-x**2 - y**2)
rbf = Rbf(x, y, z, d, epsilon=2, function=function)
di = rbf(x, y, z)
di.shape = x.shape
assert_array_almost_equal(di, d)
def test_rbf_interpolation():
for function in FUNCTIONS:
yield check_rbf1d_interpolation, function
yield check_rbf2d_interpolation, function
yield check_rbf3d_interpolation, function
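# Minimal usage sketch (illustrative only, not one of the test cases above):
# interpolate sin(x) at new points with the default multiquadric basis.
def example_rbf_usage():
    x = linspace(0, 10, 9)
    y = sin(x)
    rbf = Rbf(x, y)
    xi = linspace(0, 10, 101)
    return rbf(xi)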
def check_rbf1d_regularity(function, atol):
# Check that the Rbf function approximates a smooth function well away
# from the nodes.
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, function=function)
xi = linspace(0, 10, 100)
yi = rbf(xi)
# import matplotlib.pyplot as plt
# plt.figure()
# plt.plot(x, y, 'o', xi, sin(xi), ':', xi, yi, '-')
# plt.plot(x, y, 'o', xi, yi-sin(xi), ':')
# plt.title(function)
# plt.show()
msg = "abs-diff: %f" % abs(yi - sin(xi)).max()
assert_(allclose(yi, sin(xi), atol=atol), msg)
def test_rbf_regularity():
tolerances = {
'multiquadric': 0.1,
'inverse multiquadric': 0.15,
'gaussian': 0.15,
'cubic': 0.15,
'quintic': 0.1,
'thin-plate': 0.1,
'linear': 0.2
}
for function in FUNCTIONS:
yield check_rbf1d_regularity, function, tolerances.get(function, 1e-2)
def check_rbf1d_stability(function):
# Check that the Rbf function with default epsilon is not subject
# to overshoot. Regression for issue #4523.
#
# Generate some data (fixed random seed hence deterministic)
np.random.seed(1234)
x = np.linspace(0, 10, 50)
z = x + 4.0 * np.random.randn(len(x))
rbf = Rbf(x, z, function=function)
xi = np.linspace(0, 10, 1000)
yi = rbf(xi)
# subtract the linear trend and make sure there are no spikes
assert_(np.abs(yi-xi).max() / np.abs(z-x).max() < 1.1)
def test_rbf_stability():
for function in FUNCTIONS:
yield check_rbf1d_stability, function
def test_default_construction():
# Check that the Rbf class can be constructed with the default
# multiquadric basis function. Regression test for ticket #1228.
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_function_is_callable():
# Check that the Rbf class can be constructed with function=callable.
x = linspace(0,10,9)
y = sin(x)
linfunc = lambda x:x
rbf = Rbf(x, y, function=linfunc)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_two_arg_function_is_callable():
# Check that the Rbf class can be constructed with a two argument
# function=callable.
def _func(self, r):
return self.epsilon + r
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y, function=_func)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_rbf_epsilon_none():
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, epsilon=None)
def test_rbf_epsilon_none_collinear():
# Check that collinear points in one dimension don't cause an error
# due to epsilon = 0
x = [1, 2, 3]
y = [4, 4, 4]
z = [5, 6, 7]
rbf = Rbf(x, y, z, epsilon=None)
assert_(rbf.epsilon > 0)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
BoltzmannBrain/nupic.research | projects/sequence_prediction/continuous_sequence/plotPerturbResult.py | 2 | 9219 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from matplotlib import pyplot as plt
plt.ion()
from errorMetrics import *
import pandas as pd
import numpy as np
from pylab import rcParams
from plot import ExperimentResult, plotAccuracy, computeSquareDeviation, computeLikelihood
from nupic.encoders.scalar import ScalarEncoder as NupicScalarEncoder
import plotly.plotly as py
rcParams.update({'figure.autolayout': True})
rcParams.update({'figure.facecolor': 'white'})
rcParams.update({'ytick.labelsize': 8})
rcParams.update({'figure.figsize': (12, 6)})
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
window = 960
figPath = './result/'
plt.close('all')
# use datetime as x-axis
dataSet = 'nyc_taxi'
filePath = './data/' + dataSet + '.csv'
data = pd.read_csv(filePath, header=0, skiprows=[1, 2], names=['datetime', 'value', 'timeofday', 'dayofweek'])
xaxisDatetime = pd.to_datetime(data['datetime'])
expResultPerturb = ExperimentResult(
'results/nyc_taxi_experiment_continuous_likelihood_perturb/learning_window'+str(1001.0)+'/')
negLLLSTM1000Perturb = expResultPerturb.error
truthLSTM1000Perturb = expResultPerturb.truth
expResultPerturb = ExperimentResult(
'results/nyc_taxi_experiment_continuous_likelihood_perturb/learning_window'+str(3001.0)+'/')
negLLLSTM3000Perturb = expResultPerturb.error
truthLSTM3000Perturb = expResultPerturb.truth
expResultPerturb = ExperimentResult(
'results/nyc_taxi_experiment_continuous_likelihood_perturb/learning_window'+str(6001.0)+'/')
negLLLSTM6000Perturb = expResultPerturb.error
truthLSTM6000Perturb = expResultPerturb.truth
expResultPerturb = ExperimentResult(
'results/nyc_taxi_experiment_continuous_likelihood_perturb_online/learning_window'+str(100.0)+'/')
negLLLSTMonlinePerturb = expResultPerturb.error
truth_LSTMonline_perturb = expResultPerturb.truth
dataSet = 'nyc_taxi_perturb'
tmPredictionPerturb = np.load('./result/' + dataSet + 'TMprediction.npy')
tmTruthPerturb = np.load('./result/' + dataSet + 'TMtruth.npy')
encoder = NupicScalarEncoder(w=1, minval=0, maxval=40000, n=22, forced=True)
filePath = './prediction/' + dataSet + '_TM_pred.csv'
predDataTM = pd.read_csv(filePath, header=0, skiprows=[1, 2], names=['step', 'value', 'prediction5'])
predDataTMfiveStep = np.array(predDataTM['prediction5'])
iteration = predDataTM.index
tmPredPerturbTruth = np.roll(predDataTM['value'], -5)
tmPredPerturb = np.array(predDataTM['prediction5'])
filePath = './prediction/' + dataSet + '_esn_pred.csv'
predDataESN = pd.read_csv(filePath, header=0, skiprows=[1, 2],
names=['step', 'value', 'prediction5'])
esnPredPerturbTruth = np.roll(predDataESN['value'], -5)
esnPredPerturb = np.array(predDataESN['prediction5'])
negLLTMPerturb = computeLikelihood(tmPredictionPerturb, tmTruthPerturb, encoder)
negLLTMPerturb[:6000] = None
nrmseTMPerturb = computeSquareDeviation(tmPredPerturb, tmPredPerturbTruth)
mapeTMPerturb = np.abs(tmPredPerturb - tmPredPerturbTruth)
mapeESNPerturb = np.abs(esnPredPerturb - esnPredPerturbTruth)
expResultPerturb1000 = ExperimentResult(
'results/nyc_taxi_experiment_continuous_perturb/learning_window'+str(1001.0)+'/')
expResultPerturb3000 = ExperimentResult(
'results/nyc_taxi_experiment_continuous_perturb/learning_window'+str(3001.0)+'/')
expResultPerturb6000 = ExperimentResult(
'results/nyc_taxi_experiment_continuous_perturb/learning_window'+str(6001.0)+'/')
expResultPerturbOnline = ExperimentResult(
'results/nyc_taxi_experiment_continuous_perturb_online/learning_window'+str(200.0)+'/')
nrmseLSTM1000Perturb = expResultPerturb1000.error
nrmseLSTM3000Perturb = expResultPerturb3000.error
nrmseLSTM6000Perturb = expResultPerturb6000.error
nrmseLSTMOnlinePerturb = expResultPerturbOnline.error
mapeLSTM1000Perturb = np.abs(expResultPerturb1000.truth - expResultPerturb1000.predictions)
mapeLSTM3000Perturb = np.abs(expResultPerturb3000.truth - expResultPerturb3000.predictions)
mapeLSTM6000Perturb = np.abs(expResultPerturb6000.truth - expResultPerturb6000.predictions)
mapeLSTMOnlinePerturb = np.abs(expResultPerturbOnline.truth - expResultPerturbOnline.predictions)
plt.figure()
window = 400
plotAccuracy((mapeLSTM1000Perturb, xaxisDatetime), truthLSTM3000Perturb,
window=window, errorType='mape', label='LSTM1000', train=expResultPerturb1000.train)
plotAccuracy((mapeLSTM3000Perturb, xaxisDatetime), truthLSTM3000Perturb,
window=window, errorType='mape', label='LSTM3000')
plotAccuracy((mapeLSTM6000Perturb, xaxisDatetime), truthLSTM6000Perturb,
window=window, errorType='mape', label='LSTM6000')
plotAccuracy((mapeLSTMOnlinePerturb, xaxisDatetime), truth_LSTMonline_perturb,
window=window, errorType='mape', label='LSTM-online')
plotAccuracy((mapeTMPerturb, xaxisDatetime), tmTruthPerturb,
window=window, errorType='mape', label='TM')
plt.axvline(xaxisDatetime[13152], color='black', linestyle='--')
plt.xlim([xaxisDatetime[13000], xaxisDatetime[15000]])
plt.legend()
plt.ylim([.1, .4])
plt.ylabel('MAPE')
plt.savefig(figPath + 'example_perturbation_MAPE.pdf')
plt.figure()
plotAccuracy((negLLLSTM3000Perturb, xaxisDatetime), truthLSTM3000Perturb,
window=window, errorType='negLL', label='LSTM3000')
# plotAccuracy((negLL_LSTM3000_perturb_baseline, xaxis_datetime), truth_LSTM3000_perturb, window=window, errorType='negLL', label='TM')
plotAccuracy((negLLLSTM6000Perturb, xaxisDatetime), truthLSTM6000Perturb, window=window, errorType='negLL', label='LSTM6000')
plotAccuracy((negLLLSTMonlinePerturb, xaxisDatetime), truthLSTM6000Perturb, window=window, errorType='negLL', label='LSTM-online')
# plotAccuracy((negLL_LSTM6000_perturb_baseline, xaxis_datetime), truth_LSTM3000_perturb, window=window, errorType='negLL', label='TM')
plotAccuracy((negLLTMPerturb, xaxisDatetime), tmTruthPerturb, window=window, errorType='negLL', label='TM')
plt.axvline(xaxisDatetime[13152], color='black', linestyle='--')
plt.xlim([xaxisDatetime[13000], xaxisDatetime[15000]])
plt.legend()
plt.ylim([1.2, 2.3])
plt.ylabel('Negative Log-Likelihood')
plt.savefig(figPath + 'example_perturbation.pdf')
startFrom = 13152
endAt = startFrom+17520
norm_factor = np.nanstd(tmTruthPerturb[startFrom:endAt])
fig, ax = plt.subplots(nrows=1, ncols=3)
inds = np.arange(5)
width = 0.5
ax1 = ax[0]
ax1.bar(inds, [np.sqrt(np.nanmean(nrmseLSTMOnlinePerturb[startFrom:endAt])) / norm_factor,
np.sqrt(np.nanmean(nrmseLSTM1000Perturb[startFrom:endAt])) / norm_factor,
np.sqrt(np.nanmean(nrmseLSTM3000Perturb[startFrom:endAt])) / norm_factor,
np.sqrt(np.nanmean(nrmseLSTM6000Perturb[startFrom:endAt])) / norm_factor,
np.sqrt(np.nanmean(nrmseTMPerturb[startFrom:endAt])) / norm_factor], width=width)
ax1.set_xticks(inds+width/2)
ax1.set_xticklabels( ('LSTMonline', 'LSTM1000', 'LSTM3000', 'LSTM6000', 'TM') )
ax1.set_xlim([inds[0]-width*.6, inds[-1]+width*1.4])
ax1.set_ylabel('NRMSE')
ax2 = ax[1]
width = 0.5
norm_factor = np.nanmean(np.abs(tmTruthPerturb[startFrom:endAt]))
ax2.bar(inds, [np.nanmean(mapeLSTMOnlinePerturb[startFrom:endAt]) / norm_factor,
np.nanmean(mapeLSTM1000Perturb[startFrom:endAt]) / norm_factor,
np.nanmean(mapeLSTM3000Perturb[startFrom:endAt]) / norm_factor,
np.nanmean(mapeLSTM6000Perturb[startFrom:endAt]) / norm_factor,
np.nanmean(mapeTMPerturb[startFrom:endAt]) / norm_factor], width=width)
ax2.set_xticks(inds+width/2)
ax2.set_xticklabels( ('LSTMonline', 'LSTM1000', 'LSTM3000', 'LSTM6000', 'TM') )
ax2.set_xlim([inds[0]-width*.6, inds[-1]+width*1.4])
ax2.set_ylabel('MAPE')
ax3 = ax[2]
width = 0.5
ax3.bar(inds, [np.nanmean(negLLLSTMonlinePerturb[startFrom:endAt]),
np.nanmean(negLLLSTM1000Perturb[startFrom:endAt]),
np.nanmean(negLLLSTM3000Perturb[startFrom:endAt]),
np.nanmean(negLLLSTM6000Perturb[startFrom:endAt]),
np.nanmean(negLLTMPerturb[startFrom:])], width=width)
ax3.set_xticks(inds+width/2)
ax3.set_xticklabels( ('LSTMonline', 'LSTM1000', 'LSTM3000', 'LSTM6000', 'TM') )
ax3.set_xlim([inds[0]-width*.6, inds[-1]+width*1.4])
ax3.set_ylabel('Negative Log-likelihood')
plt.savefig(figPath + 'model_performance_after_perturbation.pdf') | agpl-3.0 |
aetilley/scikit-learn | examples/datasets/plot_iris_dataset.py | 283 | 1928 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows are the samples and the columns are:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
nsdf/nsdf | graphviz/HH_figure/figure_hh_a.py | 1 | 4978 | # This script was written by Chaitanya Chintaluri [email protected]
# This software is available under GNU GPL3 License.
# Uses pygraphviz to illustrate the inner structure of NSDF file format
# This is to use in the NSDF paper and to generate machine readable file structure
# for the convenience of the user.
# Use matplotlib and pygraphviz
import re
import matplotlib.pyplot as plt
import pygraphviz as pgv
import webcolors as wc
width_box = 1.0
edge_width = 1.0
font_name = 'Arial'
font_size = 12.0 #in pts
subgrp_shape = 'tab' #http://www.graphviz.org/doc/info/shapes.html#d:style
leafnde_shape = 'note'
NODE_0 = wc.name_to_hex('white') #root
NODE_1 = wc.name_to_hex('skyblue') #data/map/model
NODE_2 = wc.name_to_hex('wheat') #uniform/static
NODE_3 = wc.name_to_hex('lightgreen') #population
NODE_4 = wc.name_to_hex('sandybrown') #parameter
NODE_5 = wc.name_to_hex('lightgrey') #oned
NODE_COLOR = [NODE_0, NODE_1, NODE_2, NODE_3, NODE_4, NODE_5]
def add_child(G, parent_node, child, color=None, end_node=False):
if parent_node=='/':
child_x = parent_node+child
else:
child_x = parent_node+'/'+child
G.add_node(child_x+'_point', shape='point', width=0.05)
child_point_node = G.get_node(child_x+'_point')
G.add_edge(parent_node, child_point_node, weight=2, penwidth=edge_width, arrowsize=0.0, arrowhead=None, constraint=False)
if end_node:
G.add_node(child_x, label=child, width=width_box, shape=leafnde_shape, style='filled', concentrate=True, fillcolor=color,
fontname=font_name, fontsize=font_size)
else:
G.add_node(child_x, label=child, width=width_box, shape=subgrp_shape, style='filled', concentrate=True, fillcolor=color,
fontname=font_name, fontsize=font_size)
child_node = G.get_node(child_x)
G.add_edge(child_point_node, child_node, penwidth=edge_width, weight=3)
H = G.subgraph([child_point_node, parent_node], rank='same', constraint=False)
H = G.subgraph([child_point_node, child], rank='same')
return child_node
def gen_figure(dir_list):
G = pgv.AGraph(strict=True, directed=True, rankdir='LR', ranksep='0.15', splines=False, nodesep=0.25)
G.add_node('/', label='ROOT', shape=subgrp_shape, style='filled', concentrate=True, width=width_box,
fontname=font_name, fontsize=font_size, fillcolor=NODE_0)
for path in dir_list:
if path.startswith('/'):
pass
else:
path = '/'+path #starting with root
path_idx = [m.start() for m in re.finditer('/', path)]
sub_dirs = path.split('/')[1:] #skip the first
for ii,sub_folder in enumerate(sub_dirs):
try:
dummy = G.get_node(path[:path_idx[ii]]+'/'+sub_folder)
#print 'Node already exists:', path[:path_idx[ii]]+'/'+sub_folder
pass
except KeyError:
if ii==0:
add_child(G, '/', sub_folder, NODE_COLOR[ii+1])
elif ii==3 :
add_child(G, path[:path_idx[ii]], sub_folder, NODE_COLOR[ii+1], True)
elif ii==2 and (path.find('map')!=-1 or path.find('model')!=-1) :
add_child(G, path[:path_idx[ii]], sub_folder, NODE_COLOR[ii+1], True)
else:
add_child(G, path[:path_idx[ii]], sub_folder, NODE_COLOR[ii+1])
return G
def add_leaf(G, parent, leaf_name, leaf_html):
G.add_node(leaf_name, label=leaf_html, shape='box', style='filled', concentrate=True, width=width_box,
fontname=font_name, fontsize=font_size, fillcolor=NODE_4)
G.add_edge(parent, leaf_name, weight=1, penwidth=edge_width, arrowsize=0.0, style='dashed',
arrowhead=None, constraint=True, headport="nw", tailport="ne")
G.add_edge(leaf_name, parent, weight=1, penwidth=edge_width, arrowsize=0.0, style='dashed',
arrowhead=None, constraint=True, headport="se", tailport="sw")
#leaf_point_node = G.get_node(parent+'_point')
#H = G.subgraph([leaf_name, parent], rank='max', constraint=False)
return G
# dir_list = ['/data/event',
# '/data/nonuniform',
# '/data/static',
# '/data/uniform/channel/Ik',
# '/data/uniform/compartment/Vm',
# '/map/event',
# '/map/nonuniform',
# '/map/static',
# '/map/time',
# '/map/uniform/channel',
# '/map/uniform/compartment',
# '/model/modeltree/compartment/KChannel',
# '/model/modeltree/compartment/NaChannel']
dir_list = ['/data/uniform/compartment/Vm',
'/map/uniform/compartment',
'/model/modeltree/compartment']
G = gen_figure(dir_list)
# add_leaf(G, dir_list[0], 'static', static)
# add_leaf(G, dir_list[1], 'vm', Vm)
# add_leaf(G, dir_list[3], 'im', Im1)
# add_leaf(G, dir_list[4], 'spikes', spikes)
G.layout('dot')
G.draw('figure_hh_a.svg')
| gpl-3.0 |
fernandoandreotti/cinc-challenge2017 | deeplearn-approach/train_model.py | 1 | 15717 | '''
This function is used for training and cross-validating the model. The database is not
included in this repo; please download the CinC Challenge database and truncate/pad the data into an
NxM matrix array, where N is the number of recordings and M is the window length accepted by the network (i.e.
30 seconds).
For more information visit: https://github.com/fernandoandreotti/cinc-challenge2017
Referencing this work
Andreotti, F., Carr, O., Pimentel, M.A.F., Mahdi, A., & De Vos, M. (2017). Comparing Feature Based
Classifiers and Convolutional Neural Networks to Detect Arrhythmia from Short Segments of ECG. In
Computing in Cardiology. Rennes (France).
--
cinc-challenge2017, version 1.0, Sept 2017
Last updated : 27-09-2017
Released under the GNU General Public License
Copyright (C) 2017 Fernando Andreotti, Oliver Carr, Marco A.F. Pimentel, Adam Mahdi, Maarten De Vos
University of Oxford, Department of Engineering Science, Institute of Biomedical Engineering
[email protected]
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import scipy.io
import gc
import itertools
from sklearn.metrics import confusion_matrix
import sys
sys.path.insert(0, './preparation')
# Keras imports
import keras
from keras.models import Model
from keras.layers import Input, Conv1D, Dense, Flatten, Dropout,MaxPooling1D, Activation, BatchNormalization
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.utils import plot_model
from keras import backend as K
from keras.callbacks import Callback,warnings
###################################################################
### Callback method for reducing learning rate during training ###
###################################################################
class AdvancedLearnignRateScheduler(Callback):
'''
# Arguments
monitor: quantity to be monitored.
patience: number of epochs with no improvement
after which the learning rate will be reduced.
verbose: verbosity mode.
mode: one of {auto, min, max}. In 'min' mode,
the learning rate will be reduced when the quantity
monitored has stopped decreasing; in 'max'
mode it will be reduced when the quantity
monitored has stopped increasing.
decayRatio: factor by which the learning rate is
multiplied when it is reduced.
'''
def __init__(self, monitor='val_loss', patience=0,verbose=0, mode='auto', decayRatio=0.1):
super(Callback, self).__init__()
self.monitor = monitor
self.patience = patience
self.verbose = verbose
self.wait = 0
self.decayRatio = decayRatio
if mode not in ['auto', 'min', 'max']:
warnings.warn('Mode %s is unknown, '
'fallback to auto mode.'
% (mode), RuntimeWarning)
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
self.best = np.Inf
elif mode == 'max':
self.monitor_op = np.greater
self.best = -np.Inf
else:
if 'acc' in self.monitor:
self.monitor_op = np.greater
self.best = -np.Inf
else:
self.monitor_op = np.less
self.best = np.Inf
def on_epoch_end(self, epoch, logs={}):
current = logs.get(self.monitor)
current_lr = K.get_value(self.model.optimizer.lr)
print("\nLearning rate:", current_lr)
if current is None:
warnings.warn('AdvancedLearnignRateScheduler'
' requires %s available!' %
(self.monitor), RuntimeWarning)
if self.monitor_op(current, self.best):
self.best = current
self.wait = 0
else:
if self.wait >= self.patience:
if self.verbose > 0:
print('\nEpoch %05d: reducing learning rate' % (epoch))
assert hasattr(self.model.optimizer, 'lr'), \
'Optimizer must have a "lr" attribute.'
current_lr = K.get_value(self.model.optimizer.lr)
new_lr = current_lr * self.decayRatio
K.set_value(self.model.optimizer.lr, new_lr)
self.wait = 0
self.wait += 1
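# Usage sketch (illustrative only, not called anywhere in this script): the
# scheduler is passed to Keras fit() together with the usual early-stopping
# and checkpointing callbacks. The checkpoint filename is a placeholder.
def example_callbacks():
    return [
        AdvancedLearnignRateScheduler(monitor='val_loss', patience=6,
                                      verbose=1, mode='auto', decayRatio=0.1),
        EarlyStopping(monitor='val_loss', patience=10, verbose=1),
        ModelCheckpoint('weights-best.hdf5', monitor='val_loss',
                        save_best_only=True, verbose=1)]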
###########################################
## Function to plot confusion matrices ##
#########################################
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
cm = np.around(cm, decimals=3)
print(cm)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.savefig('confusion.eps', format='eps', dpi=1000)
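# Usage sketch (illustrative only, not called anywhere in this script): build
# a confusion matrix with scikit-learn from hypothetical label vectors and
# render it with the helper above.
def example_plot_confusion_matrix():
    classes = ['A', 'N', 'O', '~']
    y_true = np.array([0, 1, 2, 3, 1, 1, 2])
    y_pred = np.array([0, 1, 2, 3, 1, 2, 2])
    cm = confusion_matrix(y_true, y_pred, labels=[0, 1, 2, 3])
    plot_confusion_matrix(cm, classes, normalize=True)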
#####################################
## Model definition ##
## ResNet based on Rajpurkar ##
##################################
def ResNet_model(WINDOW_SIZE):
# Add CNN layers left branch (higher frequencies)
# Parameters from paper
INPUT_FEAT = 1
OUTPUT_CLASS = 4 # output classes
k = 1 # increment every 4th residual block
p = True # pool toggle every other residual block (end with 2^8)
convfilt = 64
convstr = 1
ksize = 16
poolsize = 2
poolstr = 2
drop = 0.5
# Modelling with Functional API
#input1 = Input(shape=(None,1), name='input')
input1 = Input(shape=(WINDOW_SIZE,INPUT_FEAT), name='input')
## First convolutional block (conv,BN, relu)
x = Conv1D(filters=convfilt,
kernel_size=ksize,
padding='same',
strides=convstr,
kernel_initializer='he_normal')(input1)
x = BatchNormalization()(x)
x = Activation('relu')(x)
## Second convolutional block (conv, BN, relu, dropout, conv) with residual net
# Left branch (convolutions)
x1 = Conv1D(filters=convfilt,
kernel_size=ksize,
padding='same',
strides=convstr,
kernel_initializer='he_normal')(x)
x1 = BatchNormalization()(x1)
x1 = Activation('relu')(x1)
x1 = Dropout(drop)(x1)
x1 = Conv1D(filters=convfilt,
kernel_size=ksize,
padding='same',
strides=convstr,
kernel_initializer='he_normal')(x1)
x1 = MaxPooling1D(pool_size=poolsize,
strides=poolstr)(x1)
# Right branch, shortcut branch pooling
x2 = MaxPooling1D(pool_size=poolsize,
strides=poolstr)(x)
# Merge both branches
x = keras.layers.add([x1, x2])
del x1,x2
## Main loop
p = not p
for l in range(15):
if (l%4 == 0) and (l>0): # increment k on every fourth residual block
k += 1
            # increase depth with a 1x1 convolution when the feature-map depth changes
xshort = Conv1D(filters=convfilt*k,kernel_size=1)(x)
else:
xshort = x
# Left branch (convolutions)
# notice the ordering of the operations has changed
x1 = BatchNormalization()(x)
x1 = Activation('relu')(x1)
x1 = Dropout(drop)(x1)
x1 = Conv1D(filters=convfilt*k,
kernel_size=ksize,
padding='same',
strides=convstr,
kernel_initializer='he_normal')(x1)
x1 = BatchNormalization()(x1)
x1 = Activation('relu')(x1)
x1 = Dropout(drop)(x1)
x1 = Conv1D(filters=convfilt*k,
kernel_size=ksize,
padding='same',
strides=convstr,
kernel_initializer='he_normal')(x1)
if p:
x1 = MaxPooling1D(pool_size=poolsize,strides=poolstr)(x1)
# Right branch: shortcut connection
if p:
x2 = MaxPooling1D(pool_size=poolsize,strides=poolstr)(xshort)
else:
x2 = xshort # pool or identity
# Merging branches
x = keras.layers.add([x1, x2])
# change parameters
p = not p # toggle pooling
# Final bit
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Flatten()(x)
#x = Dense(1000)(x)
#x = Dense(1000)(x)
out = Dense(OUTPUT_CLASS, activation='softmax')(x)
model = Model(inputs=input1, outputs=out)
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
#model.summary()
#sequential_model_to_ascii_printout(model)
plot_model(model, to_file='model.png')
return model
###########################################################
## Function to perform K-fold Crossvalidation on model ##
##########################################################
def model_eval(X,y):
batch =64
epochs = 20
rep = 1 # K fold procedure can be repeated multiple times
Kfold = 5
Ntrain = 8528 # number of recordings on training set
Nsamp = int(Ntrain/Kfold) # number of recordings to take as validation
# Need to add dimension for training
X = np.expand_dims(X, axis=2)
classes = ['A', 'N', 'O', '~']
Nclass = len(classes)
cvconfusion = np.zeros((Nclass,Nclass,Kfold*rep))
cvscores = []
counter = 0
# repetitions of cross validation
for r in range(rep):
print("Rep %d"%(r+1))
# cross validation loop
for k in range(Kfold):
print("Cross-validation run %d"%(k+1))
# Callbacks definition
callbacks = [
# Early stopping definition
EarlyStopping(monitor='val_loss', patience=3, verbose=1),
# Decrease learning rate by 0.1 factor
AdvancedLearnignRateScheduler(monitor='val_loss', patience=1,verbose=1, mode='auto', decayRatio=0.1),
# Saving best model
ModelCheckpoint('weights-best_k{}_r{}.hdf5'.format(k,r), monitor='val_loss', save_best_only=True, verbose=1),
]
# Load model
model = ResNet_model(WINDOW_SIZE)
# split train and validation sets
idxval = np.random.choice(Ntrain, Nsamp,replace=False)
            idxtrain = np.invert(np.in1d(range(X.shape[0]), idxval))
ytrain = y[np.asarray(idxtrain),:]
Xtrain = X[np.asarray(idxtrain),:,:]
Xval = X[np.asarray(idxval),:,:]
yval = y[np.asarray(idxval),:]
# Train model
model.fit(Xtrain, ytrain,
validation_data=(Xval, yval),
epochs=epochs, batch_size=batch,callbacks=callbacks)
# Evaluate best trained model
model.load_weights('weights-best_k{}_r{}.hdf5'.format(k,r))
ypred = model.predict(Xval)
ypred = np.argmax(ypred,axis=1)
ytrue = np.argmax(yval,axis=1)
cvconfusion[:,:,counter] = confusion_matrix(ytrue, ypred)
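            # Per-class F1 from the confusion matrix: F1_i = 2*TP_i / (2*TP_i + FP_i + FN_i)
            # (row i of the matrix sums to TP_i + FN_i, column i to TP_i + FP_i).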
F1 = np.zeros((4,1))
for i in range(4):
F1[i]=2*cvconfusion[i,i,counter]/(np.sum(cvconfusion[i,:,counter])+np.sum(cvconfusion[:,i,counter]))
print("F1 measure for {} rhythm: {:1.4f}".format(classes[i],F1[i,0]))
cvscores.append(np.mean(F1)* 100)
print("Overall F1 measure: {:1.4f}".format(np.mean(F1)))
K.clear_session()
gc.collect()
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
sess = tf.Session(config=config)
K.set_session(sess)
counter += 1
# Saving cross validation results
scipy.io.savemat('xval_results.mat',mdict={'cvconfusion': cvconfusion.tolist()})
return model
###########################
## Function to load data ##
###########################
def loaddata(WINDOW_SIZE):
'''
Load training/test data into workspace
This function assumes you have downloaded and padded/truncated the
training set into a local file named "trainingset.mat". This file should
contain the following structures:
- trainset: NxM matrix of N ECG segments with length M
    - traintarget: Nx4 matrix of one-hot coded labels, with a 1 in the column
      corresponding to the class in ['A', 'N', 'O', '~'].
'''
print("Loading data training set")
matfile = scipy.io.loadmat('trainingset.mat')
X = matfile['trainset']
y = matfile['traintarget']
# Merging datasets
    # If other datasets are available, load them here and concatenate them
#y = np.concatenate((traintarget,augtarget),axis=0)
#X = np.concatenate((trainset,augset),axis=0)
X = X[:,0:WINDOW_SIZE]
return (X, y)
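# Illustrative only (assumed shapes, not part of the original pipeline): a compatible
# 'trainingset.mat' could be written from padded/truncated ECG segments like this:
#   segments = np.zeros((8528, WINDOW_SIZE), dtype=np.float32)         # N segments of length M
#   onehot = np.eye(4)[np.random.randint(0, 4, 8528)]                  # Nx4 one-hot codes for ['A','N','O','~']
#   scipy.io.savemat('trainingset.mat', mdict={'trainset': segments, 'traintarget': onehot})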
#####################
# Main function ##
###################
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
seed = 7
np.random.seed(seed)
# Parameters
FS = 300
WINDOW_SIZE = 30*FS # padding window for CNN
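# 30 s at 300 Hz -> 9000 samples per padded/truncated ECG segment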
# Loading data
(X_train,y_train) = loaddata(WINDOW_SIZE)
# Training model
model = model_eval(X_train,y_train)
# Outputting results of cross-validation
matfile = scipy.io.loadmat('xval_results.mat')
cv = matfile['cvconfusion']
F1mean = np.zeros(cv.shape[2])
for j in range(cv.shape[2]):
classes = ['A', 'N', 'O', '~']
F1 = np.zeros((4,1))
for i in range(4):
F1[i]=2*cv[i,i,j]/(np.sum(cv[i,:,j])+np.sum(cv[:,i,j]))
print("F1 measure for {} rhythm: {:1.4f}".format(classes[i],F1[i,0]))
F1mean[j] = np.mean(F1)
print("mean F1 measure for: {:1.4f}".format(F1mean[j]))
print("Overall F1 : {:1.4f}".format(np.mean(F1mean)))
# Plotting confusion matrix
cvsum = np.sum(cv,axis=2)
for i in range(4):
F1[i]=2*cvsum[i,i]/(np.sum(cvsum[i,:])+np.sum(cvsum[:,i]))
print("F1 measure for {} rhythm: {:1.4f}".format(classes[i],F1[i,0]))
F1mean = np.mean(F1)
print("mean F1 measure for: {:1.4f}".format(F1mean))
plot_confusion_matrix(cvsum, classes,normalize=True,title='Confusion matrix')
| gpl-3.0 |
ofgulban/scikit-image | doc/ext/sphinx_gallery/notebook.py | 6 | 3565 | # -*- coding: utf-8 -*-
r"""
============================
Parser for Jupyter notebooks
============================
Class that holds the Jupyter notebook information
"""
# Author: Óscar Nájera
# License: 3-clause BSD
from __future__ import division, absolute_import, print_function
import json
import os
import re
import sys
def ipy_notebook_skeleton():
"""Returns a dictionary with the elements of a Jupyter notebook"""
py_version = sys.version_info
notebook_skeleton = {
"cells": [],
"metadata": {
"kernelspec": {
"display_name": "Python " + str(py_version[0]),
"language": "python",
"name": "python" + str(py_version[0])
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": py_version[0]
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython" + str(py_version[0]),
"version": '{0}.{1}.{2}'.format(*sys.version_info[:3])
}
},
"nbformat": 4,
"nbformat_minor": 0
}
return notebook_skeleton
def rst2md(text):
"""Converts the RST text from the examples docstrigs and comments
into markdown text for the Jupyter notebooks"""
top_heading = re.compile(r'^=+$\s^([\w\s-]+)^=+$', flags=re.M)
text = re.sub(top_heading, r'# \1', text)
math_eq = re.compile(r'^\.\. math::((?:.+)?(?:\n+^ .+)*)', flags=re.M)
text = re.sub(math_eq,
lambda match: r'$${0}$$'.format(match.group(1).strip()),
text)
inline_math = re.compile(r':math:`(.+)`')
text = re.sub(inline_math, r'$\1$', text)
return text
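# For instance (illustrative input, not taken from the library's docs): a docstring whose
# title "Demo" is framed by '=' lines is rewritten to the Markdown heading "# Demo",
# ':math:`x^2`' becomes '$x^2$', and a '.. math::' block is wrapped in '$$...$$'.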
class Notebook(object):
"""Jupyter notebook object
Constructs the file cell-by-cell and writes it at the end"""
def __init__(self, file_name, target_dir):
"""Declare the skeleton of the notebook
Parameters
----------
file_name : str
original script file name, .py extension will be renamed
target_dir: str
directory where notebook file is to be saved
"""
self.file_name = file_name.replace('.py', '.ipynb')
self.write_file = os.path.join(target_dir, self.file_name)
self.work_notebook = ipy_notebook_skeleton()
self.add_code_cell("%matplotlib inline")
def add_code_cell(self, code):
"""Add a code cell to the notebook
Parameters
----------
code : str
Cell content
"""
code_cell = {
"cell_type": "code",
"execution_count": None,
"metadata": {"collapsed": False},
"outputs": [],
"source": [code.strip()]
}
self.work_notebook["cells"].append(code_cell)
def add_markdown_cell(self, text):
"""Add a markdown cell to the notebook
Parameters
----------
code : str
Cell content
"""
markdown_cell = {
"cell_type": "markdown",
"metadata": {},
"source": [rst2md(text)]
}
self.work_notebook["cells"].append(markdown_cell)
def save_file(self):
"""Saves the notebook to a file"""
with open(self.write_file, 'w') as out_nb:
json.dump(self.work_notebook, out_nb, indent=2)
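# Illustrative usage (assumed file names and paths, not sphinx-gallery's documented API):
#   nb = Notebook('plot_demo.py', '/tmp')
#   nb.add_markdown_cell('Some explanatory text')
#   nb.add_code_cell('print("hello")')
#   nb.save_file()   # writes /tmp/plot_demo.ipynb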
| bsd-3-clause |
guschmue/tensorflow | tensorflow/python/estimator/inputs/inputs.py | 94 | 1290 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility methods to create simple input_fns."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long
from tensorflow.python.estimator.inputs.numpy_io import numpy_input_fn
from tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn
from tensorflow.python.util.all_util import remove_undocumented
# pylint: enable=unused-import,line-too-long
_allowed_symbols = [
'numpy_input_fn',
'pandas_input_fn'
]
remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
| apache-2.0 |
jameshensman/GPy | GPy/plotting/matplot_dep/img_plots.py | 15 | 2159 | # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
"""
The module contains the tools for plotting 2D image visualizations
"""
import numpy as np
from matplotlib.cm import jet
width_max = 15
height_max = 12
def _calculateFigureSize(x_size, y_size, fig_ncols, fig_nrows, pad):
width = (x_size*fig_ncols+pad*(fig_ncols-1))
height = (y_size*fig_nrows+pad*(fig_nrows-1))
if width > float(height)/height_max*width_max:
return (width_max, float(width_max)/width*height)
else:
return (float(height_max)/height*width, height_max)
def plot_2D_images(figure, arr, symmetric=False, pad=None, zoom=None, mode=None, interpolation='nearest'):
ax = figure.add_subplot(111)
if len(arr.shape)==2:
arr = arr.reshape(*((1,)+arr.shape))
fig_num = arr.shape[0]
y_size = arr.shape[1]
x_size = arr.shape[2]
fig_ncols = int(np.ceil(np.sqrt(fig_num)))
fig_nrows = int(np.ceil((float)(fig_num)/fig_ncols))
    if pad is None:
pad = max(int(min(y_size,x_size)/10),1)
figsize = _calculateFigureSize(x_size, y_size, fig_ncols, fig_nrows, pad)
#figure.set_size_inches(figsize,forward=True)
#figure.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.95)
if symmetric:
# symmetric around zero: fix zero as the middle color
mval = max(abs(arr.max()),abs(arr.min()))
arr = arr/(2.*mval)+0.5
else:
minval,maxval = arr.min(),arr.max()
arr = (arr-minval)/(maxval-minval)
if mode=='L':
arr_color = np.empty(arr.shape+(3,))
arr_color[:] = arr.reshape(*(arr.shape+(1,)))
    elif mode is None or mode=='jet':
arr_color = jet(arr)
buf = np.ones((y_size*fig_nrows+pad*(fig_nrows-1), x_size*fig_ncols+pad*(fig_ncols-1), 3),dtype=arr.dtype)
for y in range(fig_nrows):
for x in range(fig_ncols):
if y*fig_ncols+x<fig_num:
buf[y*y_size+y*pad:(y+1)*y_size+y*pad, x*x_size+x*pad:(x+1)*x_size+x*pad] = arr_color[y*fig_ncols+x,:,:,:3]
img_plot = ax.imshow(buf, interpolation=interpolation)
ax.axis('off')
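# Illustrative usage (assumed data): tile ten random 12x16 grayscale patches into one figure.
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   plot_2D_images(fig, np.random.rand(10, 12, 16), mode='L')
#   plt.show()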
| bsd-3-clause |
woozzu/tf_tutorials | 03_YourCNN_MNIST_starter.py | 1 | 3220 | '''
Build your own deep network using TensorFlow library.
This example is using the MNIST database of handwritten digits
(http://yann.lecun.com/exdb/mnist/)
Code references:
https://github.com/shouvikmani/Tensorflow-Deep-Learning-Tutorial/blob/master/tutorial.ipynb
https://github.com/aymericdamien/TensorFlow-Examples/
The source code was modified by S.W. Oh.
'''
from __future__ import print_function
import tensorflow as tf
import numpy as np
from matplotlib import pyplot as plt
# import Dense (fully-connected) layer and Convolution layer
from util.layer import Dense, Conv2D, BatchNorm
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("./data/", one_hot=True)
# Parameters
learning_rate = 0.01
training_epochs = 5
batch_size = 10
display_step = 1
###### Build graph ######################################################
# Place holders
x = tf.placeholder(tf.float32, [None,28,28,1]) # mnist data image of shape [28,28,1]
y = tf.placeholder(tf.float32, [None,10]) # 0-9 digits recognition => 10 classes
is_train = tf.placeholder(tf.bool, shape=[]) # Train flag
######################################################################
# your code here !!
# Layer Usages:
# h = Conv2D(h, [3,3,1,8], [1,1,1,1], 'SAME', 'conv1')
# h = BatchNorm(h, is_train, decay=0.9, name='bn1')
# h = tf.nn.relu(h)
# h = tf.nn.max_pool(h, [1,2,2,1], [1,2,2,1], 'SAME')
# h = Dense(h, [8,10], 'fc1')
#######################################################################
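# --- Illustrative sketch only (an assumption, not the intended solution): one possible
# network that defines `logit`, built strictly from the layer signatures shown above.
# The channel count and the global-average-pooling step are choices made here so the
# Dense weight shape [8,10] from the usage example applies directly.
h = Conv2D(x, [3,3,1,8], [1,1,1,1], 'SAME', 'conv1')    # 3x3 conv, 1 -> 8 channels
h = BatchNorm(h, is_train, decay=0.9, name='bn1')       # batch norm with train flag
h = tf.nn.relu(h)
h = tf.nn.max_pool(h, [1,2,2,1], [1,2,2,1], 'SAME')     # 28x28 -> 14x14
h = tf.reduce_mean(h, axis=[1, 2])                      # global average pooling -> [None, 8]
logit = Dense(h, [8,10], 'fc1')                         # class scores for the 10 digits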
pred = tf.nn.softmax(logit) # Softmax
# Directly compute the loss from the logit (for numerical stability); softmax
# cross-entropy matches the 10-class one-hot labels used here.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logit, labels=y))
# Define optimizer and train_op
train_op = tf.train.AdamOptimizer(learning_rate).minimize(cost)
#########################################################################
###### Start Training ###################################################
# Open a Session
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(mnist.train.num_examples/batch_size)
# Loop over all batches
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
batch_xs = np.reshape(batch_xs, [batch_size,28,28,1])
# Run optimization op (backprop) and cost op (to get loss value)
_, c = sess.run([train_op, cost], feed_dict={x: batch_xs, y: batch_ys, is_train: True})
# Compute average loss
avg_cost += c / total_batch
# Display logs per epoch step
if (epoch+1) % display_step == 0:
print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost))
print("Optimization Finished!")
# Test model
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print("Accuracy:", accuracy.eval({x: np.reshape(mnist.test.images, [-1,28,28,1]), y: mnist.test.labels, is_train: False})) | mit |
Experiments-Data-Base-Managment/data-base-managment | view/Screen_Result_Experiment.py | 1 | 5238 | #!/usr/bin/python
# -*- coding: iso-8859-15 -*-
import sys
import matplotlib.pyplot as plt
sys.path.append("../")
from controller.Controller_Result_Experiment import ControllerScreenResultExperiment
from Window import Main, Gtk, GdkPixbuf
controller = ControllerScreenResultExperiment()
class WindowResultExperiment:
def __init__(self, path, set_results, controller_screen_new_experiment):
self.set_results = set_results
self.window = Main()
self.window.set_handler(controller.get_handler())
self.window.set_file_ui(path)
self.window.connect_handles_ui()
controller.set_screen_result_experiment(self)
controller.set_controller_screen_new_experiment(controller_screen_new_experiment)
self.window.set_name_object("result_window")
self.set_tables(self.window.get_object_from_window("tabela_resultados"),self.window.get_object_from_window("liststore_result_experiment"))
self.set_tree_views(self.window.get_object_from_window("tree_view"),self.window.get_object_from_window("tree_view2"))
self.create_columns()
self.index = 0
self.axis_x = []
self.axis_y = []
#set_results.print_set_results()
while (self.index < len(self.set_results.get_measurements())):
self.times_and_volts = self.set_results.get_specific_measurement(self.index).split('|')
for i in range(0,len(self.times_and_volts)-1):
self.times_and_volts[i] = self.times_and_volts[i].split(';')
self.insert_data_table(self.times_and_volts[i][0], self.times_and_volts[i][1])
                '''
                append the measurement data (time, voltage) to the plotting vectors
                '''
self.axis_x.append(self.times_and_volts[i][0])
self.axis_y.append(self.times_and_volts[i][1])
self.index += 1
        '''
        Create the graph from the collected data, save it, and show it in the results screen
        '''
plt.plot(self.axis_x,self.axis_y)
plt.xlabel('Tempo')
plt.ylabel('Tensao')
        plt.title('Processo de Carga do Capacitor')
plt.grid(True)
#plt.tight_layout()
plt.savefig("curva_capacitor.jpeg", dpi = 800)
graphic = self.window.get_object_from_window("graphic")
#make picture
self.pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_scale(filename="curva_capacitor.jpeg", width=700, height=500, preserve_aspect_ratio=True)
graphic.set_from_pixbuf(self.pixbuf)
#graphic.set_from_file("curva_capacitor.jpeg")
controller.fill_experiments_data()
controller.fill_table_results()
def show_window(self):
self.window.start_window()
Gtk.main()
    '''
    The methods from this point in the code onward handle the table structure
    shown in the results screen.
    '''
def set_tables(self, table_1, table_2):
self.table_1 = table_1
self.table_2 = table_2
def set_tree_views(self, tree_view_1, tree_view_2):
self.tree_view_1 = tree_view_1
self.tree_view_2 = tree_view_2
    '''
    Method that creates the columns of the results table and defines the data type
    '''
    def create_columns(self):
        cell_tree_view_1 = Gtk.CellRendererText()
        for col in range(2):
            self.tree_view_1.get_column(col).pack_start(cell_tree_view_1, False)
            self.tree_view_1.get_column(col).add_attribute(cell_tree_view_1, "text", col)
        cell_tree_view_2 = Gtk.CellRendererText()
        for col in range(9):
            self.tree_view_2.get_column(col).pack_start(cell_tree_view_2, False)
            self.tree_view_2.get_column(col).add_attribute(cell_tree_view_2, "text", col)
def insert_data_table(self, volt, seconds):
iter_tree = self.table_1.prepend([volt,seconds])
def set_value_volt(self, volt):
self.volt = volt
def set_value_second(self, seconds):
self.second = seconds
def get_control(self):
return self.controller
def get_table_results(self):
return self.table_2
def get_set_results(self):
return self.set_results
    '''
    Method that returns the window object
    '''
def get_window(self):
return self.window
#windowResult = WindowResultExperiment("../view/xml_windows/result_experiment.glade")
#windowResult.show_window()
| gpl-3.0 |
probml/pyprobml | scripts/vqDemo.py | 1 | 1121 | # Vector Quantization Demo
# Author: Animesh Gupta
# Use raccoon face image
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.misc.face.html
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn import cluster
try: # SciPy >= 0.16 has face in misc
from scipy.misc import face
face = face(gray=True)
except ImportError:
face = sp.face(gray=True)
n_clusters = [2,4]
np.random.seed(0)
X = face.reshape((-1, 1)) # We need an (n_sample, n_feature) array
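# k-means on the 1-D pixel intensities: each pixel is then mapped to its nearest
# centroid gray level, i.e. the image is vector-quantized to K levels.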
for n_cluster in n_clusters:
k_means = cluster.KMeans(n_clusters=n_cluster, n_init=4)
k_means.fit(X)
values = k_means.cluster_centers_.squeeze()
labels = k_means.labels_
# create an array from labels and values
face_compressed = np.choose(labels, values)
face_compressed.shape = face.shape
vmin = face.min()
vmax = face.max()
# compressed face
plt.figure(figsize=(4,4))
plt.title(f'K = {n_cluster}')
plt.imshow(face_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
plt.savefig(f"../figures/vectorQuantization_{n_cluster}.pdf", dpi=300)
| mit |