repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (string, 15 distinct values)
---|---|---|---|---|---|
RPGOne/scikit-learn | sklearn/tests/test_grid_search.py | 68 | 28856 |
"""
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import warnings
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.exceptions import ChangedBehaviorWarning
from sklearn.exceptions import FitFailedWarning
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler)
from sklearn.cross_validation import KFold, StratifiedKFold
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherits from BaseEstimator;
# they are used to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with error_score != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with error_score == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
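# A minimal usage sketch, not part of the original test suite: it contrasts
# GridSearchCV and RandomizedSearchCV on the module-level toy data, using only
# names already imported above; the helper name and parameters are illustrative.
def _demo_search_api():
    grid = GridSearchCV(MockClassifier(), {'foo_param': [1, 2, 3]})
    rand = RandomizedSearchCV(MockClassifier(), {'foo_param': [1, 2, 3]},
                              n_iter=3, random_state=0)
    for search in (grid, rand):
        search.fit(X, y)
        # both search objects expose the same result attributes
        assert_true(hasattr(search, 'grid_scores_'))
        assert_true(search.best_params_['foo_param'] > 1)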
| bsd-3-clause |
zorojean/scikit-learn | sklearn/datasets/mlcomp.py | 289 | 3855 |
# Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id or the string name metadata of the MLComp
dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'filenames', the files holding the raw data to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
Note on the lookup process: depending on the type of name_or_id, this
function chooses between integer id lookup and metadata name lookup by
inspecting the unzipped archives and metadata files.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
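# A minimal usage sketch, not part of the original module: it assumes the
# MLComp "20news-18828" dataset has already been downloaded and unzipped under
# MLCOMP_DATASETS_HOME; the dataset name and path below are illustrative only.
if __name__ == '__main__':
    # `os` is already imported at the top of this module
    os.environ.setdefault('MLCOMP_DATASETS_HOME',
                          os.path.expanduser('~/data/mlcomp'))
    news_train = load_mlcomp('20news-18828', set_='train')
    print('%d training documents' % len(news_train.filenames))
    print('categories: %s' % list(news_train.target_names))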
| bsd-3-clause |
aabadie/scikit-learn | sklearn/datasets/samples_generator.py | 26 | 56554 |
"""
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
of sampled features, and arbitrary noise for any remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
# Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
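# A minimal usage sketch, not part of the original module: it checks the
# documented output shapes and label set of make_classification; the helper
# name and parameter values are illustrative only.
def _demo_make_classification():
    X, y = make_classification(n_samples=100, n_features=20, n_informative=2,
                               n_redundant=2, random_state=0)
    assert X.shape == (100, 20)
    assert set(y) == {0, 1}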
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator='dense',
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
.. versionadded:: 0.17
parameter to allow *sparse* output.
return_indicator : 'dense' (default) | 'sparse' | False
If ``'dense'``, return ``Y`` in the dense binary indicator format. If
``'sparse'`` return ``Y`` in the sparse binary indicator format.
``False`` returns a list of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : array or sparse CSR matrix of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
# return_indicator can be True due to backward compatibility
if return_indicator in (True, 'sparse', 'dense'):
lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
Y = lb.fit([range(n_classes)]).transform(Y)
elif return_indicator is not False:
raise ValueError("return_indicator must be either 'sparse', 'dense' "
'or False.')
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
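# A minimal usage sketch, not part of the original module: it shows the default
# dense indicator output of make_multilabel_classification; shapes follow the
# docstring above, and the helper name is illustrative only.
def _demo_make_multilabel_classification():
    X, Y = make_multilabel_classification(n_samples=50, n_features=20,
                                          n_classes=5, random_state=0)
    assert X.shape == (50, 20)
    assert Y.shape == (50, 5)
    assert set(np.unique(Y)) <= {0, 1}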
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
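# A minimal usage sketch, not part of the original module: it verifies the
# documented threshold rule y[i] = 1 if sum(X[i] ** 2) > 9.34 else -1 on a
# small sample; the helper name is illustrative only.
def _demo_make_hastie_10_2():
    X, y = make_hastie_10_2(n_samples=100, random_state=0)
    assert X.shape == (100, 10)
    assert np.array_equal(y, np.where((X ** 2).sum(axis=1) > 9.34, 1.0, -1.0))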
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
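# A minimal usage sketch, not part of the original module: with noise=0 and the
# default bias, the documented linear model means y equals X.dot(coef); the
# helper name and parameter values are illustrative only.
def _demo_make_regression():
    X, y, coef = make_regression(n_samples=50, n_features=10, n_informative=3,
                                 noise=0.0, coef=True, random_state=0)
    assert np.allclose(y, np.dot(X, coef))
    # at most n_informative coefficients are nonzero
    assert np.count_nonzero(coef) <= 3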
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
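# A minimal usage sketch, not part of the original module: without noise the
# two classes lie exactly on circles of radius 1 and `factor`; the helper name
# and parameter values are illustrative only.
def _demo_make_circles():
    X, y = make_circles(n_samples=100, noise=None, factor=0.5, random_state=0)
    radii = np.sqrt((X ** 2).sum(axis=1))
    assert np.allclose(radii[y == 0], 1.0)    # outer circle
    assert np.allclose(radii[y == 1], 0.5)    # inner circle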
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Read more in the :ref:`User Guide <sample_generators>`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
# the first n_samples_out rows of X are the outer circle, the rest the inner
y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
np.ones(n_samples_in, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
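# A minimal usage sketch, not part of the original module: it checks the output
# shape and the balanced class split of make_moons for an even n_samples; the
# helper name and parameter values are illustrative only.
def _demo_make_moons():
    X, y = make_moons(n_samples=100, noise=None, random_state=0)
    assert X.shape == (100, 2)
    assert np.bincount(y).tolist() == [50, 50]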
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std : float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
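# A minimal usage sketch, not part of the original module: with noise=0, y
# reproduces the documented Friedman #1 formula and only the first five
# features matter; the helper name is illustrative only.
def _demo_make_friedman1():
    X, y = make_friedman1(n_samples=200, n_features=10, noise=0.0,
                          random_state=0)
    expected = (10 * np.sin(np.pi * X[:, 0] * X[:, 1])
                + 20 * (X[:, 2] - 0.5) ** 2 + 10 * X[:, 3] + 5 * X[:, 4])
    assert np.allclose(y, expected)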
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
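# A minimal usage sketch, not part of the original module: with noise=0, y
# reproduces the documented Friedman #2 formula exactly; the helper name is
# illustrative only.
def _demo_make_friedman2():
    X, y = make_friedman2(n_samples=100, noise=0.0, random_state=0)
    expected = (X[:, 0] ** 2
                + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5
    assert np.allclose(y, expected)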
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
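# Quick sanity check (doctest-style comments, not part of the original module):
# with noise=0.0 every target is an arctangent and therefore lies strictly inside
# (-pi/2, pi/2); the arguments below are arbitrary choices for demonstration.
# >>> import numpy as np
# >>> X, y = make_friedman3(n_samples=50, noise=0.0, random_state=0)
# >>> bool(np.all(np.abs(y) < np.pi / 2))
# True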
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
    This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
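# Quick sanity check (doctest-style comments, not part of the original module):
# at index 0 the profile above evaluates to (1 - tail_strength) + tail_strength,
# so the largest singular value is always 1.0; the other arguments are arbitrary.
# >>> import numpy as np
# >>> X = make_low_rank_matrix(n_samples=50, n_features=30, effective_rank=5,
# ...                          tail_strength=0.1, random_state=0)
# >>> X.shape
# (50, 30)
# >>> bool(np.isclose(np.linalg.svd(X, compute_uv=False)[0], 1.0))
# True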
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
    Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
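# Quick sanity check (doctest-style comments, not part of the original module):
# the three returned arrays satisfy Y = D X with exactly n_nonzero_coefs active
# entries per column of X; the dimensions below are arbitrary choices.
# >>> import numpy as np
# >>> Y, D, X = make_sparse_coded_signal(n_samples=10, n_components=15,
# ...                                    n_features=20, n_nonzero_coefs=3,
# ...                                    random_state=0)
# >>> Y.shape, D.shape, X.shape
# ((20, 10), (20, 15), (15, 10))
# >>> bool(np.allclose(Y, np.dot(D, X)))
# True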
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
    This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
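# Illustrative usage sketch (doctest-style comments, not part of the original
# module): only the first four features are informative, as in the formula above;
# the sizes and seed are arbitrary choices for demonstration.
# >>> X, y = make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=0)
# >>> X.shape, y.shape
# ((100, 10), (100,))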
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
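# Quick sanity check (doctest-style comments, not part of the original module):
# the result should be symmetric with strictly positive eigenvalues; the dimension
# and seed below are arbitrary choices for demonstration.
# >>> import numpy as np
# >>> M = make_spd_matrix(n_dim=4, random_state=0)
# >>> bool(np.allclose(M, M.T))
# True
# >>> bool(np.all(np.linalg.eigvalsh(M) > 0))
# True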
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim: integer, optional (default=1)
The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is zero (see notes). Larger values
enforce more sparsity.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
    prec : array of shape (dim, dim)
The generated matrix.
Notes
-----
    The sparsity is actually imposed on the Cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
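# Quick sanity check (doctest-style comments, not part of the original module):
# the returned precision matrix is square and symmetric; the dimension, alpha and
# seed below are arbitrary choices for demonstration.
# >>> import numpy as np
# >>> prec = make_sparse_spd_matrix(dim=5, alpha=0.9, random_state=0)
# >>> prec.shape
# (5, 5)
# >>> bool(np.allclose(prec, prec.T))
# True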
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://seat.massey.ac.nz/personal/s.r.marsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
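# Illustrative usage sketch (doctest-style comments, not part of the original
# module): the generator returns 3-D points plus the 1-D manifold coordinate t;
# the sample count and seed are arbitrary choices for demonstration.
# >>> X, t = make_swiss_roll(n_samples=100, random_state=0)
# >>> X.shape, t.shape
# ((100, 3), (100,))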
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
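# Quick sanity check (doctest-style comments, not part of the original module):
# with noise=0.0, x = sin(t) and |z| = 1 - cos(t), so every point satisfies
# x**2 + (|z| - 1)**2 == 1; the arguments below are arbitrary choices.
# >>> import numpy as np
# >>> X, t = make_s_curve(n_samples=100, noise=0.0, random_state=0)
# >>> bool(np.allclose(X[:, 0] ** 2 + (np.abs(X[:, 2]) - 1) ** 2, 1.0))
# True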
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
    The dataset is from Zhu et al. [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
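# Quick sanity check (doctest-style comments, not part of the original module):
# when n_samples is a multiple of n_classes the quantile labelling yields exactly
# balanced classes; the arguments below are arbitrary choices for demonstration.
# >>> import numpy as np
# >>> X, y = make_gaussian_quantiles(n_samples=90, n_features=2, n_classes=3,
# ...                                random_state=0)
# >>> np.bincount(y)
# array([30, 30, 30])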
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])
return result, rows, cols
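# Illustrative usage sketch (doctest-style comments, not part of the original
# module): each of the n_clusters biclusters gets one row-indicator and one
# column-indicator vector; the shape and cluster count below are arbitrary choices.
# >>> data, rows, cols = make_biclusters(shape=(30, 20), n_clusters=3,
# ...                                    random_state=0)
# >>> data.shape, rows.shape, cols.shape
# ((30, 20), (3, 30), (3, 20))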
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == label
                      for label in range(n_row_clusters)
                      for _ in range(n_col_clusters)])
    cols = np.vstack([col_labels == label
                      for _ in range(n_row_clusters)
                      for label in range(n_col_clusters)])
return result, rows, cols
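# Illustrative usage sketch (doctest-style comments, not part of the original
# module): with (4, 3) row and column clusters the checkerboard contains
# 4 * 3 = 12 biclusters, one indicator row per bicluster; arguments are arbitrary.
# >>> data, rows, cols = make_checkerboard(shape=(30, 20), n_clusters=(4, 3),
# ...                                      random_state=0)
# >>> data.shape, rows.shape, cols.shape
# ((30, 20), (12, 30), (12, 20))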
|
bsd-3-clause
|
manashmndl/scikit-learn
|
sklearn/gaussian_process/tests/test_gaussian_process.py
|
267
|
6813
|
"""
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a one-dimensional Gaussian Process model.
# Check random start optimization.
# Test the interpolating property.
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the interpolating property.
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
thetaL = [1e-4] * 2
thetaU = [1e-1] * 2
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=thetaL,
thetaU=thetaU,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
eps = np.finfo(gp.theta_.dtype).eps
assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters
assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the GP interpolation for 2D output
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
f = lambda x: np.vstack((g(x), g(x))).T
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = f(X)
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
# Repeat test_1d and test_2d for several built-in correlation
# models specified as strings.
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
# Repeat test_1d and test_2d with given regression weights (beta0) for
# different regression models (Ordinary Kriging).
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the reduced likelihood function of the optimal theta.
n_samples, n_features = 50, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
best_likelihood = -np.inf
for random_start in range(1, 5):
gp = GaussianProcess(regr="constant", corr="squared_exponential",
theta0=[1e-0] * n_features,
thetaL=[1e-4] * n_features,
thetaU=[1e+1] * n_features,
random_start=random_start, random_state=0,
verbose=False).fit(X, y)
rlf = gp.reduced_likelihood_function()[0]
assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
best_likelihood = rlf
def test_mse_solving():
# test the MSE estimate to be sane.
# non-regression test for ignoring off-diagonals of feature covariance,
# testing with nugget that renders covariance useless, only
# using the mean function, with low effective rank of data
gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
optimizer='Welch', regr="linear", random_state=0)
X, y = make_regression(n_informative=3, n_features=60, noise=50,
random_state=0, effective_rank=1)
gp.fit(X, y)
assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
|
bsd-3-clause
|
ngoix/OCRF
|
sklearn/metrics/__init__.py
|
214
|
3440
|
"""
The :mod:`sklearn.metrics` module includes score functions, performance metrics,
pairwise metrics and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import label_ranking_loss
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import cohen_kappa_score
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
__all__ = [
'accuracy_score',
'adjusted_mutual_info_score',
'adjusted_rand_score',
'auc',
'average_precision_score',
'classification_report',
'cluster',
'completeness_score',
'confusion_matrix',
'consensus_score',
'coverage_error',
'euclidean_distances',
'explained_variance_score',
'f1_score',
'fbeta_score',
'get_scorer',
'hamming_loss',
'hinge_loss',
'homogeneity_completeness_v_measure',
'homogeneity_score',
'jaccard_similarity_score',
'label_ranking_average_precision_score',
'label_ranking_loss',
'log_loss',
'make_scorer',
'matthews_corrcoef',
'mean_absolute_error',
'mean_squared_error',
'median_absolute_error',
'mutual_info_score',
'normalized_mutual_info_score',
'pairwise_distances',
'pairwise_distances_argmin',
'pairwise_distances_argmin_min',
'pairwise_kernels',
'precision_recall_curve',
'precision_recall_fscore_support',
'precision_score',
'r2_score',
'recall_score',
'roc_auc_score',
'roc_curve',
'SCORERS',
'silhouette_samples',
'silhouette_score',
'v_measure_score',
'zero_one_loss',
'brier_score_loss',
]
|
bsd-3-clause
|
sinhrks/scikit-learn
|
sklearn/tree/tree.py
|
23
|
40423
|
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta
from abc import abstractmethod
from math import ceil
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array
from ..utils import check_random_state
from ..utils import compute_sample_weight
from ..utils.multiclass import check_classification_targets
from ..exceptions import NotFittedError
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE}
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
"random": _splitter.RandomSplitter}
SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
"random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None,
presort=False):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.presort = presort
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
            The indexes of the sorted training input samples. If many trees
            are grown on the same dataset, this allows the ordering to be
            cached between trees. If None, the data will be sorted here.
            Don't use this parameter unless you know what you are doing.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
y = check_array(y, ensure_2d=False, dtype=None)
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity, unlike
            # [:, np.newaxis] which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
check_classification_targets(y)
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_encoded = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_encoded[:, k] = np.unique(y[:, k],
return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_encoded
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
min_samples_leaf = self.min_samples_leaf
else: # float
min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
min_samples_split = self.min_samples_split
else: # float
min_samples_split = int(ceil(self.min_samples_split * n_samples))
min_samples_split = max(2, min_samples_split)
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1,
int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if not (0. < self.min_samples_split <= 1. or
2 <= self.min_samples_split):
raise ValueError("min_samples_split must be in at least 2"
" or in (0, 1], got %s" % min_samples_split)
if not (0. < self.min_samples_leaf <= 0.5 or
1 <= self.min_samples_leaf):
raise ValueError("min_samples_leaf must be at least than 1 "
"or in (0, 0.5], got %s" % min_samples_leaf)
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
if presort is True and issparse(X):
raise ValueError("Presorting is not supported for sparse "
"matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms
# which desire presorting must do presorting themselves and pass that
# matrix into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
min_samples_leaf,
min_weight_leaf,
random_state,
self.presort)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is %s and "
"input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted classes, or the predicted values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
.. versionadded:: 0.17
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
def decision_path(self, X, check_input=True):
"""Return the decision path in the tree
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
            Return a node indicator matrix where non-zero elements
            indicate that the samples go through the corresponding nodes.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.decision_path(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None,
presort=False):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
presort=presort)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
presort=False):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state,
presort=presort)
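# Illustrative sketch (not part of the original module): the float forms of
# ``min_samples_split`` / ``min_samples_leaf`` documented above are resolved to
# absolute counts via ``ceil(fraction * n_samples)``, so with 100 training
# samples the two estimators below would behave identically:
#
#     reg_frac = DecisionTreeRegressor(min_samples_split=0.05, max_depth=4)
#     reg_abs = DecisionTreeRegressor(min_samples_split=5, max_depth=4)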
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
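# Usage sketch (illustration only, not part of the original module): the
# docstrings above state that extra-trees should only be used within ensemble
# methods, e.g. wrapped in a bagging meta-estimator:
#
#     from sklearn.ensemble import BaggingRegressor
#     from sklearn.tree import ExtraTreeRegressor
#
#     ensemble = BaggingRegressor(ExtraTreeRegressor(random_state=0),
#                                 n_estimators=10, random_state=0)
#     # ensemble.fit(X_train, y_train) then averages many fully random trees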
|
bsd-3-clause
|
ekyauk/BeepBoop
|
linearSVM.py
|
1
|
5938
|
#!/usr/bin/env python
from pause import *
from sklearn.svm import SVC
from modelFunctions import *
HOTNESS_THRESHOLD = 0.623
#Set the random number generator
np.random.seed(5)
#baseline
featsBase = ['loudness','duration','mode','start_of_fade_out','tempo','end_of_fade_in','time_signature','tatum_mean','tatum_var','year','pitch_var1','pitch_var2','pitch_var3','pitch_var4','pitch_var5','pitch_var6','pitch_var7','pitch_var8','pitch_var9','pitch_var10','pitch_var11','pitch_var12','pitch_mean1','pitch_mean2','pitch_mean3','pitch_mean4','pitch_mean5','pitch_mean6','pitch_mean7','pitch_mean8','pitch_mean9','pitch_mean10','pitch_mean11','pitch_mean12','timbre_var1','timbre_var2','timbre_var3','timbre_var4','timbre_var5','timbre_var6','timbre_var7','timbre_var8','timbre_var9','timbre_var10','timbre_var11','timbre_var12','timbre_mean1','timbre_mean2','timbre_mean3','timbre_mean4','timbre_mean5','timbre_mean6','timbre_mean7','timbre_mean8','timbre_mean9','timbre_mean10','timbre_mean11','timbre_mean12']
#All features
f = open('trainingDataFull.csv', 'r')
featsAll = [word.strip() for word in f.readline().split(',')]
featsAll.remove('y_val')
featsAll.remove('artist_hotness')
featsSelect = ['loudness_sq', 'time_signature', 'artist_familiarity', 'year', 'pitch_var3', 'pitch_var8', 'pitch_var10', 'pitch_mean6', 'pitch_mean_sq6', 'timbre_var1', 'timbre_mean3', 'timbre_mean5', 'timbre_mean11', 'timbre_mean12', 't_male', 't_brazilian pop music', 't_contemporary', 't_tech metal', 't_post-hardcore', 't_new orleans', 't_electropop', 't_sexy', 't_post-bop', 't_speed metal', 't_jungle music', "s_i'm", 't_psychedelic trance', 't_contemporary gospel', 't_party music', 't_shock rock', 't_east coast rap', 't_alternative dance', 't_hardcore rap', 't_rock en espanol', 't_lounge', 't_indie', 's_one', 't_drummer', 't_mix', 't_epic', 't_straight edge', 't_emocore', 't_influential', 't_funny', 't_shoegaze', 't_pop', 't_symphonic', 't_trip hop', "t_rock 'n roll", 't_ohio', 't_conscious', 't_heavy metal', 't_twee', 't_emusic', 't_viking metal', 't_diva', 's_of', 't_grind', 't_dj', 't_argentina', 't_cabaret', 't_master', 't_indietronica', 't_bluegrass', 't_political rap', 't_acid jazz', 't_brazil', 't_doom metal', 't_fresh', 's_dream', 't_gangster rap', 't_alternative metal', 't_polka', 't_afrobeat', 't_intelligent', 't_guitar', 't_hardcore', 't_rock latin', 't_game', 't_roots reggae', 't_classic rock', 't_indie pop', 't_folk rock', 't_emo', 't_scandinavian metal', 't_future jazz', 't_indie hip hop', 't_scottish', 't_disco', 't_gothic metal', 't_80s', 't_melodic', 't_world music', 't_ninja tune', 't_lovers rock', 't_queens', 't_deutsch hiphop', 't_orchestra', 't_avant-garde', 't_seattle', 's_not', 't_americana', 't_brill building pop', 't_latin', 't_italian dance', 't_hard rock', 't_calming', 't_70s', 't_political', 't_pop underground', 't_deathgrind', 't_smooth jazz', 't_heavy', 't_aggressive', 't_rap metal', 't_close harmony', 't_grindcore', 't_psychedelic', 't_movie', 't_psychedelia', 't_street punk', 't_remix', 't_piano', 's_and', 't_chanson francaise', 's_el', 't_swedish', 't_roots rock', 't_roots', 't_samba', 't_jazz rap', 's_have', 's_are', 't_sophisticated', 's_your', 't_dancehall', 't_ebm', 't_progressive electronic', 's_la', 't_house', 't_alternative hip hop', 't_beautiful', 't_stoner', 't_garage', 't_funk rock', 't_speedcore', 't_rock steady', 't_beats', 't_french', 't_group', 's_down', 't_experimental electronic', 't_irish folk', 't_minnesota', 't_crossover', 't_christian hardcore', 't_meditation', 't_delta blues', 's_day', 's_want', 't_doo-wop', 't_acoustic', 't_digital hardcore', 's_in', 't_broken beat', 't_contemporary folk', 't_psychedelic rock', 't_alternative', 't_melodic death metal', 's_last', 't_southern rap', 't_energetic', 's_at', 't_extreme', 's_love', 't_urban folk', 't_folktronica', 't_stoner metal', 't_female vocals', 't_michigan', 't_uplifting', 't_sxsw 2007', 't_industrial metal', 't_moody', 't_brutal death metal', 't_powerful', 't_northern soul', 't_pop punk', 't_hip hop', 't_gospel', 't_east coast hip hop', 't_indie rock', 't_united states', 't_jazz fusion', 't_british invasion', 't_melancholia', 't_black metal', 't_soulful', 't_original score', 't_gothic', 't_chamber pop', 't_country rock', 't_world', 't_sad', 't_posi', 't_melodic punk', 't_parody', 't_hard trance', 't_hip pop', 't_comedy', 't_tango', 's_soul', 't_tech house', 't_melodic metal', 't_symphonic rock', 't_old', 't_trance', 't_electroclash', 's_(live', 't_abstract', 't_glam', 't_power pop', 't_fusion jazz', 't_jamaica', 't_jamaican', 't_international', 't_deathrock', 't_finnish rock', 't_musica', 't_national socialist black metal', 's_version)', 't_post rock', 't_inspirational', 
't_religious music', 's_dance', 't_pop rock', 't_moshcore', 't_jump blues', 't_modern rock', 't_slide', 's_(live)', 's_no', 's_make', 't_metal', 't_opera', 't_freak folk', 't_rock', 't_acid', 't_southern', 't_traditional', 't_soft', 't_protopunk', 't_easy listening', 's_man', 's_rock', 't_violin', 't_ska', 't_american punk', 't_alternative country', 't_ska punk']
if __name__ == "__main__":
svc = SVC(kernel='linear', gamma=0.001, C=1, class_weight='balanced')
featsGroup = {'baseline':featsBase, 'featsAll':featsAll, 'featsSelect':featsSelect}
for group_name, feats in featsGroup.iteritems():
print '--------' + group_name + '------------'
[train_x, train_y] = parseDataFile('trainingDataFull.csv', feats)
[test_x, test_y] = parseDataFile('testDataFull.csv', feats)
trainModel(svc, train_x, train_y)
        result_str = '\t\tPrecision\tRecall\t\tF1\t\tAccuracy\n'
result_str += 'Training\t' + getResultString(testModel(svc, train_x, train_y))
result_str += 'Test\t\t' + getResultString(testModel(svc, test_x, test_y))
result_str += 'CrossValidation\t' + getResultString(crossValidate(svc, 10, train_x, train_y))
print result_str
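# Note (illustration only): ``parseDataFile``, ``trainModel``, ``testModel`` and
# ``crossValidate`` come from the local ``modelFunctions`` module, which is not
# shown here. Assuming ``crossValidate`` wraps k-fold cross-validation, a
# stand-in built purely on scikit-learn could look like:
#
#     from sklearn.model_selection import cross_val_predict
#     from sklearn.metrics import precision_recall_fscore_support, accuracy_score
#
#     def cross_validate_sketch(clf, n_folds, x, y):
#         pred = cross_val_predict(clf, x, y, cv=n_folds)
#         p, r, f1, _ = precision_recall_fscore_support(y, pred, average='binary')
#         return p, r, f1, accuracy_score(y, pred)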
|
mit
|
SiLab-Bonn/pyBAR
|
pybar/scans/analyze_source_scan_gdac_data.py
|
1
|
22385
|
''' This script does the full analysis of a source scan where the global threshold setting was changed to reconstruct the charge injected in a sensor pixel
by a constant source. Several steps are done automatically:
Step 1 Interpret the raw data:
This step interprets the raw data from the FE and creates and plots distributions.
Everything is summed up, but the occupancy histogram is created per GDAC setting.
Step 2 Analyze selected hits:
This step just takes the single hit cluster of the interpreted data and analyzes these hits for each GDAC setting.
Step 3 Analyze cluster size:
In this step the fraction of 1, 2, 3, 4, ... cluster sizes is determined for each GDAC setting.
Step 2.5 Histogram Cluster seeds:
Instead of using single hit clusters (steps 2/3) one can also use the cluster seed hits. The result is the same.
Step 4 Analyze the injected charge:
Here the data from the previous steps is used to determine the injected charge. Plots of the results are shown.
'''
import logging
import os.path
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
import tables as tb
from pybar.analysis import analysis
from pybar.analysis import analysis_utils
from pybar.analysis.plotting import plotting
from pybar.analysis.analyze_raw_data import AnalyzeRawData
analysis_configuration = {
"scan_name": [r'L:\SCC112\ThresholdCalibration\DifferentSources\Cadmium\1_scc_112_ext_trigger_gdac_scan'],
'input_file_calibration': r'L:\SCC112\ThresholdCalibration\scc_112\1_scc_112_threshold_calibration_calibration.h5',
"analysis_steps": [1, 2.5, 4], # the analysis includes the selected steps here. See explanation above.
"use_cluster_rate_correction": False, # corrects the hit rate, because one pixel hit cluster are less likely for low thresholds
"normalize_rate": True, # correct the number of GDACs per scan parameter by the number of triggers or scan time
"normalization_reference": 'time', # one can normalize the hits per GDAC setting to the number of events ('event') or time ('time')
"smoothness": 100, # the smoothness of the spline fit to the data
"vcal_calibration": 55.0, # calibration electrons/PlsrDAC
"n_bins": 300, # number of bins for the profile histogram
"col_span": [53, 76], # the column pixel range to use in the analysis
"row_span": [1, 336], # the row pixel range to use in the analysis
"min_cut_threshold": 1, # the minimum cut threshold for the occupancy to define pixel to use in the analysis
"max_cut_threshold": None, # the maximum cut threshold for the occupancy to define pixel to use in the analysis
"min_gdac": 0, # minimum threshold position in gdac settings to be used for the analysis
"max_gdac": 999999, # maximum threshold position in gdac settings to be used for the analysis
"min_thr": 3500, # minimum threshold position in gdac setting to be used for the analysis
"max_thr": 7000, # maximum threshold position in gdac setting to be used for the analysis
"plot_normalization": True, # active the output of the normalization
"plot_cluster_sizes": True,
"interpreter_warnings": True,
"overwrite_output_files": True
}
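# Note on units (derived from the settings above, for illustration): thresholds
# are handled internally in PlsrDAC and converted to electrons with
# ``vcal_calibration`` (electrons per PlsrDAC), i.e.
#
#     threshold_electrons = threshold_plsr_dac * analysis_configuration['vcal_calibration']
#     # e.g. min_thr = 3500 e with vcal_calibration = 55 e/PlsrDAC keeps
#     # profile-histogram bins above roughly 3500 / 55 ~ 64 PlsrDAC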
def plot_cluster_sizes(in_file_cluster_h5, in_file_calibration_h5, gdac_range):
mean_threshold_calibration = in_file_calibration_h5.root.MeanThresholdCalibration[:]
hist = in_file_cluster_h5.root.AllHistClusterSize[:]
hist_sum = np.sum(hist, axis=1)
hist_rel = hist / hist_sum[:, np.newaxis].astype('f4') * 100
hist_rel_error = hist_rel / np.sqrt(hist_sum[:, np.newaxis].astype('f4')) # TODO: check calculation
x = analysis_utils.get_mean_threshold_from_calibration(gdac_range, mean_threshold_calibration)
plt.grid(True)
plt.errorbar(x * analysis_configuration['vcal_calibration'], hist_rel[:, 1], yerr=hist_rel_error[:, 1].tolist(), fmt='-o')
    plt.errorbar(x * analysis_configuration['vcal_calibration'], hist_rel[:, 2], yerr=hist_rel_error[:, 2].tolist(), fmt='-o')
    plt.errorbar(x * analysis_configuration['vcal_calibration'], hist_rel[:, 3], yerr=hist_rel_error[:, 3].tolist(), fmt='-o')
    plt.errorbar(x * analysis_configuration['vcal_calibration'], hist_rel[:, 4], yerr=hist_rel_error[:, 4].tolist(), fmt='-o')
    plt.errorbar(x * analysis_configuration['vcal_calibration'], hist_rel[:, 5], yerr=hist_rel_error[:, 5].tolist(), fmt='-o')
plt.title('Frequency of different cluster sizes for different thresholds')
plt.xlabel('threshold [e]')
plt.ylabel('cluster size frequency [%]')
plt.legend(["1 hit cluster", "2 hit cluster", "3 hit cluster", "4 hit cluster", "5 hit cluster"], loc='best')
plt.ylim(0, 100)
plt.xlim(0, 12000)
plt.show()
plt.close()
def plot_result(x_p, y_p, y_p_e, smoothed_data, smoothed_data_diff, filename=None):
    ''' Fit spline to the profile histogrammed data, differentiate, determine MPV and plot.
Parameters
----------
x_p, y_p : array like
data points (x,y)
y_p_e : array like
error bars in y
'''
logging.info('Plot results')
plt.close()
p1 = plt.errorbar(x_p * analysis_configuration['vcal_calibration'], y_p, yerr=y_p_e, fmt='o') # plot data with error bars
p2, = plt.plot(x_p * analysis_configuration['vcal_calibration'], smoothed_data, '-r') # plot smoothed data
factor = np.amax(y_p) / np.amin(smoothed_data_diff) * 1.1
p3, = plt.plot(x_p * analysis_configuration['vcal_calibration'], factor * smoothed_data_diff, '-', lw=2) # plot differentiated data
mpv_index = np.argmax(-analysis_utils.smooth_differentiation(x_p, y_p, weigths=1 / y_p_e, order=3, smoothness=analysis_configuration['smoothness'], derivation=1))
p4, = plt.plot([x_p[mpv_index] * analysis_configuration['vcal_calibration'], x_p[mpv_index] * analysis_configuration['vcal_calibration']], [0, factor * smoothed_data_diff[mpv_index]], 'k-', lw=2)
text = 'MPV ' + str(int(x_p[mpv_index] * analysis_configuration['vcal_calibration'])) + ' e'
plt.text(1.01 * x_p[mpv_index] * analysis_configuration['vcal_calibration'], -10. * smoothed_data_diff[mpv_index], text, ha='left')
plt.legend([p1, p2, p3, p4], ['data', 'smoothed spline', 'spline differentiation', text], prop={'size': 12}, loc=0)
plt.title('\'Single hit cluster\'-occupancy for different pixel thresholds')
plt.xlabel('Pixel threshold [e]')
plt.ylabel('Single hit cluster occupancy [a.u.]')
plt.ylim(0, np.amax(y_p) * 1.15)
if filename is None:
plt.show()
else:
filename.savefig(plt.gcf())
return smoothed_data_diff
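# Sketch of the smoothing/differentiation used above (assumption: the real
# implementation lives in pybar.analysis.analysis_utils and may differ). A
# weighted smoothing spline of the given order can be fitted and differentiated
# with scipy; the misspelled ``weigths`` keyword simply mirrors the call sites:
#
#     from scipy.interpolate import UnivariateSpline
#
#     def smooth_differentiation_sketch(x, y, weigths=None, order=3,
#                                       smoothness=100, derivation=0):
#         spline = UnivariateSpline(x, y, w=weigths, k=order, s=smoothness)
#         return spline(x) if derivation == 0 else spline.derivative(n=derivation)(x)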
def analyze_raw_data(input_files, output_file_hits, scan_parameter):
logging.info('Analyze the raw FE data given in ' + str(len(input_files)) + ' files and store the needed data')
if os.path.isfile(output_file_hits) and not analysis_configuration['overwrite_output_files']: # skip analysis if already done
logging.warning('Analyzed data file ' + output_file_hits + ' already exists. Skip analysis for this file.')
else:
with AnalyzeRawData(raw_data_file=input_files, analyzed_data_file=output_file_hits, scan_parameter_name=scan_parameter) as analyze_raw_data:
analyze_raw_data.create_hit_table = True # can be set to false to omit hit table creation, std. setting is false
analyze_raw_data.create_cluster_table = True # enables the creation of a table with all clusters, std. setting is false
analyze_raw_data.create_source_scan_hist = True # create source scan hists
analyze_raw_data.create_cluster_size_hist = True # enables cluster size histogramming, can save some time, std. setting is false
analyze_raw_data.create_cluster_tot_hist = True # enables cluster ToT histogramming per cluster size, std. setting is false
analyze_raw_data.interpreter.set_warning_output(analysis_configuration['interpreter_warnings']) # std. setting is True
analyze_raw_data.clusterizer.set_warning_output(analysis_configuration['interpreter_warnings']) # std. setting is True
analyze_raw_data.interpreter.debug_events(0, 10, False) # events to be printed onto the console for debugging, usually deactivated
analyze_raw_data.interpret_word_table() # the actual start conversion command
analyze_raw_data.interpreter.print_summary() # prints the interpreter summary
analyze_raw_data.plot_histograms() # plots all activated histograms into one pdf
def analyse_selected_hits(input_file_hits, output_file_hits, output_file_hits_analyzed, scan_data_filenames, cluster_size_condition='cluster_size==1', n_cluster_condition='n_cluster==1'):
logging.info('Analyze selected hits with ' + cluster_size_condition + ' and ' + n_cluster_condition + ' in ' + input_file_hits)
if os.path.isfile(output_file_hits) and not analysis_configuration["overwrite_output_files"]: # skip analysis if already done
logging.warning('Selected hit data file ' + output_file_hits + ' already exists. Skip analysis for this file.')
else:
        analysis.select_hits_from_cluster_info(input_file_hits=input_file_hits, output_file_hits=output_file_hits, cluster_size_condition=cluster_size_condition, n_cluster_condition=n_cluster_condition)  # select hits and copy them to a new file
if os.path.isfile(output_file_hits_analyzed) and not analysis_configuration["overwrite_output_files"]: # skip analysis if already done
logging.warning('Analyzed selected hit data file ' + output_file_hits_analyzed + ' already exists. Skip analysis for this file.')
else:
logging.info('Analyze selected hits in ' + output_file_hits)
with AnalyzeRawData(raw_data_file=None, analyzed_data_file=output_file_hits) as analyze_raw_data:
analyze_raw_data.create_source_scan_hist = True
analyze_raw_data.create_tot_hist = False
analyze_raw_data.create_cluster_size_hist = True
analyze_raw_data.create_cluster_tot_hist = True
analyze_raw_data.analyze_hit_table(analyzed_data_out_file=output_file_hits_analyzed)
analyze_raw_data.plot_histograms(pdf_filename=output_file_hits_analyzed, analyzed_data_file=output_file_hits_analyzed)
with tb.open_file(input_file_hits, mode="r") as in_hit_file_h5: # copy meta data to the new analyzed file
with tb.open_file(output_file_hits_analyzed, mode="r+") as output_hit_file_h5:
in_hit_file_h5.root.meta_data.copy(output_hit_file_h5.root) # copy meta_data note to new file
def analyze_injected_charge(data_analyzed_file):
logging.info('Analyze the injected charge')
with tb.open_file(data_analyzed_file, mode="r") as in_file_h5:
occupancy = in_file_h5.root.HistOcc[:].T
gdacs = analysis_utils.get_scan_parameter(in_file_h5.root.meta_data[:])['GDAC']
with PdfPages(os.path.splitext(data_analyzed_file)[0] + '.pdf') as plot_file:
plotting.plot_scatter(gdacs, occupancy.sum(axis=(0, 1)), title='Single pixel hit rate at different thresholds', x_label='Threshold setting [GDAC]', y_label='Single pixel hit rate', log_x=True, filename=plot_file)
if analysis_configuration['input_file_calibration']:
with tb.open_file(analysis_configuration['input_file_calibration'], mode="r") as in_file_calibration_h5: # read calibration file from calibrate_threshold_gdac scan
mean_threshold_calibration = in_file_calibration_h5.root.MeanThresholdCalibration[:]
threshold_calibration_array = in_file_calibration_h5.root.HistThresholdCalibration[:]
gdac_range_calibration = np.array(in_file_calibration_h5.root.HistThresholdCalibration._v_attrs.scan_parameter_values)
gdac_range_source_scan = gdacs
# Select data that is within the given GDAC range, (min_gdac, max_gdac)
sel = np.where(np.logical_and(gdac_range_source_scan >= analysis_configuration['min_gdac'], gdac_range_source_scan <= analysis_configuration['max_gdac']))[0]
gdac_range_source_scan = gdac_range_source_scan[sel]
occupancy = occupancy[:, :, sel]
sel = np.where(np.logical_and(gdac_range_calibration >= analysis_configuration['min_gdac'], gdac_range_calibration <= analysis_configuration['max_gdac']))[0]
gdac_range_calibration = gdac_range_calibration[sel]
threshold_calibration_array = threshold_calibration_array[:, :, sel]
logging.info('Analyzing source scan data with %d GDAC settings from %d to %d with minimum step sizes from %d to %d', len(gdac_range_source_scan), np.min(gdac_range_source_scan), np.max(gdac_range_source_scan), np.min(np.gradient(gdac_range_source_scan)), np.max(np.gradient(gdac_range_source_scan)))
logging.info('Use calibration data with %d GDAC settings from %d to %d with minimum step sizes from %d to %d', len(gdac_range_calibration), np.min(gdac_range_calibration), np.max(gdac_range_calibration), np.min(np.gradient(gdac_range_calibration)), np.max(np.gradient(gdac_range_calibration)))
# rate_normalization of the total hit number for each GDAC setting
rate_normalization = 1.
if analysis_configuration['normalize_rate']:
rate_normalization = analysis_utils.get_rate_normalization(hit_file=hit_file, cluster_file=hit_file, parameter='GDAC', reference=analysis_configuration['normalization_reference'], plot=analysis_configuration['plot_normalization'])
# correcting the hit numbers for the different cluster sizes
correction_factors = 1.
if analysis_configuration['use_cluster_rate_correction']:
correction_h5 = tb.open_file(cluster_sizes_file, mode="r")
cluster_size_histogram = correction_h5.root.AllHistClusterSize[:]
correction_factors = analysis_utils.get_hit_rate_correction(gdacs=gdac_range_source_scan, calibration_gdacs=gdac_range_source_scan, cluster_size_histogram=cluster_size_histogram)
if analysis_configuration['plot_cluster_sizes']:
plot_cluster_sizes(correction_h5, in_file_calibration_h5, gdac_range=gdac_range_source_scan)
pixel_thresholds = analysis_utils.get_pixel_thresholds_from_calibration_array(gdacs=gdac_range_source_scan, calibration_gdacs=gdac_range_calibration, threshold_calibration_array=threshold_calibration_array) # interpolates the threshold at the source scan GDAC setting from the calibration
pixel_hits = occupancy # create hit array with shape (col, row, ...)
pixel_hits = pixel_hits * correction_factors * rate_normalization
# choose region with pixels that have a sufficient occupancy but are not too hot
good_pixel = analysis_utils.select_good_pixel_region(pixel_hits, col_span=analysis_configuration['col_span'], row_span=analysis_configuration['row_span'], min_cut_threshold=analysis_configuration['min_cut_threshold'], max_cut_threshold=analysis_configuration['max_cut_threshold'])
pixel_mask = ~np.ma.getmaskarray(good_pixel)
selected_pixel_hits = pixel_hits[pixel_mask, :] # reduce the data to pixels that are in the good pixel region
selected_pixel_thresholds = pixel_thresholds[pixel_mask, :] # reduce the data to pixels that are in the good pixel region
plotting.plot_occupancy(good_pixel.T, title='Selected pixel for analysis (' + str(len(selected_pixel_hits)) + ')', filename=plot_file)
# reshape to one dimension
x = selected_pixel_thresholds.flatten()
y = selected_pixel_hits.flatten()
# nothing should be NAN/INF, NAN/INF is not supported yet
if np.isfinite(x).shape != x.shape or np.isfinite(y).shape != y.shape:
logging.warning('There are pixels with NaN or INF threshold or hit values, analysis will fail')
                # calculate profile histogram
x_p, y_p, y_p_e = analysis_utils.get_profile_histogram(x, y, n_bins=analysis_configuration['n_bins']) # profile histogram data
# select only the data point where the calibration worked
selected_data = np.logical_and(x_p > analysis_configuration['min_thr'] / analysis_configuration['vcal_calibration'], x_p < analysis_configuration['max_thr'] / analysis_configuration['vcal_calibration'])
x_p = x_p[selected_data]
y_p = y_p[selected_data]
y_p_e = y_p_e[selected_data]
if len(y_p_e[y_p_e == 0]) != 0:
logging.warning('There are bins without any data, guessing the error bars')
y_p_e[y_p_e == 0] = np.amin(y_p_e[y_p_e != 0])
smoothed_data = analysis_utils.smooth_differentiation(x_p, y_p, weigths=1 / y_p_e, order=3, smoothness=analysis_configuration['smoothness'], derivation=0)
smoothed_data_diff = analysis_utils.smooth_differentiation(x_p, y_p, weigths=1 / y_p_e, order=3, smoothness=analysis_configuration['smoothness'], derivation=1)
with tb.open_file(os.path.splitext(data_analyzed_file)[0] + '_result.h5', mode="w") as out_file_h5:
result_1 = np.rec.array(np.column_stack((x_p, y_p, y_p_e)), dtype=[('charge', float), ('count', float), ('count_error', float)])
result_2 = np.rec.array(np.column_stack((x_p, smoothed_data)), dtype=[('charge', float), ('count', float)])
result_3 = np.rec.array(np.column_stack((x_p, -smoothed_data_diff)), dtype=[('charge', float), ('count', float)])
out_1 = out_file_h5.create_table(out_file_h5.root, name='ProfileHistogram', description=result_1.dtype, title='Single pixel count rate combined with a profile histogram', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
out_2 = out_file_h5.create_table(out_file_h5.root, name='ProfileHistogramSpline', description=result_2.dtype, title='Single pixel count rate combined with a profile histogram and spline smoothed', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
out_3 = out_file_h5.create_table(out_file_h5.root, name='ChargeHistogram', description=result_3.dtype, title='Charge histogram with threshold method and per pixel calibration', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
for key, value in analysis_configuration.iteritems():
out_1.attrs[key] = value
out_2.attrs[key] = value
out_3.attrs[key] = value
out_1.append(result_1)
out_2.append(result_2)
out_3.append(result_3)
plot_result(x_p, y_p, y_p_e, smoothed_data, smoothed_data_diff)
# calculate and plot mean results
x_mean = analysis_utils.get_mean_threshold_from_calibration(gdac_range_source_scan, mean_threshold_calibration)
y_mean = selected_pixel_hits.mean(axis=(0))
plotting.plot_scatter(np.array(gdac_range_source_scan), y_mean, log_x=True, plot_range=None, title='Mean single pixel cluster rate at different thresholds', x_label='threshold setting [GDAC]', y_label='mean single pixel cluster rate', filename=plot_file)
plotting.plot_scatter(x_mean * analysis_configuration['vcal_calibration'], y_mean, plot_range=(analysis_configuration['min_thr'], analysis_configuration['max_thr']), title='Mean single pixel cluster rate at different thresholds', x_label='mean threshold [e]', y_label='mean single pixel cluster rate', filename=plot_file)
if analysis_configuration['use_cluster_rate_correction']:
correction_h5.close()
if __name__ == "__main__":
data_files = analysis_utils.get_data_file_names_from_scan_base(analysis_configuration['scan_name'])
files_dict = analysis_utils.get_parameter_from_files(data_files, unique=True, parameters='GDAC') # get a sorted ordered dict with GDAC, raw_data_filename
logging.info('Found ' + str(len(files_dict)) + ' raw data files.')
hit_file = analysis_configuration['scan_name'][0] + '_interpreted.h5'
hit_cut_file = analysis_configuration['scan_name'][0] + '_cut_hits.h5'
hit_cut_analyzed_file = analysis_configuration['scan_name'][0] + '_cut_hits_analyzed.h5'
cluster_seed_analyzed_file = analysis_configuration['scan_name'][0] + '_cluster_seeds_analyzed.h5'
cluster_sizes_file = analysis_configuration['scan_name'][0] + '_ALL_cluster_sizes.h5'
if 1 in analysis_configuration['analysis_steps']:
analyze_raw_data(input_files=files_dict.keys(), output_file_hits=hit_file, scan_parameter='GDAC')
if 2 in analysis_configuration['analysis_steps']:
analyse_selected_hits(input_file_hits=hit_file, output_file_hits=hit_cut_file, output_file_hits_analyzed=hit_cut_analyzed_file, scan_data_filenames=analysis_configuration['scan_name'][0])
if 2.5 in analysis_configuration['analysis_steps']:
if os.path.isfile(cluster_seed_analyzed_file) and not analysis_configuration["overwrite_output_files"]:
logging.warning('Selected cluster hit histogram data file ' + cluster_seed_analyzed_file + ' already exists. Skip analysis for this file.')
else:
analysis.histogram_cluster_table(hit_file, cluster_seed_analyzed_file)
if 3 in analysis_configuration['analysis_steps']:
analysis.analyze_cluster_size_per_scan_parameter(input_file_hits=hit_file, output_file_cluster_size=cluster_sizes_file, parameter='GDAC', overwrite_output_files=analysis_configuration['overwrite_output_files'], output_pdf=False)
if 4 in analysis_configuration['analysis_steps']:
analyze_injected_charge(data_analyzed_file=cluster_seed_analyzed_file)
|
bsd-3-clause
|
wzbozon/scikit-learn
|
sklearn/decomposition/tests/test_truncated_svd.py
|
240
|
6055
|
"""Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
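# As the XXX note above says, a newer SciPy could build a sparse random matrix
# directly; a rough (not tf-idf-like) equivalent, assuming scipy.sparse.rand is
# available, would be:
#
#     X_alt = sp.rand(n_samples, n_features, density=0.25, format="csr",
#                     random_state=42)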
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
        tsvd = TruncatedSVD(n_components=52, algorithm=algo, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
|
bsd-3-clause
|
plstcharles/opengm
|
src/interfaces/python/examples/python_visitor_gui.py
|
14
|
1377
|
"""
Usage: python_visitor_gui.py
This script shows how one can implement visitors
in pure Python and inject them into an OpenGM solver.
(Not all OpenGM solvers support this kind of
code injection.)
"""
import opengm
import numpy
import matplotlib
from matplotlib import pyplot as plt
shape=[100,100]
numLabels=10
unaries=numpy.random.rand(shape[0], shape[1],numLabels)
potts=opengm.PottsFunction([numLabels,numLabels],0.0,0.4)
gm=opengm.grid2d2Order(unaries=unaries,regularizer=potts)
inf=opengm.inference.BeliefPropagation(gm,parameter=opengm.InfParam(damping=0.5))
class PyCallback(object):
def __init__(self,shape,numLabels):
self.shape=shape
self.numLabels=numLabels
self.cmap = matplotlib.colors.ListedColormap ( numpy.random.rand ( self.numLabels,3))
matplotlib.interactive(True)
def begin(self,inference):
print "begin of inference"
def end(self,inference):
print "end of inference"
def visit(self,inference):
gm=inference.gm()
labelVector=inference.arg()
print "energy ",gm.evaluate(labelVector)
labelVector=labelVector.reshape(self.shape)
plt.imshow(labelVector*255.0, cmap=self.cmap,interpolation="nearest")
plt.draw()
callback=PyCallback(shape,numLabels)
visitor=inf.pythonVisitor(callback,visitNth=1)
inf.infer(visitor)
argmin=inf.arg()
|
mit
|
imk1/IMKTFBindingCode
|
runFIMOLogisticRegression.py
|
1
|
10386
|
import sys
import argparse
import math
import numpy as np
from Bio import SeqIO
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss, roc_auc_score, accuracy_score, f1_score, recall_score, precision_score
def parseArgument():
# Parse the input
parser = argparse.ArgumentParser(description="Run a logistic regression with -log10(FIMO q-values) as features")
parser.add_argument("--FIMOPosFileName", required=True, help="Name of file with output from FIMO on the positive set")
parser.add_argument("--FIMONegFileName", required=True, help="Name of file with output from FIMO on the negative set")
parser.add_argument("--positiveFastaFileName", required=True, help="Name of file with fastas from the positive set")
parser.add_argument("--negativeFastaFileName", required=True, help="Name of file with fastas from the negative set")
parser.add_argument("--peakCoordCol", required=False, type=int, default=2, help="Column number of column with peak coordinates")
parser.add_argument("--trainChroms", required=False,\
default=["chr10", "chr11", "chr12", "chr13", "chr14", "chr15", "chr16", "chr17", "chr18", "chr19", "chr3", "chr4", "chr5", "chr6", \
"chr7", "chrX", "chr8", "chr9"],\
help="Name of chromosomes that will be used for training")
parser.add_argument("--validChroms", required=False, default=["chr1", "chr2"], help="Name of chromosomes that will be used for validation")
parser.add_argument("--qValCutoff", type=float, required=False, default=0.5, \
                    help='q-value cutoff for determining whether a motif match can be included in one of the sets') #0.75 is possible
parser.add_argument("--l2Penalty", type=float, required=False, default=1.0, \
help='l2 penalty for logistic regression')
parser.add_argument("--deeplearningPath", required=False, \
default="/srv/scratch/imk1/TFBindingPredictionProject/src/deeplearning", \
help='Full path to deeplearning directory')
parser.add_argument("--targetfinderPath", required=False, default="/srv/scratch/imk1/TFBindingPredictionProject/src/targetfinder", \
help="Full path to targetfinder directory")
options = parser.parse_args()
return options
def addToMotifDict(motifDict, peak, qVal):
# Add the current peak to the motif dictionary with its q-value unless it is already present with a lower q-value
if peak not in motifDict:
# Create a new entry in the core dictionary for the peak
motifDict[peak] = 0 - math.log(qVal, 10)
else:
# Change the value in the dictionary if the current q-value is smaller
if 0 - math.log(qVal, 10) > motifDict[peak]:
# The current q-value is smaller, so change it
motifDict[peak] = 0 - math.log(qVal, 10)
return motifDict
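# Worked example (illustrative): a q-value of 0.01 is stored as
# -log10(0.01) ~ 2.0; if the same peak later appears with q = 0.001
# (-log10 ~ 3.0 > 2.0), the stored score is replaced, so each peak keeps the
# score of its most significant motif match.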
def makeFIMODicts(FIMOFileName, options):
# Make core, upstream, and downstream dictionaries for the FIMO file
coreDict = {}
upstreamDict = {}
downstreamDict = {}
FIMOFile = open(FIMOFileName)
FIMOFile.readline() # Remove the header
for line in FIMOFile:
# Iterate through the lines of the file from FIMO and make each one an entry in a dictionary
lineElements = line.strip().split("\t")
qVal = float(lineElements[7])
if qVal >= options.qValCutoff:
# The match is not significant, so skip it
continue
peakElements = lineElements[options.peakCoordCol].split(":")
if (peakElements[0] not in options.trainChroms) and (peakElements[0] not in options.validChroms):
# The peak will not be used for training or validation, so skip it
continue
positionElements = peakElements[1].split("-")
peak = (peakElements[0], int(positionElements[0]), int(positionElements[1]))
if (lineElements[0] == "core") or (lineElements[0] == "Core"):
# Put the peak into the core dictionary
coreDict = addToMotifDict(coreDict, peak, qVal)
elif (lineElements[0] == "upstream") or (lineElements[0] == "Upstream"):
# Put the peak into the upstream dictionary
upstreamDict = addToMotifDict(upstreamDict, peak, qVal)
else:
# Put the peak into the downstream dictionary
assert ((lineElements[0] == "downstream") or (lineElements[0] == "Downstream"))
downstreamDict = addToMotifDict(downstreamDict, peak, qVal)
FIMOFile.close()
return [coreDict, upstreamDict, downstreamDict]
def makeFeaturesFromFIMOOutput(coreDict, upstreamDict, downstreamDict, fastaFileName, options):
# Create -log10(q-value) features for each fasta from the FIMO output
featuresTrain = []
featuresValid = []
for record in SeqIO.parse(fastaFileName, "fasta"):
# Iterate through the peaks and get the features for each
peakElements = str(record.id).split(":")
if (peakElements[0] not in options.trainChroms) and (peakElements[0] not in options.validChroms):
# The peak will not be used for training or validation, so skip it
continue
positionElements = peakElements[1].split("-")
peak = (peakElements[0], int(positionElements[0]), int(positionElements[1]))
featuresPeak = np.zeros(3)
if peak in coreDict:
# The peak has a core motif
featuresPeak[0] = coreDict[peak]
if peak in upstreamDict:
# The peak has an upstream motif
featuresPeak[1] = upstreamDict[peak]
if peak in downstreamDict:
# The peak has a downstream motif
featuresPeak[2] = downstreamDict[peak]
if peak[0] in options.trainChroms:
# The peak is in the training set
featuresTrain.append(featuresPeak)
else:
# The peak is in the validation set
featuresValid.append(featuresPeak)
return [np.array(featuresTrain), np.array(featuresValid)]
def makeClassPredictionsScikit(XTr, XTe, model):
# Make the predictions on the training and validation sets
trainPredictedClasses = model.predict(XTr);
testPredictedClasses = model.predict(XTe);
trainPredictedProba = model.predict_proba(XTr)[:,1];
testPredictedProba = model.predict_proba(XTe)[:,1];
return [trainPredictedClasses, testPredictedClasses, trainPredictedProba, testPredictedProba];
def evaluateModel(model, XTr, YTr, XTe, YTe):
# Evaluate the results of the model
sys.path.insert(0, options.deeplearningPath)
    from auPRG_with_rpy2 import auPRG_R # Requires R >= 3.2.0
sys.path.insert(0, options.targetfinderPath)
from MLEvaluationOperations import negative_accuracy, auprc, recall_at_fdr
[trainPredictedClasses, testPredictedClasses, trainPredictedProba, testPredictedProba] = makeClassPredictionsScikit(XTr, XTe, model)
trainAcc = accuracy_score(YTr, trainPredictedClasses)
testAcc = accuracy_score(YTe, testPredictedClasses)
# sensitivity
trainSens = recall_score(YTr, trainPredictedClasses)
testSens = recall_score(YTe, testPredictedClasses)
# specificity
trainSpec = negative_accuracy(YTr, trainPredictedClasses)
testSpec = negative_accuracy(YTe, testPredictedClasses)
# roc auc
trainAUC = roc_auc_score(YTr, trainPredictedProba)
testAUC = roc_auc_score(YTe, testPredictedProba)
# precision
trainPrecision = precision_score(YTr, trainPredictedClasses)
testPrecision = precision_score(YTe, testPredictedClasses)
# negative predictive value
trainNPV = precision_score(1 - YTr, 1 - trainPredictedClasses)
testNPV = precision_score(1 - YTe, 1 - testPredictedClasses)
yTrTrue = YTr
yTrScore = trainPredictedProba
yTeTrue = YTe
yTeScore = testPredictedProba
if np.sum(yTrTrue) > 0.5*yTrTrue.shape[0]:
# Swap the negative and positive labels
print("Majority of examples are positives, so evaluating for negative set")
yTrTrue = 1 - yTrTrue
yTrScore = 1 - yTrScore
yTeTrue = 1 - yTeTrue
yTeScore = 1 - yTeScore
# auPRC
trainAUPRC = auprc(yTrTrue, yTrScore, path=options.targetfinderPath + "/")
testAUPRC = auprc(yTeTrue, yTeScore, path=options.targetfinderPath + "/")
# auPRG
trainAUPRG = auPRG_R(yTrTrue, yTrScore)
testAUPRG = auPRG_R(yTeTrue, yTeScore)
# F1
trainF1 = f1_score(YTr, trainPredictedClasses)
testF1 = f1_score(YTe, testPredictedClasses)
# Recall at FDR 0.05
trainRecallAt05FDR = recall_at_fdr(yTrTrue, yTrScore, fdr_cutoff=0.05)
testRecallAt05FDR = recall_at_fdr(yTeTrue, yTeScore, fdr_cutoff=0.05)
# Recall at FDR 0.1
trainRecallAt1FDR = recall_at_fdr(yTrTrue, yTrScore, fdr_cutoff=0.1)
testRecallAt1FDR = recall_at_fdr(yTeTrue, yTeScore, fdr_cutoff=0.1)
# Recall at FDR 0.2
trainRecallAt2FDR = recall_at_fdr(yTrTrue, yTrScore, fdr_cutoff=0.2)
testRecallAt2FDR = recall_at_fdr(yTeTrue, yTeScore, fdr_cutoff=0.2)
# cross entropy loss
trainLoss = log_loss(YTr, trainPredictedProba)
testLoss = log_loss(YTe, testPredictedProba)
# Print results
print("Training -- loss: %f, accuracy: %f, sensitivity: %f, specificity: %f, auc: %f, precision: %f, NPV: %f, auprc: %f, auprg: %f, F1: %f, recallAt0.05FDR: %f, recallAt0.1FDR: %f, recallAt0.2FDR: %f"
% (trainLoss, trainAcc, trainSens, trainSpec, trainAUC, trainPrecision, trainNPV, trainAUPRC, trainAUPRG, trainF1, \
trainRecallAt05FDR, trainRecallAt1FDR, trainRecallAt2FDR))
print("Testing -- loss: %f, accuracy: %f, sensitivity: %f, specificity: %f, auc: %f, precision: %f, NPV: %f, auprc: %f, auprg: %f, F1: %f, recallAt0.05FDR: %f, recallAt0.1FDR: %f, recallAt0.2FDR: %f"
% (testLoss, testAcc, testSens, testSpec, testAUC, testPrecision, testNPV, testAUPRC, testAUPRG, testF1, \
testRecallAt05FDR, testRecallAt1FDR, testRecallAt2FDR))
def runFIMOLogisticRegression(options):
# Run a logistic regression with -log10(FIMO q-values) as features
[coreDictPos, upstreamDictPos, downstreamDictPos] = makeFIMODicts(options.FIMOPosFileName, options)
[featuresTrainPos, featuresValidPos] =\
makeFeaturesFromFIMOOutput(coreDictPos, upstreamDictPos, downstreamDictPos, options.positiveFastaFileName, options)
[coreDictNeg, upstreamDictNeg, downstreamDictNeg] = makeFIMODicts(options.FIMONegFileName, options)
[featuresTrainNeg, featuresValidNeg] =\
makeFeaturesFromFIMOOutput(coreDictNeg, upstreamDictNeg, downstreamDictNeg, options.negativeFastaFileName, options)
labelsTrain = np.concatenate((np.ones(featuresTrainPos.shape[0]), np.zeros(featuresTrainNeg.shape[0])))
labelsValid = np.concatenate((np.ones(featuresValidPos.shape[0]), np.zeros(featuresValidNeg.shape[0])))
featuresTrain = np.vstack((featuresTrainPos, featuresTrainNeg))
featuresValid = np.vstack((featuresValidPos, featuresValidNeg))
model = LogisticRegression(C=options.l2Penalty, class_weight='balanced')
model.fit(featuresTrain, labelsTrain)
evaluateModel(model, featuresTrain, labelsTrain, featuresValid, labelsValid)
if __name__ == "__main__":
options = parseArgument()
runFIMOLogisticRegression(options)
|
mit
|
akrherz/iem
|
htdocs/plotting/auto/scripts100/p119.py
|
1
|
5512
|
"""Frequency of first fall low"""
import datetime
from pandas.io.sql import read_sql
import pandas as pd
import matplotlib.dates as mdates
from pyiem.plot import figure_axes
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.exceptions import NoDataFound
PDICT = {"low": "Low Temperature", "high": "High Temperature"}
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc["data"] = True
desc["report"] = True
desc[
"description"
] = """This chart presents the accumulated frequency of
having the first fall temperature at or below a given threshold."""
desc["arguments"] = [
dict(
type="station",
name="station",
default="IATDSM",
label="Select Station",
network="IACLIMATE",
),
dict(
type="select",
options=PDICT,
name="var",
default="low",
label="Select variable to summarize:",
),
dict(type="int", name="t1", default=32, label="First Threshold (F)"),
dict(type="int", name="t2", default=28, label="Second Threshold (F)"),
dict(type="int", name="t3", default=26, label="Third Threshold (F)"),
dict(type="int", name="t4", default=22, label="Fourth Threshold (F)"),
]
return desc
def plotter(fdict):
""" Go """
pgconn = get_dbconn("coop")
ctx = get_autoplot_context(fdict, get_description())
station = ctx["station"]
thresholds = [ctx["t1"], ctx["t2"], ctx["t3"], ctx["t4"]]
table = "alldata_%s" % (station[:2],)
# Load up dict of dates..
sz = 214 + 304
df = pd.DataFrame(
{
"dates": pd.date_range("2000/08/01", "2001/05/31"),
"%scnts" % (thresholds[0],): 0,
"%scnts" % (thresholds[1],): 0,
"%scnts" % (thresholds[2],): 0,
"%scnts" % (thresholds[3],): 0,
},
index=range(214, sz),
)
df.index.name = "doy"
for base in thresholds:
# Find first dates by winter season
df2 = read_sql(
f"""
select
case when month > 7 then year + 1 else year end as winter,
min(case when {ctx["var"]} <= %s
then day else '2099-01-01'::date end) as mindate from {table}
WHERE month not in (6, 7) and station = %s and year < %s
GROUP by winter
""",
pgconn,
params=(base, station, datetime.date.today().year),
index_col=None,
)
for _, row in df2.iterrows():
if row["mindate"].year == 2099:
continue
jan1 = datetime.date(row["winter"] - 1, 1, 1)
doy = (row["mindate"] - jan1).days
df.loc[doy:sz, "%scnts" % (base,)] += 1
df[f"{base}freq"] = df[f"{base}cnts"] / len(df2.index) * 100.0
bs = ctx["_nt"].sts[station]["archive_begin"]
if bs is None:
raise NoDataFound("Unknown metadata")
res = """\
# IEM Climodat https://mesonet.agron.iastate.edu/climodat/
# Report Generated: %s
# Climate Record: %s -> %s
# Site Information: [%s] %s
# Contact Information: Daryl Herzmann [email protected] 515.294.5978
# %s exceedance probabilities
# (On a certain date, what is the chance a temperature below a certain
# threshold would have been observed once already during the fall of that year)
DOY Date <%s <%s <%s <%s
""" % (
datetime.date.today().strftime("%d %b %Y"),
bs.date(),
datetime.date.today(),
station,
ctx["_nt"].sts[station]["name"],
PDICT[ctx["var"]],
thresholds[0] + 1,
thresholds[1] + 1,
thresholds[2] + 1,
thresholds[3] + 1,
)
fcols = ["%sfreq" % (s,) for s in thresholds]
mindate = None
maxdate = None
for doy, row in df.iterrows():
if doy % 2 != 0:
continue
if row[fcols[3]] >= 100:
if maxdate is None:
maxdate = row["dates"] + datetime.timedelta(days=5)
continue
if row[fcols[0]] > 0 and mindate is None:
mindate = row["dates"] - datetime.timedelta(days=5)
res += (" %3s %s %3i %3i %3i %3i\n") % (
row["dates"].strftime("%-j"),
row["dates"].strftime("%b %d"),
row[fcols[0]],
row[fcols[1]],
row[fcols[2]],
row[fcols[3]],
)
if maxdate is None:
maxdate = datetime.datetime(2001, 6, 1)
title = (
"Frequency of First Fall %s At or Below Threshold\n%s %s (%s-%s)"
) % (
PDICT[ctx["var"]],
station,
ctx["_nt"].sts[station]["name"],
bs.year,
datetime.date.today().year,
)
(fig, ax) = figure_axes(title=title)
for base in thresholds:
ax.plot(
df["dates"].values,
df["%sfreq" % (base,)],
label="%s" % (base,),
lw=2,
)
ax.legend(loc="best")
ax.set_xlim(mindate, maxdate)
days = (maxdate - mindate).days
dl = [1] if days > 120 else [1, 7, 14, 21]
ax.xaxis.set_major_locator(mdates.DayLocator(dl))
ax.xaxis.set_major_formatter(mdates.DateFormatter("%-d\n%b"))
ax.grid(True)
ax.set_yticks([0, 10, 25, 50, 75, 90, 100])
ax.set_ylabel("Accumulated to Date Frequency [%]")
df = df.reset_index()
return fig, df, res
if __name__ == "__main__":
plotter({"var": "high", "t4": 0})
|
mit
|
treesnail/tushare
|
tushare/stock/macro.py
|
37
|
12728
|
# -*- coding:utf-8 -*-
"""
Macroeconomic data interface
Created on 2015/01/24
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
import pandas as pd
import numpy as np
import re
import json
from tushare.stock import macro_vars as vs
from tushare.stock import cons as ct
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def get_gdp_year():
"""
    Fetch annual gross domestic product (GDP) data
    Return
    --------
    DataFrame
        year :statistical year
        gdp :gross domestic product (100 million CNY)
        pc_gdp :GDP per capita (CNY)
        gnp :gross national product (100 million CNY)
        pi :primary industry (100 million CNY)
        si :secondary industry (100 million CNY)
        industry :industry (100 million CNY)
        cons_industry :construction (100 million CNY)
        ti :tertiary industry (100 million CNY)
        trans_industry :transport, storage, post and telecommunication services (100 million CNY)
        lbdy :wholesale, retail trade and catering services (100 million CNY)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[0], 0, 70,
rdint))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk') if ct.PY3 else text
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
datastr = datastr.replace('"', '').replace('null', '0')
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.GDP_YEAR_COLS)
df[df==0] = np.NaN
return df
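# Note (illustration only): every fetcher in this module repeats the same
# pattern -- request the Sina macro URL, strip the JS wrapper, load the embedded
# JSON and build a DataFrame. A hypothetical shared helper (not part of
# tushare) could look like:
#
#     def _fetch_macro_frame(macro_type, event, page_size, columns):
#         rdint = vs.random()
#         request = Request(vs.MACRO_URL % (vs.P_TYPE['http'], vs.DOMAINS['sina'],
#                                           rdint, macro_type, event, page_size, rdint))
#         text = urlopen(request, timeout=10).read()
#         text = text.decode('gbk') if ct.PY3 else text
#         datastr = re.compile(r'\,count:(.*?)\}').findall(text)[0].split('data:')[1]
#         datastr = datastr.replace('"', '').replace('null', '0')
#         return pd.DataFrame(json.loads(datastr), columns=columns)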
def get_gdp_quarter():
"""
    Fetch quarterly gross domestic product (GDP) data
    Return
    --------
    DataFrame
        quarter :quarter
        gdp :gross domestic product (100 million CNY)
        gdp_yoy :GDP year-on-year growth (%)
        pi :value added of the primary industry (100 million CNY)
        pi_yoy:primary industry value added year-on-year growth (%)
        si :value added of the secondary industry (100 million CNY)
        si_yoy :secondary industry value added year-on-year growth (%)
        ti :value added of the tertiary industry (100 million CNY)
        ti_yoy :tertiary industry value added year-on-year growth (%)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[0], 1, 250,
rdint))
text = urlopen(request,timeout=10).read()
text = text.decode('gbk') if ct.PY3 else text
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
datastr = datastr.replace('"', '').replace('null', '0')
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.GDP_QUARTER_COLS)
df['quarter'] = df['quarter'].astype(object)
df[df==0] = np.NaN
return df
def get_gdp_for():
"""
    Fetch data on the contributions of the three major demand components to GDP
    Return
    --------
    DataFrame
        year :statistical year
        end_for :contribution rate of final consumption expenditure (%)
        for_rate :pull of final consumption expenditure (percentage points)
        asset_for :contribution rate of gross capital formation (%)
        asset_rate:pull of gross capital formation (percentage points)
        goods_for :contribution rate of net exports of goods and services (%)
        goods_rate :pull of net exports of goods and services (percentage points)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[0], 4, 80, rdint))
text = urlopen(request,timeout=10).read()
text = text.decode('gbk') if ct.PY3 else text
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
datastr = datastr.replace('"','').replace('null','0')
js = json.loads(datastr)
df = pd.DataFrame(js,columns=vs.GDP_FOR_COLS)
df[df==0] = np.NaN
return df
def get_gdp_pull():
"""
    Fetch data on how much the three industries pull GDP growth
    Return
    --------
    DataFrame
        year :statistical year
        gdp_yoy :GDP year-on-year growth (%)
        pi :pull rate of the primary industry (%)
        si :pull rate of the secondary industry (%)
        industry:of which, pull of industry (%)
        ti :pull rate of the tertiary industry (%)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[0], 5, 60, rdint))
text = urlopen(request,timeout=10).read()
text = text.decode('gbk') if ct.PY3 else text
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
datastr = datastr.replace('"', '').replace('null', '0')
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.GDP_PULL_COLS)
df[df==0] = np.NaN
return df
def get_gdp_contrib():
"""
    Fetch data on the contribution rates of the three industries to GDP
    Return
    --------
    DataFrame
        year :statistical year
        gdp_yoy :gross domestic product
        pi :contribution rate of the primary industry (%)
        si :contribution rate of the secondary industry (%)
        industry:of which, contribution rate of industry (%)
        ti :contribution rate of the tertiary industry (%)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'], rdint,
vs.MACRO_TYPE[0], 6, 60, rdint))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk') if ct.PY3 else text
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
datastr = datastr.replace('"', '').replace('null', '0')
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.GDP_CONTRIB_COLS)
df[df==0] = np.NaN
return df
def get_cpi():
"""
    Fetch consumer price index (CPI) data
    Return
    --------
    DataFrame
        month :statistical month
        cpi :price index
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[1], 0, 600,
rdint))
text = urlopen(request,timeout=10).read()
text = text.decode('gbk') if ct.PY3 else text
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.CPI_COLS)
df['cpi'] = df['cpi'].astype(float)
return df
def get_ppi():
"""
    Fetch producer price index (PPI) data for industrial products
    Return
    --------
    DataFrame
        month :statistical month
        ppiip :ex-factory price index of industrial products
        ppi :price index of means of production
        qm:price index of the mining industry
        rmi:price index of the raw materials industry
        pi:price index of the processing industry
        cg:price index of consumer goods
        food:food price index
        clothing:clothing price index
        roeu:price index of general daily necessities
        dcg:price index of durable consumer goods
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[1], 3, 600,
rdint))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk') if ct.PY3 else text
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.PPI_COLS)
for i in df.columns:
df[i] = df[i].apply(lambda x:np.where(x is None, np.NaN, x))
if i != 'month':
df[i] = df[i].astype(float)
return df
def get_deposit_rate():
"""
    Fetch deposit rate data
    Return
    --------
    DataFrame
        date :change date
        deposit_type :deposit type
        rate:interest rate (%)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[2], 2, 600,
rdint))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk')
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.DEPOSIT_COLS)
for i in df.columns:
df[i] = df[i].apply(lambda x:np.where(x is None, '--', x))
return df
def get_loan_rate():
"""
    Fetch loan rate data
    Return
    --------
    DataFrame
        date :effective date
        loan_type :loan type
        rate:interest rate (%)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[2], 3, 800,
rdint))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk')
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.LOAN_COLS)
for i in df.columns:
df[i] = df[i].apply(lambda x:np.where(x is None, '--', x))
return df
def get_rrr():
"""
获取存款准备金率数据
Return
--------
DataFrame
date :变动日期
before :调整前存款准备金率(%)
now:调整后存款准备金率(%)
changed:调整幅度(%)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[2], 4, 100,
rdint))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk')
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.RRR_COLS)
for i in df.columns:
df[i] = df[i].apply(lambda x:np.where(x is None, '--', x))
return df
def get_money_supply():
"""
获取货币供应量数据
Return
--------
DataFrame
month :统计时间
m2 :货币和准货币(广义货币M2)(亿元)
m2_yoy:货币和准货币(广义货币M2)同比增长(%)
m1:货币(狭义货币M1)(亿元)
m1_yoy:货币(狭义货币M1)同比增长(%)
m0:流通中现金(M0)(亿元)
m0_yoy:流通中现金(M0)同比增长(%)
cd:活期存款(亿元)
cd_yoy:活期存款同比增长(%)
qm:准货币(亿元)
qm_yoy:准货币同比增长(%)
ftd:定期存款(亿元)
ftd_yoy:定期存款同比增长(%)
sd:储蓄存款(亿元)
sd_yoy:储蓄存款同比增长(%)
rests:其他存款(亿元)
rests_yoy:其他存款同比增长(%)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[2], 1, 600,
rdint))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk')
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.MONEY_SUPPLY_COLS)
for i in df.columns:
df[i] = df[i].apply(lambda x:np.where(x is None, '--', x))
return df
def get_money_supply_bal():
"""
获取货币供应量(年底余额)数据
Return
--------
DataFrame
year :统计年度
m2 :货币和准货币(亿元)
m1:货币(亿元)
m0:流通中现金(亿元)
cd:活期存款(亿元)
qm:准货币(亿元)
ftd:定期存款(亿元)
sd:储蓄存款(亿元)
rests:其他存款(亿元)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[2], 0, 200,
rdint))
text = urlopen(request,timeout=10).read()
text = text.decode('gbk')
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.MONEY_SUPPLY_BLA_COLS)
for i in df.columns:
df[i] = df[i].apply(lambda x:np.where(x is None, '--', x))
return df
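# Usage sketch (not part of the original module): fetch a few of the macro
# series defined above and print their first rows. Network access to the Sina
# endpoints configured in `vs` is assumed.
if __name__ == '__main__':
    for fetch in (get_cpi, get_ppi, get_deposit_rate, get_money_supply):
        frame = fetch()
        print(fetch.__name__)
        print(frame.head())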
|
bsd-3-clause
|
jinghaomiao/apollo
|
modules/tools/routing/road_show.py
|
3
|
4618
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""Show road."""
import math
import sys
import matplotlib.patches as mpatches  # used by draw_arc below
import matplotlib.pyplot as plt
import modules.tools.common.proto_utils as proto_utils
import modules.tools.routing.util as util
g_color = [
'navy', 'c', 'cornflowerblue', 'gold', 'darkorange', 'darkviolet',
'aquamarine', 'firebrick', 'limegreen'
]
def draw_line(line_segment, color):
"""
:param line_segment:
:return: none
"""
px, py = proto_utils.flatten(line_segment.point, ['x', 'y'])
px, py = downsample_array(px), downsample_array(py)
plt.gca().plot(px, py, lw=10, alpha=0.8, color=color)
return px[len(px) // 2], py[len(py) // 2]
def draw_arc(arc):
"""
:param arc: proto obj
:return: none
"""
xy = (arc.center.x, arc.center.y)
start = 0
end = 0
if arc.start_angle < arc.end_angle:
start = arc.start_angle / math.pi * 180
end = arc.end_angle / math.pi * 180
else:
end = arc.start_angle / math.pi * 180
start = arc.end_angle / math.pi * 180
pac = mpatches.Arc(
xy, arc.radius * 2, arc.radius * 2, angle=0, theta1=start, theta2=end)
plt.gca().add_patch(pac)
def downsample_array(array):
"""down sample given array"""
skip = 5
result = array[::skip]
result.append(array[-1])
return result
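# Sketch (not part of the original tool): expected behaviour of
# downsample_array, assuming the input supports list-style slicing and append.
def _downsample_array_example():
    """Keep every 5th point plus the final point of the input."""
    pts = list(range(12))
    sampled = downsample_array(pts)
    assert sampled == [0, 5, 10, 11]
    return sampled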
def draw_boundary(line_segment):
"""
:param line_segment:
:return:
"""
px, py = proto_utils.flatten(line_segment.point, ['x', 'y'])
px, py = downsample_array(px), downsample_array(py)
plt.gca().plot(px, py, 'k')
def draw_id(x, y, id_string):
"""Draw id_string on (x, y)"""
plt.annotate(
id_string,
xy=(x, y),
xytext=(40, -40),
textcoords='offset points',
ha='right',
va='bottom',
bbox=dict(boxstyle='round,pad=0.5', fc='green', alpha=0.5),
arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))
def get_road_index_of_lane(lane_id, road_lane_set):
"""Get road index of lane"""
for i, lane_set in enumerate(road_lane_set):
if lane_id in lane_set:
return i
return -1
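# Sketch (not part of the original tool): how get_road_index_of_lane resolves
# a lane id against the road/lane grouping built in draw_map below; the lane
# ids used here are made up for illustration.
def _road_index_example():
    demo_road_lane_set = [['lane_a', 'lane_b'], ['lane_c']]
    assert get_road_index_of_lane('lane_c', demo_road_lane_set) == 1
    assert get_road_index_of_lane('missing', demo_road_lane_set) == -1
    return demo_road_lane_set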
def draw_map(drivemap):
""" draw map from mapfile"""
print('Map info:')
print('\tVersion:\t', end=' ')
print(drivemap.header.version)
print('\tDate:\t', end=' ')
print(drivemap.header.date)
print('\tDistrict:\t', end=' ')
print(drivemap.header.district)
road_lane_set = []
for road in drivemap.road:
lanes = []
for sec in road.section:
lanes.extend(proto_utils.flatten(sec.lane_id, 'id'))
road_lane_set.append(lanes)
for lane in drivemap.lane:
for curve in lane.central_curve.segment:
if curve.HasField('line_segment'):
road_idx = get_road_index_of_lane(lane.id.id, road_lane_set)
if road_idx == -1:
print('Failed to get road index of lane')
sys.exit(-1)
center_x, center_y = draw_line(curve.line_segment,
g_color[road_idx % len(g_color)])
draw_id(center_x, center_y, str(road_idx))
# break
# if curve.HasField('arc'):
# draw_arc(curve.arc)
for curve in lane.left_boundary.curve.segment:
if curve.HasField('line_segment'):
draw_boundary(curve.line_segment)
for curve in lane.right_boundary.curve.segment:
if curve.HasField('line_segment'):
draw_boundary(curve.line_segment)
# break
return drivemap
if __name__ == "__main__":
print("Reading map data")
map_dir = util.get_map_dir(sys.argv)
base_map = util.get_mapdata(map_dir)
print("Done reading map data")
plt.subplots()
draw_map(base_map)
plt.axis('equal')
plt.show()
|
apache-2.0
|
vinodgithubu/ml_lab_ecsc_306
|
labwork/lab2/sci-learn/non_linear_regression.py
|
120
|
1520
|
"""
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
lw = 2
plt.scatter(X, y, color='darkorange', label='data')
plt.plot(X, y_rbf, color='navy', lw=lw, label='RBF model')
plt.plot(X, y_lin, color='c', lw=lw, label='Linear model')
plt.plot(X, y_poly, color='cornflowerblue', lw=lw, label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
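# Optional check (not part of the original example): report each model's
# in-sample R^2; on this toy data the RBF kernel usually tracks the sine
# curve most closely.
for name, model in (('RBF', svr_rbf), ('Linear', svr_lin), ('Polynomial', svr_poly)):
    print('%s kernel in-sample R^2: %.3f' % (name, model.score(X, y)))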
|
apache-2.0
|
astocko/statsmodels
|
statsmodels/graphics/plot_grids.py
|
33
|
5711
|
'''create scatterplot with confidence ellipses
Author: Josef Perktold
License: BSD-3
TODO: update script to use sharex, sharey, and visible=False
see http://www.scipy.org/Cookbook/Matplotlib/Multiple_Subplots_with_One_Axis_Label
for sharex I need to have the ax of the last_row when editing the earlier
rows. Or use axes_grid1, imagegrid
http://matplotlib.sourceforge.net/mpl_toolkits/axes_grid/users/overview.html
'''
from statsmodels.compat.python import range
import numpy as np
from scipy import stats
from . import utils
__all__ = ['scatter_ellipse']
def _make_ellipse(mean, cov, ax, level=0.95, color=None):
"""Support function for scatter_ellipse."""
from matplotlib.patches import Ellipse
v, w = np.linalg.eigh(cov)
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan(u[1]/u[0])
angle = 180 * angle / np.pi # convert to degrees
v = 2 * np.sqrt(v * stats.chi2.ppf(level, 2)) #get size corresponding to level
ell = Ellipse(mean[:2], v[0], v[1], 180 + angle, facecolor='none',
edgecolor=color,
#ls='dashed', #for debugging
lw=1.5)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
def scatter_ellipse(data, level=0.9, varnames=None, ell_kwds=None,
plot_kwds=None, add_titles=False, keep_ticks=False,
fig=None):
"""Create a grid of scatter plots with confidence ellipses.
    ell_kwds, plot_kwds not used yet
looks ok with 5 or 6 variables, too crowded with 8, too empty with 1
Parameters
----------
data : array_like
Input data.
level : scalar, optional
Default is 0.9.
varnames : list of str, optional
Variable names. Used for y-axis labels, and if `add_titles` is True
        also for titles. If not given, default names 'var0', 'var1', ... are used.
ell_kwds : dict, optional
UNUSED
plot_kwds : dict, optional
UNUSED
add_titles : bool, optional
Whether or not to add titles to each subplot. Default is False.
Titles are constructed from `varnames`.
keep_ticks : bool, optional
If False (default), remove all axis ticks.
fig : Matplotlib figure instance, optional
If given, this figure is simply returned. Otherwise a new figure is
created.
Returns
-------
fig : Matplotlib figure instance
If `fig` is None, the created figure. Otherwise `fig` itself.
"""
fig = utils.create_mpl_fig(fig)
import matplotlib.ticker as mticker
data = np.asanyarray(data) #needs mean and cov
nvars = data.shape[1]
if varnames is None:
#assuming single digit, nvars<=10 else use 'var%2d'
varnames = ['var%d' % i for i in range(nvars)]
plot_kwds_ = dict(ls='none', marker='.', color='k', alpha=0.5)
if plot_kwds:
plot_kwds_.update(plot_kwds)
ell_kwds_= dict(color='k')
if ell_kwds:
ell_kwds_.update(ell_kwds)
dmean = data.mean(0)
dcov = np.cov(data, rowvar=0)
for i in range(1, nvars):
#print '---'
ax_last=None
for j in range(i):
#print i,j, i*(nvars-1)+j+1
ax = fig.add_subplot(nvars-1, nvars-1, (i-1)*(nvars-1)+j+1)
## #sharey=ax_last) #sharey doesn't allow empty ticks?
## if j == 0:
## print 'new ax_last', j
## ax_last = ax
## ax.set_ylabel(varnames[i])
#TODO: make sure we have same xlim and ylim
formatter = mticker.FormatStrFormatter('% 3.1f')
ax.yaxis.set_major_formatter(formatter)
ax.xaxis.set_major_formatter(formatter)
idx = np.array([j,i])
ax.plot(*data[:,idx].T, **plot_kwds_)
if np.isscalar(level):
level = [level]
for alpha in level:
_make_ellipse(dmean[idx], dcov[idx[:,None], idx], ax, level=alpha,
**ell_kwds_)
if add_titles:
ax.set_title('%s-%s' % (varnames[i], varnames[j]))
if not ax.is_first_col():
if not keep_ticks:
ax.set_yticks([])
else:
ax.yaxis.set_major_locator(mticker.MaxNLocator(3))
else:
ax.set_ylabel(varnames[i])
if ax.is_last_row():
ax.set_xlabel(varnames[j])
else:
if not keep_ticks:
ax.set_xticks([])
else:
ax.xaxis.set_major_locator(mticker.MaxNLocator(3))
dcorr = np.corrcoef(data, rowvar=0)
dc = dcorr[idx[:,None], idx]
xlim = ax.get_xlim()
ylim = ax.get_ylim()
## xt = xlim[0] + 0.1 * (xlim[1] - xlim[0])
## yt = ylim[0] + 0.1 * (ylim[1] - ylim[0])
## if dc[1,0] < 0 :
## yt = ylim[0] + 0.1 * (ylim[1] - ylim[0])
## else:
## yt = ylim[1] - 0.2 * (ylim[1] - ylim[0])
yrangeq = ylim[0] + 0.4 * (ylim[1] - ylim[0])
if dc[1,0] < -0.25 or (dc[1,0] < 0.25 and dmean[idx][1] > yrangeq):
yt = ylim[0] + 0.1 * (ylim[1] - ylim[0])
else:
yt = ylim[1] - 0.2 * (ylim[1] - ylim[0])
xt = xlim[0] + 0.1 * (xlim[1] - xlim[0])
ax.text(xt, yt, '$\\rho=%0.2f$'% dc[1,0])
for ax in fig.axes:
if ax.is_last_row(): # or ax.is_first_col():
ax.xaxis.set_major_locator(mticker.MaxNLocator(3))
if ax.is_first_col():
ax.yaxis.set_major_locator(mticker.MaxNLocator(3))
return fig
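# Usage sketch (not part of the original module): draw the ellipse grid for a
# few correlated variables; matplotlib is assumed to be available for display.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    np.random.seed(0)
    demo_cov = np.array([[1.0, 0.5, 0.2],
                         [0.5, 1.0, 0.3],
                         [0.2, 0.3, 1.0]])
    demo_data = np.random.multivariate_normal(np.zeros(3), demo_cov, size=200)
    scatter_ellipse(demo_data, level=0.9, varnames=['x1', 'x2', 'x3'],
                    add_titles=True)
    plt.show()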
|
bsd-3-clause
|
nmartensen/pandas
|
pandas/core/tools/numeric.py
|
11
|
5915
|
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import (
is_scalar,
is_numeric_dtype,
is_decimal,
is_datetime_or_timedelta_dtype,
is_number,
_ensure_object)
from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas._libs import lib
def to_numeric(arg, errors='raise', downcast=None):
"""
Convert argument to a numeric type.
Parameters
----------
arg : list, tuple, 1-d array, or Series
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as NaN
- If 'ignore', then invalid parsing will return the input
downcast : {'integer', 'signed', 'unsigned', 'float'} , default None
If not None, and if the data has been successfully cast to a
numerical dtype (or if the data was numeric to begin with),
downcast that resulting data to the smallest numerical dtype
possible according to the following rules:
- 'integer' or 'signed': smallest signed int dtype (min.: np.int8)
- 'unsigned': smallest unsigned int dtype (min.: np.uint8)
- 'float': smallest float dtype (min.: np.float32)
As this behaviour is separate from the core conversion to
numeric values, any errors raised during the downcasting
will be surfaced regardless of the value of the 'errors' input.
In addition, downcasting will only occur if the size
of the resulting data's dtype is strictly larger than
the dtype it is to be cast to, so if none of the dtypes
checked satisfy that specification, no downcasting will be
performed on the data.
.. versionadded:: 0.19.0
Returns
-------
ret : numeric if parsing succeeded.
Return type depends on input. Series if Series, otherwise ndarray
Examples
--------
Take separate series and convert to numeric, coercing when told to
>>> import pandas as pd
>>> s = pd.Series(['1.0', '2', -3])
>>> pd.to_numeric(s)
0 1.0
1 2.0
2 -3.0
dtype: float64
>>> pd.to_numeric(s, downcast='float')
0 1.0
1 2.0
2 -3.0
dtype: float32
>>> pd.to_numeric(s, downcast='signed')
0 1
1 2
2 -3
dtype: int8
>>> s = pd.Series(['apple', '1.0', '2', -3])
>>> pd.to_numeric(s, errors='ignore')
0 apple
1 1.0
2 2
3 -3
dtype: object
>>> pd.to_numeric(s, errors='coerce')
0 NaN
1 1.0
2 2.0
3 -3.0
dtype: float64
See also
--------
pandas.DataFrame.astype : Cast argument to a specified dtype.
pandas.to_datetime : Convert argument to datetime.
pandas.to_timedelta : Convert argument to timedelta.
numpy.ndarray.astype : Cast a numpy array to a specified type.
"""
if downcast not in (None, 'integer', 'signed', 'unsigned', 'float'):
raise ValueError('invalid downcasting method provided')
is_series = False
is_index = False
is_scalars = False
if isinstance(arg, ABCSeries):
is_series = True
values = arg.values
elif isinstance(arg, ABCIndexClass):
is_index = True
values = arg.asi8
if values is None:
values = arg.values
elif isinstance(arg, (list, tuple)):
values = np.array(arg, dtype='O')
elif is_scalar(arg):
if is_decimal(arg):
return float(arg)
if is_number(arg):
return arg
is_scalars = True
values = np.array([arg], dtype='O')
elif getattr(arg, 'ndim', 1) > 1:
raise TypeError('arg must be a list, tuple, 1-d array, or Series')
else:
values = arg
try:
if is_numeric_dtype(values):
pass
elif is_datetime_or_timedelta_dtype(values):
values = values.astype(np.int64)
else:
values = _ensure_object(values)
coerce_numeric = False if errors in ('ignore', 'raise') else True
values = lib.maybe_convert_numeric(values, set(),
coerce_numeric=coerce_numeric)
except Exception:
if errors == 'raise':
raise
# attempt downcast only if the data has been successfully converted
# to a numerical dtype and if a downcast method has been specified
if downcast is not None and is_numeric_dtype(values):
typecodes = None
if downcast in ('integer', 'signed'):
typecodes = np.typecodes['Integer']
elif downcast == 'unsigned' and np.min(values) >= 0:
typecodes = np.typecodes['UnsignedInteger']
elif downcast == 'float':
typecodes = np.typecodes['Float']
# pandas support goes only to np.float32,
# as float dtypes smaller than that are
# extremely rare and not well supported
float_32_char = np.dtype(np.float32).char
float_32_ind = typecodes.index(float_32_char)
typecodes = typecodes[float_32_ind:]
if typecodes is not None:
# from smallest to largest
for dtype in typecodes:
if np.dtype(dtype).itemsize <= values.dtype.itemsize:
values = maybe_downcast_to_dtype(values, dtype)
# successful conversion
if values.dtype == dtype:
break
if is_series:
return pd.Series(values, index=arg.index, name=arg.name)
elif is_index:
# because we want to coerce to numeric if possible,
# do not use _shallow_copy_with_infer
return pd.Index(values, name=arg.name)
elif is_scalars:
return values[0]
else:
return values
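# Illustrative sketch (not part of pandas): the downcast branch above walks
# np.typecodes from the smallest candidate dtype upward and keeps the first
# dtype that is no larger than the input and still represents the data. The
# allclose check below is a rough stand-in for maybe_downcast_to_dtype.
def _smallest_float_dtype_example():
    values = np.array([1.0, 2.5, -3.0], dtype=np.float64)
    float_codes = np.typecodes['Float']
    start = float_codes.index(np.dtype(np.float32).char)
    for code in float_codes[start:]:  # typically 'f', 'd', 'g'
        if np.dtype(code).itemsize <= values.dtype.itemsize:
            candidate = values.astype(code)
            if np.allclose(candidate, values):
                return candidate.dtype
    return values.dtype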
|
bsd-3-clause
|
YongchaoShang/memex-explorer
|
source/apps/crawl_space/viz/plot.py
|
2
|
2271
|
from StringIO import StringIO
import pandas as pd
from harvest import Harvest
from domain import Domain
from .stream import init_plot
from django.conf import settings
ENABLE_STREAM_VIZ = settings.ENABLE_STREAM_VIZ
class PlotsNotReadyException(Exception):
pass
class AcheDashboard(object):
def __init__(self, crawl):
self.crawl = crawl
if self.crawl.crawler != "ache":
raise ValueError("Crawl must be using the Ache crawler.")
self.harvest = Harvest(crawl)
self.domain = Domain(crawl)
def get_harvest_plot(self):
# TODO: Remove Pokemon exception catching
try:
script, div = self.harvest.create()
except:
return [None, None]
return [script, div]
def get_domain_plot(self):
# TODO: Remove Pokemon exception catching
try:
script, div = self.domain.create()
except Exception:
return [None, None]
return [script, div]
def get_relevant_seeds(self):
# Converts string to StringIO to allow pandas to read it as a file
seeds = pd.read_csv(StringIO(self.domain.get_relevant_data()),
delimiter='\t', header=None,
names=['url', 'timestamp'])
return seeds['url'].to_dict().values()
def get_plots(self):
harvest_plot = self.get_harvest_plot()
domain_plot = self.get_domain_plot()
if harvest_plot != [None, None]:
return {
'scripts': [domain_plot[0], harvest_plot[0]],
'divs': [domain_plot[1], harvest_plot[1]],
}
else:
return {
'scripts': None,
'divs': None,
}
class NutchDashboard(object):
def __init__(self, crawl):
self.crawl = crawl
if self.crawl.crawler != "nutch":
raise ValueError("Crawl must be using the Nutch crawler.")
def get_plots(self):
# TODO: For simultaneous crawl monitoring need to use unique crawl ids
if ENABLE_STREAM_VIZ:
script = init_plot(self.crawl.name)
else:
script = None
return {
'scripts': [script],
'divs': [],
}
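# Minimal sketch (not part of the app): the StringIO + read_csv pattern that
# AcheDashboard.get_relevant_seeds uses to parse the tab-separated
# "url<TAB>timestamp" text returned by Domain.get_relevant_data.
def _parse_relevant_seeds_example():
    raw = "http://example.com/a\t1400000000\nhttp://example.com/b\t1400000100\n"
    seeds = pd.read_csv(StringIO(raw), delimiter='\t', header=None,
                        names=['url', 'timestamp'])
    return list(seeds['url'])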
|
bsd-2-clause
|
marcsans/cnn-physics-perception
|
phy/lib/python2.7/site-packages/matplotlib/transforms.py
|
7
|
96105
|
"""
matplotlib includes a framework for arbitrary geometric
transformations that is used to determine the final position of all
elements drawn on the canvas.
Transforms are composed into trees of :class:`TransformNode` objects
whose actual value depends on their children. When the contents of
children change, their parents are automatically invalidated. The
next time an invalidated transform is accessed, it is recomputed to
reflect those changes. This invalidation/caching approach prevents
unnecessary recomputations of transforms, and contributes to better
interactive performance.
For example, here is a graph of the transform tree used to plot data
to the graph:
.. image:: ../_static/transforms.png
The framework can be used for both affine and non-affine
transformations. However, for speed, we want to use the backend
renderers to perform affine transformations whenever possible.
Therefore, it is possible to perform just the affine or non-affine
part of a transformation on a set of data. The affine is always
assumed to occur after the non-affine. For any transform::
full transform == non-affine part + affine part
The backends are not expected to handle non-affine transformations
themselves.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import numpy as np
from numpy import ma
from matplotlib._path import (affine_transform, count_bboxes_overlapping_bbox,
update_path_extents)
from numpy.linalg import inv
from weakref import WeakValueDictionary
import warnings
try:
set
except NameError:
from sets import Set as set
from .path import Path
DEBUG = False
# we need this later, but this is very expensive to set up
MINFLOAT = np.MachAr(float).xmin
MaskedArray = ma.MaskedArray
class TransformNode(object):
"""
:class:`TransformNode` is the base class for anything that
participates in the transform tree and needs to invalidate its
parents or be invalidated. This includes classes that are not
really transforms, such as bounding boxes, since some transforms
depend on bounding boxes to compute their values.
"""
_gid = 0
# Invalidation may affect only the affine part. If the
# invalidation was "affine-only", the _invalid member is set to
# INVALID_AFFINE_ONLY
INVALID_NON_AFFINE = 1
INVALID_AFFINE = 2
INVALID = INVALID_NON_AFFINE | INVALID_AFFINE
# Some metadata about the transform, used to determine whether an
# invalidation is affine-only
is_affine = False
is_bbox = False
pass_through = False
"""
If pass_through is True, all ancestors will always be
invalidated, even if 'self' is already invalid.
"""
def __init__(self, shorthand_name=None):
"""
Creates a new :class:`TransformNode`.
**shorthand_name** - a string representing the "name" of this
transform. The name carries no significance
other than to improve the readability of
``str(transform)`` when DEBUG=True.
"""
# Parents are stored in a WeakValueDictionary, so that if the
# parents are deleted, references from the children won't keep
# them alive.
self._parents = WeakValueDictionary()
# TransformNodes start out as invalid until their values are
# computed for the first time.
self._invalid = 1
self._shorthand_name = shorthand_name or ''
if DEBUG:
def __str__(self):
            # either just return the name of this TransformNode, or its repr
return self._shorthand_name or repr(self)
def __getstate__(self):
d = self.__dict__.copy()
# turn the weakkey dictionary into a normal dictionary
d['_parents'] = dict(six.iteritems(self._parents))
return d
def __setstate__(self, data_dict):
self.__dict__ = data_dict
# turn the normal dictionary back into a WeakValueDictionary
self._parents = WeakValueDictionary(self._parents)
def __copy__(self, *args):
raise NotImplementedError(
"TransformNode instances can not be copied. " +
"Consider using frozen() instead.")
__deepcopy__ = __copy__
def invalidate(self):
"""
Invalidate this :class:`TransformNode` and triggers an
invalidation of its ancestors. Should be called any
time the transform changes.
"""
value = self.INVALID
if self.is_affine:
value = self.INVALID_AFFINE
return self._invalidate_internal(value, invalidating_node=self)
def _invalidate_internal(self, value, invalidating_node):
"""
Called by :meth:`invalidate` and subsequently ascends the transform
stack calling each TransformNode's _invalidate_internal method.
"""
# determine if this call will be an extension to the invalidation
# status. If not, then a shortcut means that we needn't invoke an
# invalidation up the transform stack as it will already have been
# invalidated.
# N.B This makes the invalidation sticky, once a transform has been
# invalidated as NON_AFFINE, then it will always be invalidated as
        # NON_AFFINE even when triggered with an AFFINE_ONLY invalidation.
# In most cases this is not a problem (i.e. for interactive panning and
# zooming) and the only side effect will be on performance.
status_changed = self._invalid < value
if self.pass_through or status_changed:
self._invalid = value
for parent in list(six.itervalues(self._parents)):
parent._invalidate_internal(value=value,
invalidating_node=self)
def set_children(self, *children):
"""
Set the children of the transform, to let the invalidation
system know which transforms can invalidate this transform.
Should be called from the constructor of any transforms that
depend on other transforms.
"""
for child in children:
child._parents[id(self)] = self
if DEBUG:
_set_children = set_children
def set_children(self, *children):
self._set_children(*children)
self._children = children
set_children.__doc__ = _set_children.__doc__
def frozen(self):
"""
Returns a frozen copy of this transform node. The frozen copy
will not update when its children change. Useful for storing
a previously known state of a transform where
``copy.deepcopy()`` might normally be used.
"""
return self
if DEBUG:
def write_graphviz(self, fobj, highlight=[]):
"""
For debugging purposes.
Writes the transform tree rooted at 'self' to a graphviz "dot"
format file. This file can be run through the "dot" utility
to produce a graph of the transform tree.
Affine transforms are marked in blue. Bounding boxes are
marked in yellow.
*fobj*: A Python file-like object
Once the "dot" file has been created, it can be turned into a
png easily with::
$> dot -Tpng -o $OUTPUT_FILE $DOT_FILE
"""
seen = set()
def recurse(root):
if root in seen:
return
seen.add(root)
props = {}
label = root.__class__.__name__
if root._invalid:
label = '[%s]' % label
if root in highlight:
props['style'] = 'bold'
props['shape'] = 'box'
props['label'] = '"%s"' % label
props = ' '.join(['%s=%s' % (key, val)
for key, val
in six.iteritems(props)])
fobj.write('%s [%s];\n' %
(hash(root), props))
if hasattr(root, '_children'):
for child in root._children:
name = '?'
for key, val in six.iteritems(root.__dict__):
if val is child:
name = key
break
fobj.write('"%s" -> "%s" [label="%s", fontsize=10];\n'
% (hash(root),
hash(child),
name))
recurse(child)
fobj.write("digraph G {\n")
recurse(self)
fobj.write("}\n")
class BboxBase(TransformNode):
"""
This is the base class of all bounding boxes, and provides
read-only access to its data. A mutable bounding box is provided
by the :class:`Bbox` class.
The canonical representation is as two points, with no
restrictions on their ordering. Convenience properties are
provided to get the left, bottom, right and top edges and width
and height, but these are not stored explicitly.
"""
is_bbox = True
is_affine = True
#* Redundant: Removed for performance
#
# def __init__(self):
# TransformNode.__init__(self)
if DEBUG:
def _check(points):
if ma.isMaskedArray(points):
warnings.warn("Bbox bounds are a masked array.")
points = np.asarray(points)
if (points[1, 0] - points[0, 0] == 0 or
points[1, 1] - points[0, 1] == 0):
warnings.warn("Singular Bbox.")
_check = staticmethod(_check)
def frozen(self):
return Bbox(self.get_points().copy())
    frozen.__doc__ = TransformNode.frozen.__doc__
def __array__(self, *args, **kwargs):
return self.get_points()
def is_unit(self):
"""
Returns True if the :class:`Bbox` is the unit bounding box
from (0, 0) to (1, 1).
"""
return list(self.get_points().flatten()) == [0., 0., 1., 1.]
def _get_x0(self):
return self.get_points()[0, 0]
x0 = property(_get_x0, None, None, """
(property) :attr:`x0` is the first of the pair of *x* coordinates that
define the bounding box. :attr:`x0` is not guaranteed to be
less than :attr:`x1`. If you require that, use :attr:`xmin`.""")
def _get_y0(self):
return self.get_points()[0, 1]
y0 = property(_get_y0, None, None, """
(property) :attr:`y0` is the first of the pair of *y* coordinates that
define the bounding box. :attr:`y0` is not guaranteed to be
less than :attr:`y1`. If you require that, use :attr:`ymin`.""")
def _get_x1(self):
return self.get_points()[1, 0]
x1 = property(_get_x1, None, None, """
(property) :attr:`x1` is the second of the pair of *x* coordinates
that define the bounding box. :attr:`x1` is not guaranteed to be
greater than :attr:`x0`. If you require that, use :attr:`xmax`.""")
def _get_y1(self):
return self.get_points()[1, 1]
y1 = property(_get_y1, None, None, """
(property) :attr:`y1` is the second of the pair of *y* coordinates
that define the bounding box. :attr:`y1` is not guaranteed to be
greater than :attr:`y0`. If you require that, use :attr:`ymax`.""")
def _get_p0(self):
return self.get_points()[0]
p0 = property(_get_p0, None, None, """
(property) :attr:`p0` is the first pair of (*x*, *y*) coordinates
that define the bounding box. It is not guaranteed to be the
bottom-left corner. For that, use :attr:`min`.""")
def _get_p1(self):
return self.get_points()[1]
p1 = property(_get_p1, None, None, """
(property) :attr:`p1` is the second pair of (*x*, *y*) coordinates
that define the bounding box. It is not guaranteed to be the
top-right corner. For that, use :attr:`max`.""")
def _get_xmin(self):
return min(self.get_points()[:, 0])
xmin = property(_get_xmin, None, None, """
(property) :attr:`xmin` is the left edge of the bounding box.""")
def _get_ymin(self):
return min(self.get_points()[:, 1])
ymin = property(_get_ymin, None, None, """
(property) :attr:`ymin` is the bottom edge of the bounding box.""")
def _get_xmax(self):
return max(self.get_points()[:, 0])
xmax = property(_get_xmax, None, None, """
(property) :attr:`xmax` is the right edge of the bounding box.""")
def _get_ymax(self):
return max(self.get_points()[:, 1])
ymax = property(_get_ymax, None, None, """
(property) :attr:`ymax` is the top edge of the bounding box.""")
def _get_min(self):
return [min(self.get_points()[:, 0]),
min(self.get_points()[:, 1])]
min = property(_get_min, None, None, """
(property) :attr:`min` is the bottom-left corner of the bounding
box.""")
def _get_max(self):
return [max(self.get_points()[:, 0]),
max(self.get_points()[:, 1])]
max = property(_get_max, None, None, """
(property) :attr:`max` is the top-right corner of the bounding box.""")
def _get_intervalx(self):
return self.get_points()[:, 0]
intervalx = property(_get_intervalx, None, None, """
(property) :attr:`intervalx` is the pair of *x* coordinates that define
the bounding box. It is not guaranteed to be sorted from left to
right.""")
def _get_intervaly(self):
return self.get_points()[:, 1]
intervaly = property(_get_intervaly, None, None, """
(property) :attr:`intervaly` is the pair of *y* coordinates that define
the bounding box. It is not guaranteed to be sorted from bottom to
top.""")
def _get_width(self):
points = self.get_points()
return points[1, 0] - points[0, 0]
width = property(_get_width, None, None, """
(property) The width of the bounding box. It may be negative if
:attr:`x1` < :attr:`x0`.""")
def _get_height(self):
points = self.get_points()
return points[1, 1] - points[0, 1]
height = property(_get_height, None, None, """
(property) The height of the bounding box. It may be negative if
:attr:`y1` < :attr:`y0`.""")
def _get_size(self):
points = self.get_points()
return points[1] - points[0]
size = property(_get_size, None, None, """
(property) The width and height of the bounding box. May be negative,
in the same way as :attr:`width` and :attr:`height`.""")
def _get_bounds(self):
x0, y0, x1, y1 = self.get_points().flatten()
return (x0, y0, x1 - x0, y1 - y0)
bounds = property(_get_bounds, None, None, """
(property) Returns (:attr:`x0`, :attr:`y0`, :attr:`width`,
:attr:`height`).""")
def _get_extents(self):
return self.get_points().flatten().copy()
extents = property(_get_extents, None, None, """
(property) Returns (:attr:`x0`, :attr:`y0`, :attr:`x1`,
:attr:`y1`).""")
def get_points(self):
        raise NotImplementedError()
def containsx(self, x):
"""
Returns True if *x* is between or equal to :attr:`x0` and
:attr:`x1`.
"""
x0, x1 = self.intervalx
return ((x0 < x1
and (x >= x0 and x <= x1))
or (x >= x1 and x <= x0))
def containsy(self, y):
"""
Returns True if *y* is between or equal to :attr:`y0` and
:attr:`y1`.
"""
y0, y1 = self.intervaly
return ((y0 < y1
and (y >= y0 and y <= y1))
or (y >= y1 and y <= y0))
def contains(self, x, y):
"""
Returns *True* if (*x*, *y*) is a coordinate inside the
bounding box or on its edge.
"""
return self.containsx(x) and self.containsy(y)
def overlaps(self, other):
"""
Returns True if this bounding box overlaps with the given
bounding box *other*.
"""
ax1, ay1, ax2, ay2 = self._get_extents()
bx1, by1, bx2, by2 = other._get_extents()
if any(np.isnan(v) for v in [ax1, ay1, ax2, ay2, bx1, by1, bx2, by2]):
return False
if ax2 < ax1:
ax2, ax1 = ax1, ax2
if ay2 < ay1:
ay2, ay1 = ay1, ay2
if bx2 < bx1:
bx2, bx1 = bx1, bx2
if by2 < by1:
by2, by1 = by1, by2
return not ((bx2 < ax1) or
(by2 < ay1) or
(bx1 > ax2) or
(by1 > ay2))
def fully_containsx(self, x):
"""
Returns True if *x* is between but not equal to :attr:`x0` and
:attr:`x1`.
"""
x0, x1 = self.intervalx
return ((x0 < x1
and (x > x0 and x < x1))
or (x > x1 and x < x0))
def fully_containsy(self, y):
"""
Returns True if *y* is between but not equal to :attr:`y0` and
:attr:`y1`.
"""
y0, y1 = self.intervaly
return ((y0 < y1
and (y > y0 and y < y1))
or (y > y1 and y < y0))
def fully_contains(self, x, y):
"""
Returns True if (*x*, *y*) is a coordinate inside the bounding
box, but not on its edge.
"""
return self.fully_containsx(x) \
and self.fully_containsy(y)
def fully_overlaps(self, other):
"""
Returns True if this bounding box overlaps with the given
bounding box *other*, but not on its edge alone.
"""
ax1, ay1, ax2, ay2 = self._get_extents()
bx1, by1, bx2, by2 = other._get_extents()
if ax2 < ax1:
ax2, ax1 = ax1, ax2
if ay2 < ay1:
ay2, ay1 = ay1, ay2
if bx2 < bx1:
bx2, bx1 = bx1, bx2
if by2 < by1:
by2, by1 = by1, by2
return not ((bx2 <= ax1) or
(by2 <= ay1) or
(bx1 >= ax2) or
(by1 >= ay2))
def transformed(self, transform):
"""
Return a new :class:`Bbox` object, statically transformed by
the given transform.
"""
pts = self.get_points()
ll, ul, lr = transform.transform(np.array([pts[0],
[pts[0, 0], pts[1, 1]], [pts[1, 0], pts[0, 1]]]))
return Bbox([ll, [lr[0], ul[1]]])
def inverse_transformed(self, transform):
"""
Return a new :class:`Bbox` object, statically transformed by
the inverse of the given transform.
"""
return self.transformed(transform.inverted())
coefs = {'C': (0.5, 0.5),
'SW': (0, 0),
'S': (0.5, 0),
'SE': (1.0, 0),
'E': (1.0, 0.5),
'NE': (1.0, 1.0),
'N': (0.5, 1.0),
'NW': (0, 1.0),
'W': (0, 0.5)}
def anchored(self, c, container=None):
"""
Return a copy of the :class:`Bbox`, shifted to position *c*
within a container.
*c*: may be either:
* a sequence (*cx*, *cy*) where *cx* and *cy* range from 0
to 1, where 0 is left or bottom and 1 is right or top
* a string:
- 'C' for centered
- 'S' for bottom-center
            - 'SE' for bottom-right
            - 'E' for right
- etc.
Optional argument *container* is the box within which the
:class:`Bbox` is positioned; it defaults to the initial
:class:`Bbox`.
"""
if container is None:
container = self
l, b, w, h = container.bounds
if isinstance(c, six.string_types):
cx, cy = self.coefs[c]
else:
cx, cy = c
L, B, W, H = self.bounds
return Bbox(self._points +
[(l + cx * (w - W)) - L,
(b + cy * (h - H)) - B])
def shrunk(self, mx, my):
"""
Return a copy of the :class:`Bbox`, shrunk by the factor *mx*
in the *x* direction and the factor *my* in the *y* direction.
The lower left corner of the box remains unchanged. Normally
*mx* and *my* will be less than 1, but this is not enforced.
"""
w, h = self.size
return Bbox([self._points[0],
self._points[0] + [mx * w, my * h]])
def shrunk_to_aspect(self, box_aspect, container=None, fig_aspect=1.0):
"""
Return a copy of the :class:`Bbox`, shrunk so that it is as
large as it can be while having the desired aspect ratio,
*box_aspect*. If the box coordinates are relative---that
is, fractions of a larger box such as a figure---then the
physical aspect ratio of that figure is specified with
*fig_aspect*, so that *box_aspect* can also be given as a
ratio of the absolute dimensions, not the relative dimensions.
"""
if box_aspect <= 0 or fig_aspect <= 0:
raise ValueError("'box_aspect' and 'fig_aspect' must be positive")
if container is None:
container = self
w, h = container.size
H = w * box_aspect / fig_aspect
if H <= h:
W = w
else:
W = h * fig_aspect / box_aspect
H = h
return Bbox([self._points[0],
self._points[0] + (W, H)])
def splitx(self, *args):
"""
e.g., ``bbox.splitx(f1, f2, ...)``
Returns a list of new :class:`Bbox` objects formed by
splitting the original one with vertical lines at fractional
positions *f1*, *f2*, ...
"""
boxes = []
xf = [0] + list(args) + [1]
x0, y0, x1, y1 = self._get_extents()
w = x1 - x0
for xf0, xf1 in zip(xf[:-1], xf[1:]):
boxes.append(Bbox([[x0 + xf0 * w, y0], [x0 + xf1 * w, y1]]))
return boxes
def splity(self, *args):
"""
        e.g., ``bbox.splity(f1, f2, ...)``
Returns a list of new :class:`Bbox` objects formed by
splitting the original one with horizontal lines at fractional
positions *f1*, *f2*, ...
"""
boxes = []
yf = [0] + list(args) + [1]
x0, y0, x1, y1 = self._get_extents()
h = y1 - y0
for yf0, yf1 in zip(yf[:-1], yf[1:]):
boxes.append(Bbox([[x0, y0 + yf0 * h], [x1, y0 + yf1 * h]]))
return boxes
def count_contains(self, vertices):
"""
Count the number of vertices contained in the :class:`Bbox`.
*vertices* is a Nx2 Numpy array.
"""
if len(vertices) == 0:
return 0
vertices = np.asarray(vertices)
x0, y0, x1, y1 = self._get_extents()
with np.errstate(invalid='ignore'):
dx0 = np.sign(vertices[:, 0] - x0)
dy0 = np.sign(vertices[:, 1] - y0)
dx1 = np.sign(vertices[:, 0] - x1)
dy1 = np.sign(vertices[:, 1] - y1)
inside = ((abs(dx0 + dx1) + abs(dy0 + dy1)) == 0)
return np.sum(inside)
def count_overlaps(self, bboxes):
"""
Count the number of bounding boxes that overlap this one.
bboxes is a sequence of :class:`BboxBase` objects
"""
return count_bboxes_overlapping_bbox(
self, np.atleast_3d([np.array(x) for x in bboxes]))
def expanded(self, sw, sh):
"""
Return a new :class:`Bbox` which is this :class:`Bbox`
expanded around its center by the given factors *sw* and
*sh*.
"""
width = self.width
height = self.height
deltaw = (sw * width - width) / 2.0
deltah = (sh * height - height) / 2.0
a = np.array([[-deltaw, -deltah], [deltaw, deltah]])
return Bbox(self._points + a)
def padded(self, p):
"""
Return a new :class:`Bbox` that is padded on all four sides by
the given value.
"""
points = self.get_points()
return Bbox(points + [[-p, -p], [p, p]])
def translated(self, tx, ty):
"""
Return a copy of the :class:`Bbox`, statically translated by
*tx* and *ty*.
"""
return Bbox(self._points + (tx, ty))
def corners(self):
"""
Return an array of points which are the four corners of this
rectangle. For example, if this :class:`Bbox` is defined by
the points (*a*, *b*) and (*c*, *d*), :meth:`corners` returns
(*a*, *b*), (*a*, *d*), (*c*, *b*) and (*c*, *d*).
"""
l, b, r, t = self.get_points().flatten()
return np.array([[l, b], [l, t], [r, b], [r, t]])
def rotated(self, radians):
"""
Return a new bounding box that bounds a rotated version of
this bounding box by the given radians. The new bounding box
is still aligned with the axes, of course.
"""
corners = self.corners()
corners_rotated = Affine2D().rotate(radians).transform(corners)
bbox = Bbox.unit()
bbox.update_from_data_xy(corners_rotated, ignore=True)
return bbox
@staticmethod
def union(bboxes):
"""
Return a :class:`Bbox` that contains all of the given bboxes.
"""
if not len(bboxes):
raise ValueError("'bboxes' cannot be empty")
if len(bboxes) == 1:
return bboxes[0]
x0 = np.inf
y0 = np.inf
x1 = -np.inf
y1 = -np.inf
for bbox in bboxes:
points = bbox.get_points()
xs = points[:, 0]
ys = points[:, 1]
x0 = min(x0, np.min(xs))
y0 = min(y0, np.min(ys))
x1 = max(x1, np.max(xs))
y1 = max(y1, np.max(ys))
return Bbox.from_extents(x0, y0, x1, y1)
@staticmethod
def intersection(bbox1, bbox2):
"""
Return the intersection of the two bboxes or None
if they do not intersect.
Implements the algorithm described at:
http://www.tekpool.com/node/2687
"""
intersects = not (bbox2.xmin > bbox1.xmax or
bbox2.xmax < bbox1.xmin or
bbox2.ymin > bbox1.ymax or
bbox2.ymax < bbox1.ymin)
if intersects:
x0 = max([bbox1.xmin, bbox2.xmin])
x1 = min([bbox1.xmax, bbox2.xmax])
y0 = max([bbox1.ymin, bbox2.ymin])
y1 = min([bbox1.ymax, bbox2.ymax])
return Bbox.from_extents(x0, y0, x1, y1)
return None
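# Usage sketch (not part of the original source): the read-only geometry
# helpers above in action. Bbox is the mutable subclass defined just below;
# it is resolved lazily when this helper is called.
def _bboxbase_usage_example():
    a = Bbox.from_extents(0.0, 0.0, 2.0, 1.0)
    b = Bbox.from_bounds(1.0, 0.5, 2.0, 2.0)  # x0, y0, width, height
    assert a.contains(0.5, 0.5)
    assert a.overlaps(b) and not a.fully_contains(2.0, 1.0)
    union = BboxBase.union([a, b])
    return union.extents  # array([0., 0., 3., 2.5])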
class Bbox(BboxBase):
"""
A mutable bounding box.
"""
def __init__(self, points, **kwargs):
"""
*points*: a 2x2 numpy array of the form [[x0, y0], [x1, y1]]
If you need to create a :class:`Bbox` object from another form
of data, consider the static methods :meth:`unit`,
:meth:`from_bounds` and :meth:`from_extents`.
"""
BboxBase.__init__(self, **kwargs)
points = np.asarray(points, np.float_)
if points.shape != (2, 2):
raise ValueError('Bbox points must be of the form '
'"[[x0, y0], [x1, y1]]".')
self._points = points
self._minpos = np.array([0.0000001, 0.0000001])
self._ignore = True
# it is helpful in some contexts to know if the bbox is a
# default or has been mutated; we store the orig points to
# support the mutated methods
self._points_orig = self._points.copy()
if DEBUG:
___init__ = __init__
def __init__(self, points, **kwargs):
self._check(points)
self.___init__(points, **kwargs)
def invalidate(self):
self._check(self._points)
TransformNode.invalidate(self)
@staticmethod
def unit():
"""
(staticmethod) Create a new unit :class:`Bbox` from (0, 0) to
(1, 1).
"""
return Bbox(np.array([[0.0, 0.0], [1.0, 1.0]], np.float))
@staticmethod
def null():
"""
(staticmethod) Create a new null :class:`Bbox` from (inf, inf) to
(-inf, -inf).
"""
return Bbox(np.array([[np.inf, np.inf], [-np.inf, -np.inf]], np.float))
@staticmethod
def from_bounds(x0, y0, width, height):
"""
(staticmethod) Create a new :class:`Bbox` from *x0*, *y0*,
*width* and *height*.
*width* and *height* may be negative.
"""
return Bbox.from_extents(x0, y0, x0 + width, y0 + height)
@staticmethod
def from_extents(*args):
"""
(staticmethod) Create a new Bbox from *left*, *bottom*,
*right* and *top*.
The *y*-axis increases upwards.
"""
points = np.array(args, dtype=np.float_).reshape(2, 2)
return Bbox(points)
def __format__(self, fmt):
return (
'Bbox(x0={0.x0:{1}}, y0={0.y0:{1}}, x1={0.x1:{1}}, y1={0.y1:{1}})'.
format(self, fmt))
def __str__(self):
return format(self, '')
def __repr__(self):
return 'Bbox([[{0.x0}, {0.y0}], [{0.x1}, {0.y1}]])'.format(self)
def ignore(self, value):
"""
Set whether the existing bounds of the box should be ignored
by subsequent calls to :meth:`update_from_data` or
:meth:`update_from_data_xy`.
*value*:
- When True, subsequent calls to :meth:`update_from_data`
will ignore the existing bounds of the :class:`Bbox`.
- When False, subsequent calls to :meth:`update_from_data`
will include the existing bounds of the :class:`Bbox`.
"""
self._ignore = value
def update_from_data(self, x, y, ignore=None):
"""
Update the bounds of the :class:`Bbox` based on the passed in
data. After updating, the bounds will have positive *width*
and *height*; *x0* and *y0* will be the minimal values.
*x*: a numpy array of *x*-values
*y*: a numpy array of *y*-values
*ignore*:
- when True, ignore the existing bounds of the :class:`Bbox`.
- when False, include the existing bounds of the :class:`Bbox`.
- when None, use the last value passed to :meth:`ignore`.
"""
warnings.warn(
"update_from_data requires a memory copy -- please replace with "
"update_from_data_xy")
xy = np.hstack((x.reshape((len(x), 1)), y.reshape((len(y), 1))))
return self.update_from_data_xy(xy, ignore)
def update_from_path(self, path, ignore=None, updatex=True, updatey=True):
"""
Update the bounds of the :class:`Bbox` based on the passed in
data. After updating, the bounds will have positive *width*
and *height*; *x0* and *y0* will be the minimal values.
*path*: a :class:`~matplotlib.path.Path` instance
*ignore*:
- when True, ignore the existing bounds of the :class:`Bbox`.
- when False, include the existing bounds of the :class:`Bbox`.
- when None, use the last value passed to :meth:`ignore`.
*updatex*: when True, update the x values
*updatey*: when True, update the y values
"""
if ignore is None:
ignore = self._ignore
if path.vertices.size == 0:
return
points, minpos, changed = update_path_extents(
path, None, self._points, self._minpos, ignore)
if changed:
self.invalidate()
if updatex:
self._points[:, 0] = points[:, 0]
self._minpos[0] = minpos[0]
if updatey:
self._points[:, 1] = points[:, 1]
self._minpos[1] = minpos[1]
def update_from_data_xy(self, xy, ignore=None, updatex=True, updatey=True):
"""
Update the bounds of the :class:`Bbox` based on the passed in
data. After updating, the bounds will have positive *width*
and *height*; *x0* and *y0* will be the minimal values.
*xy*: a numpy array of 2D points
*ignore*:
- when True, ignore the existing bounds of the :class:`Bbox`.
- when False, include the existing bounds of the :class:`Bbox`.
- when None, use the last value passed to :meth:`ignore`.
*updatex*: when True, update the x values
*updatey*: when True, update the y values
"""
if len(xy) == 0:
return
path = Path(xy)
self.update_from_path(path, ignore=ignore,
updatex=updatex, updatey=updatey)
def _set_x0(self, val):
self._points[0, 0] = val
self.invalidate()
x0 = property(BboxBase._get_x0, _set_x0)
def _set_y0(self, val):
self._points[0, 1] = val
self.invalidate()
y0 = property(BboxBase._get_y0, _set_y0)
def _set_x1(self, val):
self._points[1, 0] = val
self.invalidate()
x1 = property(BboxBase._get_x1, _set_x1)
def _set_y1(self, val):
self._points[1, 1] = val
self.invalidate()
y1 = property(BboxBase._get_y1, _set_y1)
def _set_p0(self, val):
self._points[0] = val
self.invalidate()
p0 = property(BboxBase._get_p0, _set_p0)
def _set_p1(self, val):
self._points[1] = val
self.invalidate()
p1 = property(BboxBase._get_p1, _set_p1)
def _set_intervalx(self, interval):
self._points[:, 0] = interval
self.invalidate()
intervalx = property(BboxBase._get_intervalx, _set_intervalx)
def _set_intervaly(self, interval):
self._points[:, 1] = interval
self.invalidate()
intervaly = property(BboxBase._get_intervaly, _set_intervaly)
def _set_bounds(self, bounds):
l, b, w, h = bounds
points = np.array([[l, b], [l + w, b + h]], np.float_)
if np.any(self._points != points):
self._points = points
self.invalidate()
bounds = property(BboxBase._get_bounds, _set_bounds)
def _get_minpos(self):
return self._minpos
minpos = property(_get_minpos)
def _get_minposx(self):
return self._minpos[0]
minposx = property(_get_minposx)
def _get_minposy(self):
return self._minpos[1]
minposy = property(_get_minposy)
def get_points(self):
"""
Get the points of the bounding box directly as a numpy array
of the form: [[x0, y0], [x1, y1]].
"""
self._invalid = 0
return self._points
def set_points(self, points):
"""
Set the points of the bounding box directly from a numpy array
of the form: [[x0, y0], [x1, y1]]. No error checking is
performed, as this method is mainly for internal use.
"""
if np.any(self._points != points):
self._points = points
self.invalidate()
def set(self, other):
"""
Set this bounding box from the "frozen" bounds of another
:class:`Bbox`.
"""
if np.any(self._points != other.get_points()):
self._points = other.get_points()
self.invalidate()
def mutated(self):
'return whether the bbox has changed since init'
return self.mutatedx() or self.mutatedy()
def mutatedx(self):
'return whether the x-limits have changed since init'
return (self._points[0, 0] != self._points_orig[0, 0] or
self._points[1, 0] != self._points_orig[1, 0])
def mutatedy(self):
'return whether the y-limits have changed since init'
return (self._points[0, 1] != self._points_orig[0, 1] or
self._points[1, 1] != self._points_orig[1, 1])
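# Sketch (not part of the original source): growing a null Bbox from data and
# checking the mutation flags defined above.
def _bbox_update_example():
    box = Bbox.null()
    box.update_from_data_xy(np.array([[0.0, 1.0], [2.0, 3.0], [-1.0, 0.5]]))
    assert box.bounds == (-1.0, 0.5, 3.0, 2.5)
    return box.mutated()  # True: the limits changed since construction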
class TransformedBbox(BboxBase):
"""
A :class:`Bbox` that is automatically transformed by a given
transform. When either the child bounding box or transform
changes, the bounds of this bbox will update accordingly.
"""
def __init__(self, bbox, transform, **kwargs):
"""
*bbox*: a child :class:`Bbox`
*transform*: a 2D :class:`Transform`
"""
if not bbox.is_bbox:
raise ValueError("'bbox' is not a bbox")
if not isinstance(transform, Transform):
msg = ("'transform' must be an instance of"
" 'matplotlib.transform.Transform'")
raise ValueError(msg)
if transform.input_dims != 2 or transform.output_dims != 2:
msg = "The input and output dimensions of 'transform' must be 2"
raise ValueError(msg)
BboxBase.__init__(self, **kwargs)
self._bbox = bbox
self._transform = transform
self.set_children(bbox, transform)
self._points = None
def __repr__(self):
return "TransformedBbox(%r, %r)" % (self._bbox, self._transform)
def get_points(self):
if self._invalid:
points = self._transform.transform(self._bbox.get_points())
points = np.ma.filled(points, 0.0)
self._points = points
self._invalid = 0
return self._points
get_points.__doc__ = Bbox.get_points.__doc__
if DEBUG:
_get_points = get_points
def get_points(self):
points = self._get_points()
self._check(points)
return points
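# Sketch (not part of the original source): a TransformedBbox tracks its child
# Bbox through the invalidation machinery above. Affine2D is defined later in
# this module.
def _transformed_bbox_example():
    child = Bbox.from_extents(0.0, 0.0, 1.0, 1.0)
    scaled = TransformedBbox(child, Affine2D().scale(2.0))
    before = scaled.get_points().copy()  # [[0, 0], [2, 2]]
    child.set_points(np.array([[0.0, 0.0], [2.0, 1.0]]))  # invalidates `scaled`
    return before, scaled.get_points()  # second value is [[0, 0], [4, 2]]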
class Transform(TransformNode):
"""
The base class of all :class:`TransformNode` instances that
actually perform a transformation.
All non-affine transformations should be subclasses of this class.
New affine transformations should be subclasses of
:class:`Affine2D`.
Subclasses of this class should override the following members (at
minimum):
- :attr:`input_dims`
- :attr:`output_dims`
- :meth:`transform`
- :attr:`is_separable`
- :attr:`has_inverse`
- :meth:`inverted` (if :attr:`has_inverse` is True)
If the transform needs to do something non-standard with
:class:`matplotlib.path.Path` objects, such as adding curves
where there were once line segments, it should override:
- :meth:`transform_path`
"""
input_dims = None
"""
The number of input dimensions of this transform.
Must be overridden (with integers) in the subclass.
"""
output_dims = None
"""
The number of output dimensions of this transform.
Must be overridden (with integers) in the subclass.
"""
has_inverse = False
"""True if this transform has a corresponding inverse transform."""
is_separable = False
"""True if this transform is separable in the x- and y- dimensions."""
def __add__(self, other):
"""
Composes two transforms together such that *self* is followed
by *other*.
"""
if isinstance(other, Transform):
return composite_transform_factory(self, other)
raise TypeError(
"Can not add Transform to object of type '%s'" % type(other))
def __radd__(self, other):
"""
Composes two transforms together such that *self* is followed
by *other*.
"""
if isinstance(other, Transform):
return composite_transform_factory(other, self)
raise TypeError(
"Can not add Transform to object of type '%s'" % type(other))
def __eq__(self, other):
# equality is based on transform object id. Hence:
# Transform() != Transform().
# Some classes, such as TransformWrapper & AffineBase, will override.
return self is other
def _iter_break_from_left_to_right(self):
"""
Returns an iterator breaking down this transform stack from left to
right recursively. If self == ((A, N), A) then the result will be an
iterator which yields I : ((A, N), A), followed by A : (N, A),
followed by (A, N) : (A), but not ((A, N), A) : I.
This is equivalent to flattening the stack then yielding
``flat_stack[:i], flat_stack[i:]`` where i=0..(n-1).
"""
yield IdentityTransform(), self
@property
def depth(self):
"""
Returns the number of transforms which have been chained
together to form this Transform instance.
.. note::
For the special case of a Composite transform, the maximum depth
of the two is returned.
"""
return 1
def contains_branch(self, other):
"""
Return whether the given transform is a sub-tree of this transform.
This routine uses transform equality to identify sub-trees, therefore
in many situations it is object id which will be used.
For the case where the given transform represents the whole
of this transform, returns True.
"""
if self.depth < other.depth:
return False
# check that a subtree is equal to other (starting from self)
for _, sub_tree in self._iter_break_from_left_to_right():
if sub_tree == other:
return True
return False
def contains_branch_seperately(self, other_transform):
"""
Returns whether the given branch is a sub-tree of this transform on
        each separate dimension.
A common use for this method is to identify if a transform is a blended
transform containing an axes' data transform. e.g.::
x_isdata, y_isdata = trans.contains_branch_seperately(ax.transData)
"""
if self.output_dims != 2:
raise ValueError('contains_branch_seperately only supports '
'transforms with 2 output dimensions')
        # for a non-blended transform each separate dimension is the same, so
# just return the appropriate shape.
return [self.contains_branch(other_transform)] * 2
def __sub__(self, other):
"""
Returns a transform stack which goes all the way down self's transform
stack, and then ascends back up other's stack. If it can, this is
optimised::
# normally
A - B == a + b.inverted()
# sometimes, when A contains the tree B there is no need to
# descend all the way down to the base of A (via B), instead we
# can just stop at B.
(A + B) - (B)^-1 == A
            # similarly, when B contains tree A, we can avoid descending A at
# all, basically:
A - (A + B) == ((B + A) - A).inverted() or B^-1
For clarity, the result of ``(A + B) - B + B == (A + B)``.
"""
# we only know how to do this operation if other is a Transform.
if not isinstance(other, Transform):
return NotImplemented
for remainder, sub_tree in self._iter_break_from_left_to_right():
if sub_tree == other:
return remainder
for remainder, sub_tree in other._iter_break_from_left_to_right():
if sub_tree == self:
if not remainder.has_inverse:
raise ValueError("The shortcut cannot be computed since "
"other's transform includes a non-invertable component.")
return remainder.inverted()
# if we have got this far, then there was no shortcut possible
if other.has_inverse:
return self + other.inverted()
else:
raise ValueError('It is not possible to compute transA - transB '
'since transB cannot be inverted and there is no '
'shortcut possible.')
def __array__(self, *args, **kwargs):
"""
Array interface to get at this Transform's affine matrix.
"""
return self.get_affine().get_matrix()
def transform(self, values):
"""
Performs the transformation on the given array of values.
Accepts a numpy array of shape (N x :attr:`input_dims`) and
returns a numpy array of shape (N x :attr:`output_dims`).
Alternatively, accepts a numpy array of length :attr:`input_dims`
and returns a numpy array of length :attr:`output_dims`.
"""
# Ensure that values is a 2d array (but remember whether
# we started with a 1d or 2d array).
values = np.asanyarray(values)
ndim = values.ndim
values = values.reshape((-1, self.input_dims))
# Transform the values
res = self.transform_affine(self.transform_non_affine(values))
# Convert the result back to the shape of the input values.
if ndim == 0:
assert not np.ma.is_masked(res) # just to be on the safe side
return res[0, 0]
if ndim == 1:
return res.reshape(-1)
elif ndim == 2:
return res
else:
raise ValueError(
"Input values must have shape (N x {dims}) "
"or ({dims}).".format(dims=self.input_dims))
return res
def transform_affine(self, values):
"""
Performs only the affine part of this transformation on the
given array of values.
``transform(values)`` is always equivalent to
``transform_affine(transform_non_affine(values))``.
In non-affine transformations, this is generally a no-op. In
affine transformations, this is equivalent to
``transform(values)``.
Accepts a numpy array of shape (N x :attr:`input_dims`) and
returns a numpy array of shape (N x :attr:`output_dims`).
Alternatively, accepts a numpy array of length :attr:`input_dims`
and returns a numpy array of length :attr:`output_dims`.
"""
return self.get_affine().transform(values)
def transform_non_affine(self, values):
"""
Performs only the non-affine part of the transformation.
``transform(values)`` is always equivalent to
``transform_affine(transform_non_affine(values))``.
In non-affine transformations, this is generally equivalent to
``transform(values)``. In affine transformations, this is
always a no-op.
Accepts a numpy array of shape (N x :attr:`input_dims`) and
returns a numpy array of shape (N x :attr:`output_dims`).
Alternatively, accepts a numpy array of length :attr:`input_dims`
and returns a numpy array of length :attr:`output_dims`.
"""
return values
def transform_bbox(self, bbox):
"""
Transform the given bounding box.
Note, for smarter transforms including caching (a common
requirement for matplotlib figures), see :class:`TransformedBbox`.
"""
return Bbox(self.transform(bbox.get_points()))
def get_affine(self):
"""
Get the affine part of this transform.
"""
return IdentityTransform()
def get_matrix(self):
"""
Get the Affine transformation array for the affine part
of this transform.
"""
return self.get_affine().get_matrix()
def transform_point(self, point):
"""
A convenience function that returns the transformed copy of a
single point.
The point is given as a sequence of length :attr:`input_dims`.
The transformed point is returned as a sequence of length
:attr:`output_dims`.
"""
if len(point) != self.input_dims:
msg = "The length of 'point' must be 'self.input_dims'"
raise ValueError(msg)
return self.transform(np.asarray([point]))[0]
def transform_path(self, path):
"""
Returns a transformed path.
*path*: a :class:`~matplotlib.path.Path` instance.
In some cases, this transform may insert curves into the path
that began as line segments.
"""
return self.transform_path_affine(self.transform_path_non_affine(path))
def transform_path_affine(self, path):
"""
Returns a path, transformed only by the affine part of
this transform.
*path*: a :class:`~matplotlib.path.Path` instance.
``transform_path(path)`` is equivalent to
``transform_path_affine(transform_path_non_affine(values))``.
"""
return self.get_affine().transform_path_affine(path)
def transform_path_non_affine(self, path):
"""
Returns a path, transformed only by the non-affine
part of this transform.
*path*: a :class:`~matplotlib.path.Path` instance.
``transform_path(path)`` is equivalent to
``transform_path_affine(transform_path_non_affine(values))``.
"""
x = self.transform_non_affine(path.vertices)
return Path._fast_from_codes_and_verts(x, path.codes,
{'interpolation_steps': path._interpolation_steps,
'should_simplify': path.should_simplify})
def transform_angles(self, angles, pts, radians=False, pushoff=1e-5):
"""
Performs transformation on a set of angles anchored at
specific locations.
The *angles* must be a column vector (i.e., numpy array).
The *pts* must be a two-column numpy array of x,y positions
(angle transforms currently only work in 2D). This array must
have the same number of rows as *angles*.
*radians* indicates whether or not input angles are given in
radians (True) or degrees (False; the default).
*pushoff* is the distance to move away from *pts* for
determining transformed angles (see discussion of method
below).
The transformed angles are returned in an array with the same
size as *angles*.
The generic version of this method uses a very generic
algorithm that transforms *pts*, as well as locations very
close to *pts*, to find the angle in the transformed system.
"""
# Must be 2D
if self.input_dims != 2 or self.output_dims != 2:
raise NotImplementedError('Only defined in 2D')
if pts.shape[1] != 2:
raise ValueError("'pts' must be array with 2 columns for x,y")
if angles.ndim != 1 or angles.shape[0] != pts.shape[0]:
msg = "'angles' must be a column vector and have same number of"
msg += " rows as 'pts'"
raise ValueError(msg)
# Convert to radians if desired
if not radians:
angles = angles / 180.0 * np.pi
# Move a short distance away
pts2 = pts + pushoff * np.c_[np.cos(angles), np.sin(angles)]
# Transform both sets of points
tpts = self.transform(pts)
tpts2 = self.transform(pts2)
# Calculate transformed angles
d = tpts2 - tpts
a = np.arctan2(d[:, 1], d[:, 0])
# Convert back to degrees if desired
if not radians:
a = a * 180.0 / np.pi
return a
def inverted(self):
"""
Return the corresponding inverse transformation.
The return value of this method should be treated as
temporary. An update to *self* does not cause a corresponding
update to its inverted copy.
``x === self.inverted().transform(self.transform(x))``
"""
raise NotImplementedError()
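# Illustrative usage sketch (hypothetical helper, not from the original
# module): it walks through the ``transform_angles`` algorithm documented
# above -- points are nudged by *pushoff* along each angle, both point sets
# are transformed, and the angle of the difference vector is returned.
# ``Affine2D`` is defined further down in this module; the name is only
# resolved when the function is called.
def _example_transform_angles_sketch():
    rot90 = Affine2D().rotate_deg(90)
    angles = np.array([0.0, 45.0])
    pts = np.array([[1.0, 1.0], [2.0, 0.0]])
    out = rot90.transform_angles(angles, pts)
    # rotating the frame by 90 degrees shifts every anchored angle by 90
    assert np.allclose(out, [90.0, 135.0])
    return out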
class TransformWrapper(Transform):
"""
A helper class that holds a single child transform and acts
equivalently to it.
This is useful if a node of the transform tree must be replaced at
run time with a transform of a different type. This class allows
that replacement to correctly trigger invalidation.
Note that :class:`TransformWrapper` instances must have the same
input and output dimensions during their entire lifetime, so the
child transform may only be replaced with another child transform
of the same dimensions.
"""
pass_through = True
def __init__(self, child):
"""
*child*: A class:`Transform` instance. This child may later
be replaced with :meth:`set`.
"""
if not isinstance(child, Transform):
msg = ("'child' must be an instance of"
" 'matplotlib.transform.Transform'")
raise ValueError(msg)
self._init(child)
self.set_children(child)
def _init(self, child):
Transform.__init__(self)
self.input_dims = child.input_dims
self.output_dims = child.output_dims
self._set(child)
self._invalid = 0
def __eq__(self, other):
return self._child.__eq__(other)
if DEBUG:
def __str__(self):
return str(self._child)
# NOTE: Transform.__[gs]etstate__ should be sufficient when using only
# Python 3.4+.
def __getstate__(self):
# only store the child information and parents
return {
'child': self._child,
'input_dims': self.input_dims,
'output_dims': self.output_dims,
# turn the weakkey dictionary into a normal dictionary
'parents': dict(six.iteritems(self._parents))
}
def __setstate__(self, state):
# re-initialise the TransformWrapper with the state's child
self._init(state['child'])
# The child may not be unpickled yet, so restore its information.
self.input_dims = state['input_dims']
self.output_dims = state['output_dims']
# turn the normal dictionary back into a WeakValueDictionary
self._parents = WeakValueDictionary(state['parents'])
def __repr__(self):
return "TransformWrapper(%r)" % self._child
def frozen(self):
return self._child.frozen()
frozen.__doc__ = Transform.frozen.__doc__
def _set(self, child):
self._child = child
self.transform = child.transform
self.transform_affine = child.transform_affine
self.transform_non_affine = child.transform_non_affine
self.transform_path = child.transform_path
self.transform_path_affine = child.transform_path_affine
self.transform_path_non_affine = child.transform_path_non_affine
self.get_affine = child.get_affine
self.inverted = child.inverted
self.get_matrix = child.get_matrix
# note we do not wrap other properties here since the transform's
        # child can be changed with TransformWrapper.set and so checking
# is_affine and other such properties may be dangerous.
def set(self, child):
"""
Replace the current child of this transform with another one.
The new child must have the same number of input and output
dimensions as the current child.
"""
if (child.input_dims != self.input_dims or
child.output_dims != self.output_dims):
msg = ("The new child must have the same number of input and"
" output dimensions as the current child.")
raise ValueError(msg)
self.set_children(child)
self._set(child)
self._invalid = 0
self.invalidate()
self._invalid = 0
def _get_is_affine(self):
return self._child.is_affine
is_affine = property(_get_is_affine)
def _get_is_separable(self):
return self._child.is_separable
is_separable = property(_get_is_separable)
def _get_has_inverse(self):
return self._child.has_inverse
has_inverse = property(_get_has_inverse)
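# Illustrative usage sketch (hypothetical helper, not from the original
# module): TransformWrapper forwards every call to its child, and ``set``
# swaps the child in place (same dimensions required), as the class docstring
# above describes.  ``Affine2D`` is defined later in this module and is
# resolved at call time.
def _example_transform_wrapper_sketch():
    wrapper = TransformWrapper(Affine2D().scale(2.0))
    assert np.allclose(wrapper.transform_point((1.0, 1.0)), [2.0, 2.0])
    wrapper.set(Affine2D().translate(1.0, 0.0))   # replace the child
    assert np.allclose(wrapper.transform_point((1.0, 1.0)), [2.0, 1.0])
    return wrapper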
class AffineBase(Transform):
"""
The base class of all affine transformations of any number of
dimensions.
"""
is_affine = True
def __init__(self, *args, **kwargs):
Transform.__init__(self, *args, **kwargs)
self._inverted = None
def __array__(self, *args, **kwargs):
# optimises the access of the transform matrix vs the superclass
return self.get_matrix()
@staticmethod
def _concat(a, b):
"""
Concatenates two transformation matrices (represented as numpy
arrays) together.
"""
return np.dot(b, a)
def __eq__(self, other):
if getattr(other, "is_affine", False):
return np.all(self.get_matrix() == other.get_matrix())
return NotImplemented
def transform(self, values):
return self.transform_affine(values)
transform.__doc__ = Transform.transform.__doc__
def transform_affine(self, values):
raise NotImplementedError('Affine subclasses should override this '
'method.')
transform_affine.__doc__ = Transform.transform_affine.__doc__
def transform_non_affine(self, points):
return points
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
return self.transform_path_affine(path)
transform_path.__doc__ = Transform.transform_path.__doc__
def transform_path_affine(self, path):
return Path(self.transform_affine(path.vertices),
path.codes, path._interpolation_steps)
transform_path_affine.__doc__ = Transform.transform_path_affine.__doc__
def transform_path_non_affine(self, path):
return path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def get_affine(self):
return self
get_affine.__doc__ = Transform.get_affine.__doc__
class Affine2DBase(AffineBase):
"""
The base class of all 2D affine transformations.
2D affine transformations are performed using a 3x3 numpy array::
a c e
b d f
0 0 1
This class provides the read-only interface. For a mutable 2D
affine transformation, use :class:`Affine2D`.
Subclasses of this class will generally only need to override a
constructor and :meth:`get_matrix` that generates a custom 3x3 matrix.
"""
has_inverse = True
input_dims = 2
output_dims = 2
def frozen(self):
return Affine2D(self.get_matrix().copy())
frozen.__doc__ = AffineBase.frozen.__doc__
def _get_is_separable(self):
mtx = self.get_matrix()
return mtx[0, 1] == 0.0 and mtx[1, 0] == 0.0
is_separable = property(_get_is_separable)
def to_values(self):
"""
Return the values of the matrix as a sequence (a,b,c,d,e,f)
"""
mtx = self.get_matrix()
return tuple(mtx[:2].swapaxes(0, 1).flatten())
@staticmethod
def matrix_from_values(a, b, c, d, e, f):
"""
(staticmethod) Create a new transformation matrix as a 3x3
numpy array of the form::
a c e
b d f
0 0 1
"""
return np.array([[a, c, e], [b, d, f], [0.0, 0.0, 1.0]], np.float_)
def transform_affine(self, points):
mtx = self.get_matrix()
if isinstance(points, MaskedArray):
tpoints = affine_transform(points.data, mtx)
return ma.MaskedArray(tpoints, mask=ma.getmask(points))
return affine_transform(points, mtx)
def transform_point(self, point):
mtx = self.get_matrix()
return affine_transform([point], mtx)[0]
transform_point.__doc__ = AffineBase.transform_point.__doc__
if DEBUG:
_transform_affine = transform_affine
def transform_affine(self, points):
# The major speed trap here is just converting to the
# points to an array in the first place. If we can use
# more arrays upstream, that should help here.
if (not ma.isMaskedArray(points) and
not isinstance(points, np.ndarray)):
warnings.warn(
('A non-numpy array of type %s was passed in for ' +
'transformation. Please correct this.')
% type(points))
return self._transform_affine(points)
transform_affine.__doc__ = AffineBase.transform_affine.__doc__
def inverted(self):
if self._inverted is None or self._invalid:
mtx = self.get_matrix()
shorthand_name = None
if self._shorthand_name:
shorthand_name = '(%s)-1' % self._shorthand_name
self._inverted = Affine2D(inv(mtx), shorthand_name=shorthand_name)
self._invalid = 0
return self._inverted
inverted.__doc__ = AffineBase.inverted.__doc__
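# Illustrative usage sketch (hypothetical helper, not from the original
# module): it spells out the 3x3 matrix layout documented in the Affine2DBase
# docstring, where the six free values (a, b, c, d, e, f) are stored as
#     a c e
#     b d f
#     0 0 1
def _example_affine_matrix_layout_sketch():
    mtx = Affine2DBase.matrix_from_values(1, 2, 3, 4, 5, 6)
    expected = np.array([[1.0, 3.0, 5.0],
                         [2.0, 4.0, 6.0],
                         [0.0, 0.0, 1.0]])
    assert np.array_equal(mtx, expected)
    return mtx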
class Affine2D(Affine2DBase):
"""
A mutable 2D affine transformation.
"""
def __init__(self, matrix=None, **kwargs):
"""
Initialize an Affine transform from a 3x3 numpy float array::
a c e
b d f
0 0 1
If *matrix* is None, initialize with the identity transform.
"""
Affine2DBase.__init__(self, **kwargs)
if matrix is None:
matrix = np.identity(3)
elif DEBUG:
matrix = np.asarray(matrix, np.float_)
assert matrix.shape == (3, 3)
self._mtx = matrix
self._invalid = 0
def __repr__(self):
return "Affine2D(%s)" % repr(self._mtx)
# def __cmp__(self, other):
# # XXX redundant. this only tells us eq.
# if (isinstance(other, Affine2D) and
# (self.get_matrix() == other.get_matrix()).all()):
# return 0
# return -1
@staticmethod
def from_values(a, b, c, d, e, f):
"""
(staticmethod) Create a new Affine2D instance from the given
values::
a c e
b d f
0 0 1
.
"""
return Affine2D(
np.array([a, c, e, b, d, f, 0.0, 0.0, 1.0], np.float_)
.reshape((3, 3)))
def get_matrix(self):
"""
Get the underlying transformation matrix as a 3x3 numpy array::
a c e
b d f
0 0 1
.
"""
self._invalid = 0
return self._mtx
def set_matrix(self, mtx):
"""
Set the underlying transformation matrix from a 3x3 numpy array::
a c e
b d f
0 0 1
.
"""
self._mtx = mtx
self.invalidate()
def set(self, other):
"""
Set this transformation from the frozen copy of another
:class:`Affine2DBase` object.
"""
if not isinstance(other, Affine2DBase):
msg = ("'other' must be an instance of"
" 'matplotlib.transform.Affine2DBase'")
raise ValueError(msg)
self._mtx = other.get_matrix()
self.invalidate()
@staticmethod
def identity():
"""
(staticmethod) Return a new :class:`Affine2D` object that is
the identity transform.
Unless this transform will be mutated later on, consider using
the faster :class:`IdentityTransform` class instead.
"""
return Affine2D(np.identity(3))
def clear(self):
"""
Reset the underlying matrix to the identity transform.
"""
self._mtx = np.identity(3)
self.invalidate()
return self
def rotate(self, theta):
"""
Add a rotation (in radians) to this transform in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
a = np.cos(theta)
b = np.sin(theta)
rotate_mtx = np.array(
[[a, -b, 0.0], [b, a, 0.0], [0.0, 0.0, 1.0]],
np.float_)
self._mtx = np.dot(rotate_mtx, self._mtx)
self.invalidate()
return self
def rotate_deg(self, degrees):
"""
Add a rotation (in degrees) to this transform in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
return self.rotate(degrees * np.pi / 180.)
def rotate_around(self, x, y, theta):
"""
Add a rotation (in radians) around the point (x, y) in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
return self.translate(-x, -y).rotate(theta).translate(x, y)
def rotate_deg_around(self, x, y, degrees):
"""
Add a rotation (in degrees) around the point (x, y) in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
return self.translate(-x, -y).rotate_deg(degrees).translate(x, y)
def translate(self, tx, ty):
"""
Adds a translation in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
translate_mtx = np.array(
[[1.0, 0.0, tx], [0.0, 1.0, ty], [0.0, 0.0, 1.0]],
np.float_)
self._mtx = np.dot(translate_mtx, self._mtx)
self.invalidate()
return self
def scale(self, sx, sy=None):
"""
Adds a scale in place.
If *sy* is None, the same scale is applied in both the *x*- and
*y*-directions.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
if sy is None:
sy = sx
scale_mtx = np.array(
[[sx, 0.0, 0.0], [0.0, sy, 0.0], [0.0, 0.0, 1.0]],
np.float_)
self._mtx = np.dot(scale_mtx, self._mtx)
self.invalidate()
return self
def skew(self, xShear, yShear):
"""
Adds a skew in place.
*xShear* and *yShear* are the shear angles along the *x*- and
*y*-axes, respectively, in radians.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
rotX = np.tan(xShear)
rotY = np.tan(yShear)
skew_mtx = np.array(
[[1.0, rotX, 0.0], [rotY, 1.0, 0.0], [0.0, 0.0, 1.0]],
np.float_)
self._mtx = np.dot(skew_mtx, self._mtx)
self.invalidate()
return self
def skew_deg(self, xShear, yShear):
"""
Adds a skew in place.
*xShear* and *yShear* are the shear angles along the *x*- and
*y*-axes, respectively, in degrees.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
return self.skew(np.deg2rad(xShear), np.deg2rad(yShear))
def _get_is_separable(self):
mtx = self.get_matrix()
return mtx[0, 1] == 0.0 and mtx[1, 0] == 0.0
is_separable = property(_get_is_separable)
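# Illustrative usage sketch (hypothetical helper, not from the original
# module): the mutating helpers above (rotate, rotate_deg, translate, scale,
# skew) all return *self*, so a transform can be built by chaining calls, as
# their docstrings describe.
def _example_affine2d_chaining_sketch():
    t = Affine2D().rotate_deg(90).translate(1.0, 0.0)
    # (1, 0) rotated by 90 degrees is (0, 1); translating then adds (1, 0)
    assert np.allclose(t.transform_point((1.0, 0.0)), [1.0, 1.0])
    # to_values() exposes the six free matrix entries (a, b, c, d, e, f)
    return t.to_values()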
class IdentityTransform(Affine2DBase):
"""
    A special class that does one thing, the identity transform, in a
fast way.
"""
_mtx = np.identity(3)
def frozen(self):
return self
frozen.__doc__ = Affine2DBase.frozen.__doc__
def __repr__(self):
return "IdentityTransform()"
def get_matrix(self):
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
def transform(self, points):
return np.asanyarray(points)
transform.__doc__ = Affine2DBase.transform.__doc__
transform_affine = transform
transform_affine.__doc__ = Affine2DBase.transform_affine.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Affine2DBase.transform_non_affine.__doc__
def transform_path(self, path):
return path
transform_path.__doc__ = Affine2DBase.transform_path.__doc__
transform_path_affine = transform_path
transform_path_affine.__doc__ = Affine2DBase.transform_path_affine.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Affine2DBase.transform_path_non_affine.__doc__
def get_affine(self):
return self
get_affine.__doc__ = Affine2DBase.get_affine.__doc__
inverted = get_affine
inverted.__doc__ = Affine2DBase.inverted.__doc__
class BlendedGenericTransform(Transform):
"""
A "blended" transform uses one transform for the *x*-direction, and
another transform for the *y*-direction.
This "generic" version can handle any given child transform in the
*x*- and *y*-directions.
"""
input_dims = 2
output_dims = 2
is_separable = True
pass_through = True
def __init__(self, x_transform, y_transform, **kwargs):
"""
Create a new "blended" transform using *x_transform* to
transform the *x*-axis and *y_transform* to transform the
*y*-axis.
You will generally not call this constructor directly but use
the :func:`blended_transform_factory` function instead, which
can determine automatically which kind of blended transform to
create.
"""
# Here we ask: "Does it blend?"
Transform.__init__(self, **kwargs)
self._x = x_transform
self._y = y_transform
self.set_children(x_transform, y_transform)
self._affine = None
def __eq__(self, other):
# Note, this is an exact copy of BlendedAffine2D.__eq__
if isinstance(other, (BlendedAffine2D, BlendedGenericTransform)):
return (self._x == other._x) and (self._y == other._y)
elif self._x == self._y:
return self._x == other
else:
return NotImplemented
def contains_branch_seperately(self, transform):
# Note, this is an exact copy of BlendedAffine2D.contains_branch_seperately
return self._x.contains_branch(transform), self._y.contains_branch(transform)
@property
def depth(self):
return max([self._x.depth, self._y.depth])
def contains_branch(self, other):
# a blended transform cannot possibly contain a branch from two different transforms.
return False
def _get_is_affine(self):
return self._x.is_affine and self._y.is_affine
is_affine = property(_get_is_affine)
def _get_has_inverse(self):
return self._x.has_inverse and self._y.has_inverse
has_inverse = property(_get_has_inverse)
def frozen(self):
return blended_transform_factory(self._x.frozen(), self._y.frozen())
frozen.__doc__ = Transform.frozen.__doc__
def __repr__(self):
return "BlendedGenericTransform(%s,%s)" % (self._x, self._y)
def transform_non_affine(self, points):
if self._x.is_affine and self._y.is_affine:
return points
x = self._x
y = self._y
if x == y and x.input_dims == 2:
return x.transform_non_affine(points)
if x.input_dims == 2:
x_points = x.transform_non_affine(points)[:, 0:1]
else:
x_points = x.transform_non_affine(points[:, 0])
x_points = x_points.reshape((len(x_points), 1))
if y.input_dims == 2:
y_points = y.transform_non_affine(points)[:, 1:]
else:
y_points = y.transform_non_affine(points[:, 1])
y_points = y_points.reshape((len(y_points), 1))
if isinstance(x_points, MaskedArray) or isinstance(y_points, MaskedArray):
return ma.concatenate((x_points, y_points), 1)
else:
return np.concatenate((x_points, y_points), 1)
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def inverted(self):
return BlendedGenericTransform(self._x.inverted(), self._y.inverted())
inverted.__doc__ = Transform.inverted.__doc__
def get_affine(self):
if self._invalid or self._affine is None:
if self._x == self._y:
self._affine = self._x.get_affine()
else:
x_mtx = self._x.get_affine().get_matrix()
y_mtx = self._y.get_affine().get_matrix()
# This works because we already know the transforms are
# separable, though normally one would want to set b and
# c to zero.
mtx = np.vstack((x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0]))
self._affine = Affine2D(mtx)
self._invalid = 0
return self._affine
get_affine.__doc__ = Transform.get_affine.__doc__
class BlendedAffine2D(Affine2DBase):
"""
A "blended" transform uses one transform for the *x*-direction, and
another transform for the *y*-direction.
This version is an optimization for the case where both child
transforms are of type :class:`Affine2DBase`.
"""
is_separable = True
def __init__(self, x_transform, y_transform, **kwargs):
"""
Create a new "blended" transform using *x_transform* to
transform the *x*-axis and *y_transform* to transform the
*y*-axis.
Both *x_transform* and *y_transform* must be 2D affine
transforms.
You will generally not call this constructor directly but use
the :func:`blended_transform_factory` function instead, which
can determine automatically which kind of blended transform to
create.
"""
is_affine = x_transform.is_affine and y_transform.is_affine
is_separable = x_transform.is_separable and y_transform.is_separable
is_correct = is_affine and is_separable
if not is_correct:
msg = ("Both *x_transform* and *y_transform* must be 2D affine"
" transforms.")
raise ValueError(msg)
Transform.__init__(self, **kwargs)
self._x = x_transform
self._y = y_transform
self.set_children(x_transform, y_transform)
Affine2DBase.__init__(self)
self._mtx = None
def __eq__(self, other):
# Note, this is an exact copy of BlendedGenericTransform.__eq__
if isinstance(other, (BlendedAffine2D, BlendedGenericTransform)):
return (self._x == other._x) and (self._y == other._y)
elif self._x == self._y:
return self._x == other
else:
return NotImplemented
def contains_branch_seperately(self, transform):
        # Note, this is an exact copy of BlendedGenericTransform.contains_branch_seperately
return self._x.contains_branch(transform), self._y.contains_branch(transform)
def __repr__(self):
return "BlendedAffine2D(%s,%s)" % (self._x, self._y)
def get_matrix(self):
if self._invalid:
if self._x == self._y:
self._mtx = self._x.get_matrix()
else:
x_mtx = self._x.get_matrix()
y_mtx = self._y.get_matrix()
# This works because we already know the transforms are
# separable, though normally one would want to set b and
# c to zero.
self._mtx = np.vstack((x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0]))
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
def blended_transform_factory(x_transform, y_transform):
"""
Create a new "blended" transform using *x_transform* to transform
the *x*-axis and *y_transform* to transform the *y*-axis.
A faster version of the blended transform is returned for the case
where both child transforms are affine.
"""
if (isinstance(x_transform, Affine2DBase)
and isinstance(y_transform, Affine2DBase)):
return BlendedAffine2D(x_transform, y_transform)
return BlendedGenericTransform(x_transform, y_transform)
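# Illustrative usage sketch (hypothetical helper, not from the original
# module): a blended transform takes its x behaviour from one child and its
# y behaviour from the other, as the factory docstring above describes.
def _example_blended_transform_sketch():
    x_t = Affine2D().scale(2.0)           # only its x row is used
    y_t = Affine2D().translate(0.0, 5.0)  # only its y row is used
    blend = blended_transform_factory(x_t, y_t)
    # x is doubled by x_t, y is shifted by 5 by y_t
    assert np.allclose(blend.transform_point((1.0, 1.0)), [2.0, 6.0])
    return blend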
class CompositeGenericTransform(Transform):
"""
A composite transform formed by applying transform *a* then
transform *b*.
This "generic" version can handle any two arbitrary
transformations.
"""
pass_through = True
def __init__(self, a, b, **kwargs):
"""
Create a new composite transform that is the result of
applying transform *a* then transform *b*.
You will generally not call this constructor directly but use
the :func:`composite_transform_factory` function instead,
which can automatically choose the best kind of composite
transform instance to create.
"""
if a.output_dims != b.input_dims:
msg = ("The output dimension of 'a' must be equal to the input"
" dimensions of 'b'")
raise ValueError(msg)
self.input_dims = a.input_dims
self.output_dims = b.output_dims
Transform.__init__(self, **kwargs)
self._a = a
self._b = b
self.set_children(a, b)
is_affine = property(lambda self: self._a.is_affine and self._b.is_affine)
def frozen(self):
self._invalid = 0
frozen = composite_transform_factory(self._a.frozen(), self._b.frozen())
if not isinstance(frozen, CompositeGenericTransform):
return frozen.frozen()
return frozen
frozen.__doc__ = Transform.frozen.__doc__
def _invalidate_internal(self, value, invalidating_node):
# In some cases for a composite transform, an invalidating call to AFFINE_ONLY needs
# to be extended to invalidate the NON_AFFINE part too. These cases are when the right
# hand transform is non-affine and either:
# (a) the left hand transform is non affine
# (b) it is the left hand node which has triggered the invalidation
if value == Transform.INVALID_AFFINE \
and not self._b.is_affine \
and (not self._a.is_affine or invalidating_node is self._a):
value = Transform.INVALID
Transform._invalidate_internal(self, value=value,
invalidating_node=invalidating_node)
def __eq__(self, other):
if isinstance(other, (CompositeGenericTransform, CompositeAffine2D)):
return self is other or (self._a == other._a and self._b == other._b)
else:
return False
def _iter_break_from_left_to_right(self):
for lh_compliment, rh_compliment in self._a._iter_break_from_left_to_right():
yield lh_compliment, rh_compliment + self._b
for lh_compliment, rh_compliment in self._b._iter_break_from_left_to_right():
yield self._a + lh_compliment, rh_compliment
@property
def depth(self):
return self._a.depth + self._b.depth
def _get_is_affine(self):
return self._a.is_affine and self._b.is_affine
is_affine = property(_get_is_affine)
def _get_is_separable(self):
return self._a.is_separable and self._b.is_separable
is_separable = property(_get_is_separable)
if DEBUG:
def __str__(self):
return '(%s, %s)' % (self._a, self._b)
def __repr__(self):
return "CompositeGenericTransform(%r, %r)" % (self._a, self._b)
def transform_affine(self, points):
return self.get_affine().transform(points)
transform_affine.__doc__ = Transform.transform_affine.__doc__
def transform_non_affine(self, points):
if self._a.is_affine and self._b.is_affine:
return points
elif not self._a.is_affine and self._b.is_affine:
return self._a.transform_non_affine(points)
else:
return self._b.transform_non_affine(
self._a.transform(points))
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path_non_affine(self, path):
if self._a.is_affine and self._b.is_affine:
return path
elif not self._a.is_affine and self._b.is_affine:
return self._a.transform_path_non_affine(path)
else:
return self._b.transform_path_non_affine(
self._a.transform_path(path))
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def get_affine(self):
if not self._b.is_affine:
return self._b.get_affine()
else:
return Affine2D(np.dot(self._b.get_affine().get_matrix(),
self._a.get_affine().get_matrix()))
get_affine.__doc__ = Transform.get_affine.__doc__
def inverted(self):
return CompositeGenericTransform(self._b.inverted(), self._a.inverted())
inverted.__doc__ = Transform.inverted.__doc__
def _get_has_inverse(self):
return self._a.has_inverse and self._b.has_inverse
has_inverse = property(_get_has_inverse)
class CompositeAffine2D(Affine2DBase):
"""
A composite transform formed by applying transform *a* then transform *b*.
This version is an optimization that handles the case where both *a*
and *b* are 2D affines.
"""
def __init__(self, a, b, **kwargs):
"""
Create a new composite transform that is the result of
applying transform *a* then transform *b*.
Both *a* and *b* must be instances of :class:`Affine2DBase`.
You will generally not call this constructor directly but use
the :func:`composite_transform_factory` function instead,
which can automatically choose the best kind of composite
transform instance to create.
"""
if not a.is_affine or not b.is_affine:
raise ValueError("'a' and 'b' must be affine transforms")
if a.output_dims != b.input_dims:
msg = ("The output dimension of 'a' must be equal to the input"
" dimensions of 'b'")
raise ValueError(msg)
self.input_dims = a.input_dims
self.output_dims = b.output_dims
Affine2DBase.__init__(self, **kwargs)
self._a = a
self._b = b
self.set_children(a, b)
self._mtx = None
if DEBUG:
def __str__(self):
return '(%s, %s)' % (self._a, self._b)
@property
def depth(self):
return self._a.depth + self._b.depth
def _iter_break_from_left_to_right(self):
for lh_compliment, rh_compliment in self._a._iter_break_from_left_to_right():
yield lh_compliment, rh_compliment + self._b
for lh_compliment, rh_compliment in self._b._iter_break_from_left_to_right():
yield self._a + lh_compliment, rh_compliment
def __repr__(self):
return "CompositeAffine2D(%r, %r)" % (self._a, self._b)
def get_matrix(self):
if self._invalid:
self._mtx = np.dot(
self._b.get_matrix(),
self._a.get_matrix())
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
def composite_transform_factory(a, b):
"""
Create a new composite transform that is the result of applying
transform a then transform b.
    Shortcut versions of the composite transform are provided for the
case where both child transforms are affine, or one or the other
is the identity transform.
Composite transforms may also be created using the '+' operator,
e.g.::
c = a + b
"""
# check to see if any of a or b are IdentityTransforms. We use
# isinstance here to guarantee that the transforms will *always*
# be IdentityTransforms. Since TransformWrappers are mutable,
# use of equality here would be wrong.
if isinstance(a, IdentityTransform):
return b
elif isinstance(b, IdentityTransform):
return a
elif isinstance(a, Affine2D) and isinstance(b, Affine2D):
return CompositeAffine2D(a, b)
return CompositeGenericTransform(a, b)
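# Illustrative usage sketch (hypothetical helper, not from the original
# module): ``composite_transform_factory(a, b)`` (or ``a + b``) applies *a*
# first and then *b*; identity children are dropped, and two Affine2D
# children collapse into a single CompositeAffine2D, as explained above.
def _example_composite_transform_sketch():
    a = Affine2D().scale(2.0)
    b = Affine2D().translate(1.0, 1.0)
    c = composite_transform_factory(a, b)
    assert isinstance(c, CompositeAffine2D)
    # scale first: (1, 1) -> (2, 2); then translate: -> (3, 3)
    assert np.allclose(c.transform_point((1.0, 1.0)), [3.0, 3.0])
    # an identity child is simply dropped
    assert composite_transform_factory(a, IdentityTransform()) is a
    return c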
class BboxTransform(Affine2DBase):
"""
:class:`BboxTransform` linearly transforms points from one
:class:`Bbox` to another :class:`Bbox`.
"""
is_separable = True
def __init__(self, boxin, boxout, **kwargs):
"""
Create a new :class:`BboxTransform` that linearly transforms
points from *boxin* to *boxout*.
"""
if not boxin.is_bbox or not boxout.is_bbox:
msg = "'boxin' and 'boxout' must be bbox"
raise ValueError(msg)
Affine2DBase.__init__(self, **kwargs)
self._boxin = boxin
self._boxout = boxout
self.set_children(boxin, boxout)
self._mtx = None
self._inverted = None
def __repr__(self):
return "BboxTransform(%r, %r)" % (self._boxin, self._boxout)
def get_matrix(self):
if self._invalid:
inl, inb, inw, inh = self._boxin.bounds
outl, outb, outw, outh = self._boxout.bounds
x_scale = outw / inw
y_scale = outh / inh
if DEBUG and (x_scale == 0 or y_scale == 0):
raise ValueError("Transforming from or to a singular bounding box.")
self._mtx = np.array([[x_scale, 0.0 , (-inl*x_scale+outl)],
[0.0 , y_scale, (-inb*y_scale+outb)],
[0.0 , 0.0 , 1.0 ]],
np.float_)
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
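# Illustrative usage sketch (hypothetical helper, not from the original
# module): BboxTransform maps one bounding box linearly onto another.  It
# assumes the ``Bbox.from_bounds(x0, y0, width, height)`` constructor defined
# earlier in this module.
def _example_bbox_transform_sketch():
    boxin = Bbox.from_bounds(0.0, 0.0, 1.0, 1.0)      # unit square
    boxout = Bbox.from_bounds(10.0, 20.0, 2.0, 4.0)
    t = BboxTransform(boxin, boxout)
    # the centre of boxin maps to the centre of boxout
    assert np.allclose(t.transform_point((0.5, 0.5)), [11.0, 22.0])
    return t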
class BboxTransformTo(Affine2DBase):
"""
:class:`BboxTransformTo` is a transformation that linearly
transforms points from the unit bounding box to a given
:class:`Bbox`.
"""
is_separable = True
def __init__(self, boxout, **kwargs):
"""
Create a new :class:`BboxTransformTo` that linearly transforms
points from the unit bounding box to *boxout*.
"""
if not boxout.is_bbox:
raise ValueError("'boxout' must be bbox")
Affine2DBase.__init__(self, **kwargs)
self._boxout = boxout
self.set_children(boxout)
self._mtx = None
self._inverted = None
def __repr__(self):
return "BboxTransformTo(%r)" % (self._boxout)
def get_matrix(self):
if self._invalid:
outl, outb, outw, outh = self._boxout.bounds
if DEBUG and (outw == 0 or outh == 0):
raise ValueError("Transforming to a singular bounding box.")
self._mtx = np.array([[outw, 0.0, outl],
[ 0.0, outh, outb],
[ 0.0, 0.0, 1.0]],
np.float_)
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class BboxTransformToMaxOnly(BboxTransformTo):
"""
    :class:`BboxTransformToMaxOnly` is a transformation that linearly
transforms points from the unit bounding box to a given
:class:`Bbox` with a fixed upper left of (0, 0).
"""
def __repr__(self):
return "BboxTransformToMaxOnly(%r)" % (self._boxout)
def get_matrix(self):
if self._invalid:
xmax, ymax = self._boxout.max
if DEBUG and (xmax == 0 or ymax == 0):
raise ValueError("Transforming to a singular bounding box.")
self._mtx = np.array([[xmax, 0.0, 0.0],
[ 0.0, ymax, 0.0],
[ 0.0, 0.0, 1.0]],
np.float_)
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class BboxTransformFrom(Affine2DBase):
"""
:class:`BboxTransformFrom` linearly transforms points from a given
:class:`Bbox` to the unit bounding box.
"""
is_separable = True
def __init__(self, boxin, **kwargs):
if not boxin.is_bbox:
raise ValueError("'boxin' must be bbox")
Affine2DBase.__init__(self, **kwargs)
self._boxin = boxin
self.set_children(boxin)
self._mtx = None
self._inverted = None
def __repr__(self):
return "BboxTransformFrom(%r)" % (self._boxin)
def get_matrix(self):
if self._invalid:
inl, inb, inw, inh = self._boxin.bounds
if DEBUG and (inw == 0 or inh == 0):
raise ValueError("Transforming from a singular bounding box.")
x_scale = 1.0 / inw
y_scale = 1.0 / inh
self._mtx = np.array([[x_scale, 0.0 , (-inl*x_scale)],
[0.0 , y_scale, (-inb*y_scale)],
[0.0 , 0.0 , 1.0 ]],
np.float_)
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
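# Illustrative usage sketch (hypothetical helper, not from the original
# module): BboxTransformTo maps the unit square onto a Bbox, and
# BboxTransformFrom maps that Bbox back onto the unit square.  It assumes
# ``Bbox.from_bounds`` from earlier in this module.
def _example_bbox_to_from_sketch():
    box = Bbox.from_bounds(10.0, 20.0, 2.0, 4.0)
    to_box = BboxTransformTo(box)
    from_box = BboxTransformFrom(box)
    # unit-square corner (1, 1) lands on the box corner (12, 24) ...
    assert np.allclose(to_box.transform_point((1.0, 1.0)), [12.0, 24.0])
    # ... and BboxTransformFrom undoes that mapping
    assert np.allclose(from_box.transform_point((12.0, 24.0)), [1.0, 1.0])
    return to_box, from_box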
class ScaledTranslation(Affine2DBase):
"""
A transformation that translates by *xt* and *yt*, after *xt* and *yt*
    have been transformed by the given transform *scale_trans*.
"""
def __init__(self, xt, yt, scale_trans, **kwargs):
Affine2DBase.__init__(self, **kwargs)
self._t = (xt, yt)
self._scale_trans = scale_trans
self.set_children(scale_trans)
self._mtx = None
self._inverted = None
def __repr__(self):
return "ScaledTranslation(%r)" % (self._t,)
def get_matrix(self):
if self._invalid:
xt, yt = self._scale_trans.transform_point(self._t)
self._mtx = np.array([[1.0, 0.0, xt],
[0.0, 1.0, yt],
[0.0, 0.0, 1.0]],
np.float_)
self._invalid = 0
self._inverted = None
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
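# Illustrative usage sketch (hypothetical helper, not from the original
# module): ScaledTranslation offsets by (xt, yt) *after* passing them through
# *scale_trans*, which is how matplotlib expresses offsets in physical units
# (e.g. points via a dpi transform).  Here a plain scale stands in for that.
def _example_scaled_translation_sketch():
    st = ScaledTranslation(2.0, 3.0, Affine2D().scale(10.0))
    # (2, 3) scaled by 10 gives an effective translation of (20, 30)
    assert np.allclose(st.transform_point((0.0, 0.0)), [20.0, 30.0])
    return st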
class TransformedPath(TransformNode):
"""
A :class:`TransformedPath` caches a non-affine transformed copy of
the :class:`~matplotlib.path.Path`. This cached copy is
automatically updated when the non-affine part of the transform
changes.
.. note::
Paths are considered immutable by this class. Any update to the
path's vertices/codes will not trigger a transform recomputation.
"""
def __init__(self, path, transform):
"""
Create a new :class:`TransformedPath` from the given
:class:`~matplotlib.path.Path` and :class:`Transform`.
"""
if not isinstance(transform, Transform):
msg = ("'transform' must be an instance of"
" 'matplotlib.transform.Transform'")
raise ValueError(msg)
TransformNode.__init__(self)
self._path = path
self._transform = transform
self.set_children(transform)
self._transformed_path = None
self._transformed_points = None
def _revalidate(self):
# only recompute if the invalidation includes the non_affine part of the transform
if ((self._invalid & self.INVALID_NON_AFFINE == self.INVALID_NON_AFFINE)
or self._transformed_path is None):
self._transformed_path = \
self._transform.transform_path_non_affine(self._path)
self._transformed_points = \
Path._fast_from_codes_and_verts(
self._transform.transform_non_affine(self._path.vertices),
None,
{'interpolation_steps': self._path._interpolation_steps,
'should_simplify': self._path.should_simplify})
self._invalid = 0
def get_transformed_points_and_affine(self):
"""
Return a copy of the child path, with the non-affine part of
the transform already applied, along with the affine part of
the path necessary to complete the transformation. Unlike
:meth:`get_transformed_path_and_affine`, no interpolation will
be performed.
"""
self._revalidate()
return self._transformed_points, self.get_affine()
def get_transformed_path_and_affine(self):
"""
Return a copy of the child path, with the non-affine part of
the transform already applied, along with the affine part of
the path necessary to complete the transformation.
"""
self._revalidate()
return self._transformed_path, self.get_affine()
def get_fully_transformed_path(self):
"""
Return a fully-transformed copy of the child path.
"""
self._revalidate()
return self._transform.transform_path_affine(self._transformed_path)
def get_affine(self):
return self._transform.get_affine()
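# Illustrative usage sketch (hypothetical helper, not from the original
# module): TransformedPath caches the non-affine part of the transformed path
# and re-applies only the affine part on demand.  It assumes ``Path`` from
# matplotlib.path, which is imported earlier in this module.
def _example_transformed_path_sketch():
    path = Path(np.array([[0.0, 0.0], [1.0, 1.0]]))
    tpath = TransformedPath(path, Affine2D().scale(2.0))
    out = tpath.get_fully_transformed_path()
    assert np.allclose(out.vertices, [[0.0, 0.0], [2.0, 2.0]])
    return out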
def nonsingular(vmin, vmax, expander=0.001, tiny=1e-15, increasing=True):
'''
Modify the endpoints of a range as needed to avoid singularities.
*vmin*, *vmax*
the initial endpoints.
*tiny*
threshold for the ratio of the interval to the maximum absolute
value of its endpoints. If the interval is smaller than
this, it will be expanded. This value should be around
1e-15 or larger; otherwise the interval will be approaching
the double precision resolution limit.
*expander*
fractional amount by which *vmin* and *vmax* are expanded if
the original interval is too small, based on *tiny*.
*increasing*: [True | False]
If True (default), swap *vmin*, *vmax* if *vmin* > *vmax*
Returns *vmin*, *vmax*, expanded and/or swapped if necessary.
If either input is inf or NaN, or if both inputs are 0 or very
close to zero, it returns -*expander*, *expander*.
'''
if (not np.isfinite(vmin)) or (not np.isfinite(vmax)):
return -expander, expander
swapped = False
if vmax < vmin:
vmin, vmax = vmax, vmin
swapped = True
maxabsvalue = max(abs(vmin), abs(vmax))
if maxabsvalue < (1e6 / tiny) * MINFLOAT:
vmin = -expander
vmax = expander
elif vmax - vmin <= maxabsvalue * tiny:
if vmax == 0 and vmin == 0:
vmin = -expander
vmax = expander
else:
vmin -= expander*abs(vmin)
vmax += expander*abs(vmax)
if swapped and not increasing:
vmin, vmax = vmax, vmin
return vmin, vmax
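# Illustrative usage sketch (hypothetical helper, not from the original
# module): two typical uses of nonsingular(), assuming MINFLOAT (defined
# earlier in this module) is a tiny positive constant.
def _example_nonsingular_sketch():
    # a zero-width interval around zero becomes (-expander, expander)
    assert nonsingular(0.0, 0.0, expander=0.001) == (-0.001, 0.001)
    # reversed endpoints are swapped because increasing=True by default
    assert nonsingular(3.0, 1.0) == (1.0, 3.0)
    return nonsingular(3.0, 1.0)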
def interval_contains(interval, val):
a, b = interval
return (
((a < b) and (a <= val and b >= val))
or (b <= val and a >= val))
def interval_contains_open(interval, val):
a, b = interval
return (
((a < b) and (a < val and b > val))
or (b < val and a > val))
def offset_copy(trans, fig=None, x=0.0, y=0.0, units='inches'):
'''
Return a new transform with an added offset.
args:
trans is any transform
kwargs:
fig is the current figure; it can be None if units are 'dots'
x, y give the offset
units is 'inches', 'points' or 'dots'
'''
if units == 'dots':
return trans + Affine2D().translate(x, y)
if fig is None:
raise ValueError('For units of inches or points a fig kwarg is needed')
if units == 'points':
x /= 72.0
y /= 72.0
elif not units == 'inches':
raise ValueError('units must be dots, points, or inches')
return trans + ScaledTranslation(x, y, fig.dpi_scale_trans)
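# Illustrative usage sketch (hypothetical helper, not from the original
# module): with units='dots' no figure is needed, since offset_copy simply
# appends a pixel translation to the given transform via the '+' operator
# defined earlier in this module.
def _example_offset_copy_sketch():
    base = Affine2D().scale(2.0)
    shifted = offset_copy(base, x=3.0, y=4.0, units='dots')
    # scale first: (1, 1) -> (2, 2); then offset by (3, 4) dots -> (5, 6)
    assert np.allclose(shifted.transform_point((1.0, 1.0)), [5.0, 6.0])
    return shifted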
|
mit
|
berkeley-stat159/project-lambda
|
code/stat159lambda/reproduction/inter_run_diagnostics.py
|
1
|
1629
|
from __future__ import print_function, division
import numpy as np
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import gc
from stat159lambda.config import REPO_HOME_PATH, NUM_OFFSET_VOLUMES
from stat159lambda.utils import data_path as dp
def calc_vol_rms_diff(data_file_path):
"""
Finds the difference between data[n+1] and data[n] for all elements in data
array to calculate the root mean squares. Does not include the data points
when they are tuning in the first 17 seconds.
Parameters
----------
data_file_path : string
Returns
-------
vol_rms_diff : array
"""
data = np.load(open(data_file_path))
diff_data = np.diff(data, axis=1)
del data
gc.collect()
vol_rms_diff = np.sqrt(np.mean(diff_data**2, axis=0))
return vol_rms_diff[NUM_OFFSET_VOLUMES:]
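# Illustrative sketch (hypothetical helper, not part of the original module):
# the same RMS-of-successive-differences formula as calc_vol_rms_diff, shown
# on a tiny in-memory array instead of a file on disk; the NUM_OFFSET_VOLUMES
# slicing is omitted here for clarity.
def _example_vol_rms_diff_formula():
    data = np.array([[0.0, 1.0, 3.0],
                     [0.0, 2.0, 6.0]])
    diff_data = np.diff(data, axis=1)                  # volume-to-volume diffs
    vol_rms_diff = np.sqrt(np.mean(diff_data ** 2, axis=0))
    # step 1: diffs (1, 2) -> sqrt(2.5); step 2: diffs (2, 4) -> sqrt(10)
    assert np.allclose(vol_rms_diff, [np.sqrt(2.5), np.sqrt(10.0)])
    return vol_rms_diff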
def save_plot(vol_rms_diff, subj_num):
"""
Plots the root mean square differences for a particular subject and saves
that plot into the figures folder
Parameters
----------
vol_rms_diff : array
subj_num : int
Returns
-------
None
"""
plt.plot(vol_rms_diff)
plot_path = '{0}/figures/subj{1}_vol_rms_diff.png'.format(REPO_HOME_PATH,
subj_num)
plt.savefig(plot_path)
print('Saved {0}'.format(plot_path))
if __name__ == '__main__':
subj_num, fwhm_mm = 1, 8
vol_rms_diff = calc_vol_rms_diff(dp.get_smoothed_2d_path(subj_num,
fwhm_mm))
save_plot(vol_rms_diff, subj_num)
|
bsd-3-clause
|
dpaiton/OpenPV
|
pv-core/analysis/python/plot_membrane.py
|
1
|
2071
|
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.cm as cm
import PVReadWeights as rw
import PVConversions as conv
if len(sys.argv) < 2:
print "usage: membrane activity-filename"
print len(sys.argv)
sys.exit()
a = open(sys.argv[1], "r")
v = []
t = []
t2 = []
vth = []
act = []
ge = []
gi = []
gib = []
aa = []
aa2 = []
actcount = 0
for line in a:
a = line
b = a.find("V=")
h = a.find("t=")
bth = a.find("Vth=")
gep = a.find("G_E=")
gip = a.find("G_I=")
gibp = a.find("G_IB=")
actp = a.find("a=")
c = a[b:].split()
cth = a[bth:].split()
i = a[h:].split()
gif = a[gip:].split()
gef = a[gep:].split()
gibf = a[gibp:].split()
actf = a[actp:].split()
actm = actf[0].strip("a=")
actmo = float(actm)
if actmo == 1.0:
actcount += 1
d = c[0].strip("V=")
if len(cth[0]) > 6:
dth = cth[0].strip("Vth=")
else:
dth = cth[1]
j = i[0].strip("t=")
 if len(gef[0]) > 5:
   gem = gef[0].strip("G_E=")
 else:
   gem = gef[1]
if len(gibf[0]) > 6:
gibm = gibf[0].strip("G_IB=")
else:
gibm = gibf[1]
#if len(gibm) == 0:
# gibm = gibf[1]
if len(gif[0]) > 5:
gim = gif[0].strip("G_I=")
else:
gim = gif[1]
v.append(d)
vth.append(dth)
t.append(j)
ge.append(gem)
gi.append(gim)
gib.append(gibm)
act.append(actm)
x = t
y = v
y2 = vth
y3 = ge
y4 = gi
y5 = gib
plt.figure(1)
plt.subplot(211)
plt.plot(x, y)
plt.plot(x, y2, 'r')
plt.ylabel('Membrane Rate')
plt.title('Membrane Activity')
plt.grid(True)
plt.subplot(212)
plt.ylabel('Conductance Rate')
plt.xlabel('Time (ms)\nG_E = green, G_I = yellow, G_IB = black\n Vth = red, V = blue')
plt.title('Conductance Activity Fired %.2f times' %(actcount))
if len(y3) > 0:
plt.plot(x, y3, 'g')
if len(y4) > 0:
plt.plot(x, y4, 'y')
if len(y5) > 0:
plt.plot(x, y5, 'k')
plt.grid(True)
#plt.annotate(", xy = (30, 2.75), xytext='data')
plt.show()
sys.exit()
|
epl-1.0
|
sergiohgz/incubator-airflow
|
tests/contrib/operators/test_hive_to_dynamodb_operator.py
|
10
|
5074
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
import unittest
import mock
import pandas as pd
from airflow import configuration, DAG
configuration.load_test_config()
import datetime
from airflow.contrib.hooks.aws_dynamodb_hook import AwsDynamoDBHook
import airflow.contrib.operators.hive_to_dynamodb
DEFAULT_DATE = datetime.datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
try:
from moto import mock_dynamodb2
except ImportError:
mock_dynamodb2 = None
class HiveToDynamoDBTransferOperatorTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
dag = DAG('test_dag_id', default_args=args)
self.dag = dag
self.sql = 'SELECT 1'
self.hook = AwsDynamoDBHook(
aws_conn_id='aws_default', region_name='us-east-1')
def process_data(self, data, *args, **kwargs):
return json.loads(data.to_json(orient='records'))
@unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
@mock_dynamodb2
def test_get_conn_returns_a_boto3_connection(self):
hook = AwsDynamoDBHook(aws_conn_id='aws_default')
self.assertIsNotNone(hook.get_conn())
@mock.patch('airflow.hooks.hive_hooks.HiveServer2Hook.get_pandas_df',
return_value=pd.DataFrame(data=[('1', 'sid')], columns=['id', 'name']))
@unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
@mock_dynamodb2
def test_get_records_with_schema(self, get_results_mock):
# this table needs to be created in production
table = self.hook.get_conn().create_table(
TableName='test_airflow',
KeySchema=[
{
'AttributeName': 'id',
'KeyType': 'HASH'
},
],
AttributeDefinitions=[
{
'AttributeName': 'name',
'AttributeType': 'S'
}
],
ProvisionedThroughput={
'ReadCapacityUnits': 10,
'WriteCapacityUnits': 10
}
)
operator = airflow.contrib.operators.hive_to_dynamodb.HiveToDynamoDBTransferOperator(
sql=self.sql,
table_name="test_airflow",
task_id='hive_to_dynamodb_check',
table_keys=['id'],
dag=self.dag)
operator.execute(None)
table = self.hook.get_conn().Table('test_airflow')
table.meta.client.get_waiter(
'table_exists').wait(TableName='test_airflow')
self.assertEqual(table.item_count, 1)
@mock.patch('airflow.hooks.hive_hooks.HiveServer2Hook.get_pandas_df',
return_value=pd.DataFrame(data=[('1', 'sid'), ('1', 'gupta')], columns=['id', 'name']))
@unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
@mock_dynamodb2
def test_pre_process_records_with_schema(self, get_results_mock):
# this table needs to be created in production
table = self.hook.get_conn().create_table(
TableName='test_airflow',
KeySchema=[
{
'AttributeName': 'id',
'KeyType': 'HASH'
},
],
AttributeDefinitions=[
{
'AttributeName': 'name',
'AttributeType': 'S'
}
],
ProvisionedThroughput={
'ReadCapacityUnits': 10,
'WriteCapacityUnits': 10
}
)
operator = airflow.contrib.operators.hive_to_dynamodb.HiveToDynamoDBTransferOperator(
sql=self.sql,
table_name='test_airflow',
task_id='hive_to_dynamodb_check',
table_keys=['id'],
pre_process=self.process_data,
dag=self.dag)
operator.execute(None)
table = self.hook.get_conn().Table('test_airflow')
table.meta.client.get_waiter(
'table_exists').wait(TableName='test_airflow')
self.assertEqual(table.item_count, 1)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
VisTrails/VisTrails
|
vistrails/packages/matplotlib/bases.py
|
2
|
6929
|
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
import pylab
import urllib
from matplotlib.backend_bases import FigureCanvasBase
from vistrails.core.configuration import ConfigField
from vistrails.core.modules.basic_modules import CodeRunnerMixin
from vistrails.core.modules.config import ModuleSettings, IPort
from vistrails.core.modules.output_modules import ImageFileMode, \
ImageFileModeConfig, OutputModule, IPythonModeConfig, IPythonMode
from vistrails.core.modules.vistrails_module import Module, NotCacheable
################################################################################
class MplProperties(Module):
def compute(self, artist):
pass
class Artist(object):
def update_sub_props(self, objs):
# must implement in subclass
pass
#base class for 2D plots
class MplPlot(NotCacheable, Module):
pass
class MplSource(CodeRunnerMixin, MplPlot):
"""
MplSource is a module similar to PythonSource. The user can enter
Matplotlib code into this module. This will then get connected to
    MplFigure to draw the figure. Please note that code entered in
    this module should be limited to the subplot() scope only; if
    figure-level commands, e.g. figure() or show(), are used, the
    result is undefined.
"""
_input_ports = [('source', '(basic:String)')]
_output_ports = [('value', '(MplSource)')]
def compute(self):
source = self.get_input('source')
self.set_output('value', lambda figure: self.plot_figure(figure,
source))
def plot_figure(self, figure, source):
s = ('from pylab import *\n'
'from numpy import *\n' +
'figure(%d)\n' % figure.number +
urllib.unquote(source))
self.run_code(s, use_input=True, use_output=True)
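# Illustrative sketch (hypothetical helper, not part of the original
# package): it only shows how plot_figure() assembles the code string it
# executes, i.e. the user's URL-quoted source prefixed with pylab/numpy
# imports and a figure() selector.  The figure number 1 below is arbitrary.
def _example_mplsource_code_string(source='plot([1, 2, 3])'):
    return ('from pylab import *\n'
            'from numpy import *\n' +
            'figure(%d)\n' % 1 +
            urllib.unquote(source))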
class MplFigure(Module):
_input_ports = [IPort("addPlot", "(MplPlot)", depth=1),
("axesProperties", "(MplAxesProperties)"),
("figureProperties", "(MplFigureProperties)"),
("setLegend", "(MplLegend)")]
_output_ports = [("figure", "(MplFigure)")]
def compute(self):
# Create a figure
figInstance = pylab.figure()
pylab.hold(True)
# Run the plots
plots = self.get_input("addPlot")
for plot in plots:
plot(figInstance)
if self.has_input("figureProperties"):
figure_props = self.get_input("figureProperties")
figure_props.update_props(figInstance)
if self.has_input("axesProperties"):
axes_props = self.get_input("axesProperties")
axes_props.update_props(figInstance.gca())
if self.has_input("setLegend"):
legend = self.get_input("setLegend")
figInstance.gca().legend()
self.set_output("figure", figInstance)
class MplContourSet(Module):
pass
class MplQuadContourSet(MplContourSet):
pass
class MplFigureToFile(ImageFileMode):
config_cls = ImageFileModeConfig
formats = ['pdf', 'png', 'jpg']
def compute_output(self, output_module, configuration):
figure = output_module.get_input('value')
w = configuration["width"]
h = configuration["height"]
img_format = self.get_format(configuration)
filename = self.get_filename(configuration, suffix='.%s' % img_format)
w_inches = w / 72.0
h_inches = h / 72.0
previous_size = tuple(figure.get_size_inches())
figure.set_size_inches(w_inches, h_inches)
canvas = FigureCanvasBase(figure)
canvas.print_figure(filename, dpi=72, format=img_format)
figure.set_size_inches(previous_size[0],previous_size[1])
canvas.draw()
class MplIPythonModeConfig(IPythonModeConfig):
mode_type = "ipython"
_fields = [ConfigField('width', None, int),
ConfigField('height', None, int)]
class MplIPythonMode(IPythonMode):
mode_type = "ipython"
config_cls = MplIPythonModeConfig
def compute_output(self, output_module, configuration):
from IPython.display import set_matplotlib_formats
from IPython.core.display import display
set_matplotlib_formats('png')
# TODO: use size from configuration
fig = output_module.get_input('value')
display(fig)
class MplFigureOutput(OutputModule):
_settings = ModuleSettings(configure_widget="vistrails.gui.modules.output_configuration:OutputModuleConfigurationWidget")
_input_ports = [('value', 'MplFigure')]
_output_modes = [MplFigureToFile, MplIPythonMode]
_modules = [(MplProperties, {'abstract': True}),
(MplPlot, {'abstract': True}),
(MplSource, {'configureWidgetType': \
('vistrails.packages.matplotlib.widgets',
'MplSourceConfigurationWidget')}),
MplFigure,
MplContourSet,
MplQuadContourSet,
MplFigureOutput]
|
bsd-3-clause
|
johndpope/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/multioutput_test.py
|
136
|
1696
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-output tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.estimators._sklearn import mean_squared_error
from tensorflow.python.platform import test
class MultiOutputTest(test.TestCase):
"""Multi-output tests."""
def testMultiRegression(self):
random.seed(42)
rng = np.random.RandomState(1)
x = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(x).ravel(), np.pi * np.cos(x).ravel()]).T
regressor = learn.LinearRegressor(
feature_columns=learn.infer_real_valued_columns_from_input(x),
label_dimension=2)
regressor.fit(x, y, steps=100)
score = mean_squared_error(np.array(list(regressor.predict_scores(x))), y)
self.assertLess(score, 10, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
|
apache-2.0
|
villalonreina/dipy
|
scratch/very_scratch/simulation_comparisons.py
|
20
|
12707
|
import nibabel
import os
import numpy as np
import dipy as dp
#import dipy.core.generalized_q_sampling as dgqs#dipy.
import dipy.reconst.gqi as dgqs
import dipy.io.pickles as pkl
import scipy as sp
from matplotlib.mlab import find
#import dipy.core.sphere_plots as splots
import dipy.core.sphere_stats as sphats
import dipy.core.geometry as geometry
import get_vertices as gv
#old SimData files
'''
results_SNR030_1fibre
results_SNR030_1fibre+iso
results_SNR030_2fibres_15deg
results_SNR030_2fibres_30deg
results_SNR030_2fibres_60deg
results_SNR030_2fibres_90deg
results_SNR030_2fibres+iso_15deg
results_SNR030_2fibres+iso_30deg
results_SNR030_2fibres+iso_60deg
results_SNR030_2fibres+iso_90deg
results_SNR030_isotropic
'''
#fname='/home/ian/Data/SimData/results_SNR030_1fibre'
''' The file has one row for every voxel; every voxel is repeated 1000
times with the same noise level, then we have 100 different
directions. 1000 * 100 is the number of all rows.
The 100 conditions are given by 10 polar angles (in degrees) 0, 20, 40, 60, 80,
80, 60, 40, 20 and 0, and each of these with longitude angle 0, 40, 80,
120, 160, 200, 240, 280, 320, 360.
'''
#new complete SimVoxels files
simdata = ['fibres_2_SNR_80_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_60_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_100_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_40_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_60_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_100_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_60_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_80_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_80_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_60_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_40_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_80_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_20_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_60_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_100_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_100_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_1_SNR_20_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_40_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_80_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_80_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_20_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_60_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_80_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_60_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_100_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_20_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_80_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_80_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_40_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_60_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_60_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_60_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_80_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_1_SNR_40_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_20_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00']
simdir = '/home/ian/Data/SimVoxels/'
def gq_tn_calc_save():
for simfile in simdata:
dataname = simfile
print dataname
sim_data=np.loadtxt(simdir+dataname)
marta_table_fname='/home/ian/Data/SimData/Dir_and_bvals_DSI_marta.txt'
b_vals_dirs=np.loadtxt(marta_table_fname)
bvals=b_vals_dirs[:,0]*1000
gradients=b_vals_dirs[:,1:]
gq = dp.GeneralizedQSampling(sim_data,bvals,gradients)
gqfile = simdir+'gq/'+dataname+'.pkl'
pkl.save_pickle(gqfile,gq)
'''
gq.IN gq.__doc__ gq.glob_norm_param
gq.QA gq.__init__ gq.odf
gq.__class__ gq.__module__ gq.q2odf_params
'''
tn = dp.Tensor(sim_data,bvals,gradients)
tnfile = simdir+'tn/'+dataname+'.pkl'
pkl.save_pickle(tnfile,tn)
'''
tn.ADC tn.__init__ tn._getevals
tn.B tn.__module__ tn._getevecs
tn.D tn.__new__ tn._getndim
tn.FA tn.__reduce__ tn._getshape
tn.IN tn.__reduce_ex__ tn._setevals
tn.MD tn.__repr__ tn._setevecs
tn.__class__ tn.__setattr__ tn.adc
tn.__delattr__ tn.__sizeof__ tn.evals
tn.__dict__ tn.__str__ tn.evecs
tn.__doc__ tn.__subclasshook__ tn.fa
tn.__format__ tn.__weakref__ tn.md
tn.__getattribute__ tn._evals tn.ndim
tn.__getitem__ tn._evecs tn.shape
tn.__hash__ tn._getD
'''
''' The file has one row per voxel; every voxel is repeated 1000 times
with the same noise level, and there are 100 different directions, so
100 * 1000 is the total number of rows.
At the moment this module is hardwired to the use of the EDS362
spherical mesh. I am assuming (needs testing) that directions 181 to 361
are the antipodal partners of directions 0 to 180. So when counting the
number of different vertices that occur as maximal directions we will map
the indices modulo 181.
'''
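# A minimal sketch of the modulo-181 folding described above (illustrative only;
# the sample indices below are made up): antipodal vertex indices 181..361 map back
# onto their partners 0..180, so both members of a pair count as the same maximum.
_demo_indices = np.array([5, 186, 200, 360])
assert list(np.remainder(_demo_indices, 181)) == [5, 5, 19, 179]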
def analyze_maxima(indices, max_dirs, subsets):
'''This calculates the eigenstats for each of the replicated batches
of the simulation data
'''
results = []
for direction in subsets:
batch = max_dirs[direction,:,:]
index_variety = np.array([len(set(np.remainder(indices[direction,:],181)))])
#normed_centroid, polar_centroid, centre, b1 = sphats.eigenstats(batch)
centre, b1 = sphats.eigenstats(batch)
# make azimuth be in range (0,360) rather than (-180,180)
centre[1] += 360*(centre[1] < 0)
#results.append(np.concatenate((normed_centroid, polar_centroid, centre, b1, index_variety)))
results.append(np.concatenate((centre, b1, index_variety)))
return results
#dt_first_directions = tn.evecs[:,:,0].reshape((100,1000,3))
# these are the principal directions for the full set of simulations
#gq_tn_calc_save()
eds=np.load(os.path.join(os.path.dirname(dp.__file__),'core','matrices','evenly_distributed_sphere_362.npz'))
odf_vertices=eds['vertices']
def run_comparisons(sample_data=35):
for simfile in [simdata[sample_data]]:
dataname = simfile
print dataname
sim_data=np.loadtxt(simdir+dataname)
# gqfile = simdir+'gq/'+dataname+'.pkl'
# gq = pkl.load_pickle(gqfile)
tnfile = simdir+'tn/'+dataname+'.pkl'
tn = pkl.load_pickle(tnfile)
dt_first_directions_in=odf_vertices[tn.IN]
dt_indices = tn.IN.reshape((100,1000))
dt_results = analyze_maxima(dt_indices, dt_first_directions_in.reshape((100,1000,3)),range(10,91))
# gq_indices = np.array(gq.IN[:,0],dtype='int').reshape((100,1000))
# gq_first_directions_in=odf_vertices[np.array(gq.IN[:,0],dtype='int')]
#print gq_first_directions_in.shape
# gq_results = analyze_maxima(gq_indices, gq_first_directions_in.reshape((100,1000,3)),range(100))
#for gqi see example dicoms_2_tracks gq.IN[:,0]
np.set_printoptions(precision=6, suppress=True, linewidth=200, threshold=5000)
out = open('/home/ian/Data/SimVoxels/Out/'+'***_'+dataname,'w')
# results = np.hstack((np.vstack(dt_results), np.vstack(gq_results)))
results = np.vstack(dt_results)
print >> out, results[:,:]
out.close()
#up = dt_batch[:,2]>= 0
#splots.plot_sphere(dt_batch[up], 'batch '+str(direction))
#splots.plot_lambert(dt_batch[up],'batch '+str(direction), centre)
#spread = gq.q2odf_params e,v = np.linalg.eigh(np.dot(spread,spread.transpose())) effective_dimension = len(find(np.cumsum(e) > 0.05*np.sum(e))) #95%
#rotated = np.dot(dt_batch,evecs)
#rot_evals, rot_evecs = np.linalg.eig(np.dot(rotated.T,rotated)/rotated.shape[0])
#eval_order = np.argsort(rot_evals)
#rotated = rotated[:,eval_order]
#up = rotated[:,2]>= 0
#splot.plot_sphere(rotated[up],'first1000')
#splot.plot_lambert(rotated[up],'batch '+str(direction))
def run_gq_sims(sample_data=[35]):
for simfile in [simdata[sample] for sample in sample_data]:
dataname = simfile
print dataname
sim_data=np.loadtxt(simdir+dataname)
marta_table_fname='/home/ian/Data/SimData/Dir_and_bvals_DSI_marta.txt'
b_vals_dirs=np.loadtxt(marta_table_fname)
bvals=b_vals_dirs[:,0]*1000
gradients=b_vals_dirs[:,1:]
for j in range(10):
s = sim_data[10000+j,:]
gqs = dp.GeneralizedQSampling(s.reshape((1,102)),bvals,gradients,Lambda=7)
t0, t1, t2, npa = gqs.npa(s, width = 5)
print t0, t1, t2, npa
'''
for (i,o) in enumerate(gqs.odf(s)):
print i,o
for (i,o) in enumerate(gqs.odf_vertices):
print i,o
'''
#o = gqs.odf(s)
#v = gqs.odf_vertices
#pole = v[t0[0]]
#eqv = dgqs.equatorial_zone_vertices(v, pole, 5)
#print 'Number of equatorial vertices: ', len(eqv)
#print np.max(o[eqv]),np.min(o[eqv])
#cos_e_pole = [np.dot(pole.T, v[i]) for i in eqv]
#print np.min(cos1), np.max(cos1)
#print 'equatorial max in equatorial vertices:', t1[0] in eqv
#x = np.cross(v[t0[0]],v[t1[0]])
#x = x/np.sqrt(np.sum(x**2))
#print x
#ptchv = dgqs.patch_vertices(v, x, 5)
#print len(ptchv)
#eqp = eqv[np.argmin([np.abs(np.dot(v[t1[0]].T,v[p])) for p in eqv])]
#print (eqp, o[eqp])
#print t2[0] in ptchv, t2[0] in eqv
#print np.dot(pole.T, v[t1[0]]), np.dot(pole.T, v[t2[0]])
#print ptchv[np.argmin([o[v] for v in ptchv])]
#gq_indices = np.array(gq.IN[:,0],dtype='int').reshape((100,1000))
#gq_first_directions_in=odf_vertices[np.array(gq.IN[:,0],dtype='int')]
#print gq_first_directions_in.shape
#gq_results = analyze_maxima(gq_indices, gq_first_directions_in.reshape((100,1000,3)),range(100))
#for gqi see example dicoms_2_tracks gq.IN[:,0]
#np.set_printoptions(precision=6, suppress=True, linewidth=200, threshold=5000)
#out = open('/home/ian/Data/SimVoxels/Out/'+'+++_'+dataname,'w')
#results = np.hstack((np.vstack(dt_results), np.vstack(gq_results)))
#results = np.vstack(dt_results)
#print >> out, results[:,:]
#out.close()
#run_comparisons()
run_gq_sims()
|
bsd-3-clause
|
h2educ/scikit-learn
|
examples/ensemble/plot_adaboost_twoclass.py
|
347
|
3268
|
"""
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score indicates the degree of confidence in
the predicted class label. Additionally, a new dataset could be constructed
with a desired purity of class B, for example, by selecting only samples
with a decision score above some threshold (a short sketch of this selection
is appended after the plotting code).
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
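# A hedged sketch of the purity selection mentioned in the docstring. The 0.0
# threshold is an arbitrary assumption; raising it yields a subset that is
# increasingly pure in class B.
threshold = 0.0
high_confidence_B = X[bdt.decision_function(X) > threshold]
print("Samples with decision score > %.1f: %d" % (threshold, len(high_confidence_B)))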
|
bsd-3-clause
|
jpsarkar/User_Location_Density_Categorization
|
source/clusterCustomer.py
|
1
|
2641
|
#!/usr/bin/env python
# Author: Jnanendra Sarkar
#
################################################################
import pandas as pd
import numpy as np
import csv
import random
import math
import haversine as hs
import pyspark.sql
import scipy.spatial.distance as sp
from pyspark.context import SparkContext
from pyspark.sql.session import SparkSession
sc = SparkContext('local')
spark = SparkSession(sc)
iFile = 'sample.csv'
oFile ='sample_output.csv'
MAX_ITR = 100
def minDistMatrix(DS, center, k):
D = sp.cdist(DS, center, 'euclidean')
label = D.argmin(axis=1)
Dro, Dco = D.shape
    minDist = np.zeros((Dro, Dco))  # float zeros; an integer array here would truncate the distances
j = 0
while (j < k):
TempList = np.where(label == j)
for p in TempList[0]:
minDist[p,j] = D[p,j]
j += 1
return minDist
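def minDistMatrixVectorized(DS, center, k):
    # Hedged alternative sketch (not used below): builds the per-cluster assigned
    # distances with a boolean mask instead of the explicit per-cluster loop.
    D = sp.cdist(DS, center, 'euclidean')
    label = D.argmin(axis=1)
    return np.where(label[:, None] == np.arange(k)[None, :], D, 0.0)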
def getFitness(mDist):
return sum(sum(mDist))
def chkTermination(oldCenter, currCenter, itr):
if itr > MAX_ITR: return True
return oldCenter.tolist() == currCenter.tolist()
def getClusterLabel(DS, center):
#print("Compute distance matrix...")
D = sp.cdist(DS, center, 'euclidean')
label = D.argmin(axis=1)
return label
def updateCenter(DS, center, label, k):
i = 0
while (i < k):
TI = np.where(label == i)
#print("length...")
#print(len(TI))
if (i == 0):
            if (len(TI[0]) == 0):  # TI is a tuple from np.where; test the index array itself
TC = center[i]
else:
TC = np.mean(DS[TI],0)
else:
            if (len(TI[0]) == 0):  # keep the old center when the cluster is empty
TC = center[i]
else:
TC = np.vstack((TC,np.mean(DS[TI],0)))
i += 1
#print(TC)
return TC
df = spark.read.csv(iFile, header=None, inferSchema=True)
data = df.filter((df._c3 != 0) & (df._c4 != 0)).select(df._c3,df._c4)
dataArr = np.array(data.collect())
k = 3
itr = 0
nRec, nDim = dataArr.shape
CArr = dataArr[random.sample(list(range(nRec)), k)]  # draw k distinct initial centers from all rows
print("Random initialized Centers...")
print(CArr)
ro, co = CArr.shape
oldCArr = np.array([ [0] * co ] * ro)
while not chkTermination(oldCArr, CArr, itr):
oldCArr = CArr
itr += 1
label = getClusterLabel(dataArr, CArr)
CArr = updateCenter(dataArr, CArr, label, k)
fitness = getFitness(minDistMatrix(dataArr, CArr, k))
print("Iteration : ",itr," - Objective function value : ",fitness)
print("Converged Centers...")
print(CArr)
finalData = np.column_stack((dataArr,label))
np.savetxt(oFile, finalData, delimiter=",",fmt="%14.10f,%14.10f,%d")
|
mit
|
jpautom/scikit-learn
|
examples/svm/plot_svm_regression.py
|
120
|
1520
|
"""
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
lw = 2
plt.scatter(X, y, color='darkorange', label='data')
# plt.hold('on') is deprecated and unnecessary; subsequent plot calls draw on the same axes
plt.plot(X, y_rbf, color='navy', lw=lw, label='RBF model')
plt.plot(X, y_lin, color='c', lw=lw, label='Linear model')
plt.plot(X, y_poly, color='cornflowerblue', lw=lw, label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
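# A hedged addition, not part of the original example: compare the three fits
# on the training data with the coefficient of determination (R^2).
from sklearn.metrics import r2_score
for name, y_pred in (('RBF', y_rbf), ('Linear', y_lin), ('Polynomial', y_poly)):
    print("%s kernel R^2: %.3f" % (name, r2_score(y, y_pred)))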
|
bsd-3-clause
|
lancezlin/ml_template_py
|
lib/python2.7/site-packages/pandas/io/sql.py
|
7
|
58266
|
# -*- coding: utf-8 -*-
"""
Collection of query wrappers / abstractions to both facilitate data
retrieval and to reduce dependency on DB-specific API.
"""
from __future__ import print_function, division
from datetime import datetime, date, time
import warnings
import re
import numpy as np
import pandas.lib as lib
from pandas.types.missing import isnull
from pandas.types.dtypes import DatetimeTZDtype
from pandas.types.common import (is_list_like, is_dict_like,
is_datetime64tz_dtype)
from pandas.compat import (map, zip, raise_with_traceback,
string_types, text_type)
from pandas.core.api import DataFrame, Series
from pandas.core.base import PandasObject
from pandas.tseries.tools import to_datetime
from contextlib import contextmanager
class SQLAlchemyRequired(ImportError):
pass
class DatabaseError(IOError):
pass
# -----------------------------------------------------------------------------
# -- Helper functions
_SQLALCHEMY_INSTALLED = None
def _validate_flavor_parameter(flavor):
"""
    Checks whether a database 'flavor' was specified.
    If it is not None, produces a FutureWarning when it is 'sqlite' and
    raises a ValueError for anything else.
"""
if flavor is not None:
if flavor == 'sqlite':
warnings.warn("the 'flavor' parameter is deprecated "
"and will be removed in a future version, "
"as 'sqlite' is the only supported option "
"when SQLAlchemy is not installed.",
FutureWarning, stacklevel=2)
else:
raise ValueError("database flavor {flavor} is not "
"supported".format(flavor=flavor))
def _is_sqlalchemy_connectable(con):
global _SQLALCHEMY_INSTALLED
if _SQLALCHEMY_INSTALLED is None:
try:
import sqlalchemy
_SQLALCHEMY_INSTALLED = True
from distutils.version import LooseVersion
ver = LooseVersion(sqlalchemy.__version__)
# For sqlalchemy versions < 0.8.2, the BIGINT type is recognized
# for a sqlite engine, which results in a warning when trying to
# read/write a DataFrame with int64 values. (GH7433)
if ver < '0.8.2':
from sqlalchemy import BigInteger
from sqlalchemy.ext.compiler import compiles
@compiles(BigInteger, 'sqlite')
def compile_big_int_sqlite(type_, compiler, **kw):
return 'INTEGER'
except ImportError:
_SQLALCHEMY_INSTALLED = False
if _SQLALCHEMY_INSTALLED:
import sqlalchemy
return isinstance(con, sqlalchemy.engine.Connectable)
else:
return False
def _convert_params(sql, params):
"""convert sql and params args to DBAPI2.0 compliant format"""
args = [sql]
if params is not None:
if hasattr(params, 'keys'): # test if params is a mapping
args += [params]
else:
args += [list(params)]
return args
def _handle_date_column(col, format=None):
if isinstance(format, dict):
return to_datetime(col, errors='ignore', **format)
else:
if format in ['D', 's', 'ms', 'us', 'ns']:
return to_datetime(col, errors='coerce', unit=format, utc=True)
elif (issubclass(col.dtype.type, np.floating) or
issubclass(col.dtype.type, np.integer)):
# parse dates as timestamp
format = 's' if format is None else format
return to_datetime(col, errors='coerce', unit=format, utc=True)
elif is_datetime64tz_dtype(col):
# coerce to UTC timezone
# GH11216
return (to_datetime(col, errors='coerce')
.astype('datetime64[ns, UTC]'))
else:
return to_datetime(col, errors='coerce', format=format, utc=True)
def _parse_date_columns(data_frame, parse_dates):
"""
    Force the columns listed in `parse_dates` to be parsed as datetimes.
    Supports both string-formatted and integer timestamp columns.
"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
if not hasattr(parse_dates, '__iter__'):
parse_dates = [parse_dates]
for col_name in parse_dates:
df_col = data_frame[col_name]
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
data_frame[col_name] = _handle_date_column(df_col, format=fmt)
# we want to coerce datetime64_tz dtypes for now
# we could in theory do a 'nice' conversion from a FixedOffset tz
# GH11216
for col_name, df_col in data_frame.iteritems():
if is_datetime64tz_dtype(df_col):
data_frame[col_name] = _handle_date_column(df_col)
return data_frame
def _wrap_result(data, columns, index_col=None, coerce_float=True,
parse_dates=None):
"""Wrap result set of query in a DataFrame """
frame = DataFrame.from_records(data, columns=columns,
coerce_float=coerce_float)
_parse_date_columns(frame, parse_dates)
if index_col is not None:
frame.set_index(index_col, inplace=True)
return frame
def execute(sql, con, cur=None, params=None):
"""
Execute the given SQL query using the provided connection object.
Parameters
----------
sql : string
Query to be executed
con : SQLAlchemy connectable(engine/connection) or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
cur : deprecated, cursor is obtained from connection, default: None
params : list or tuple, optional, default: None
List of parameters to pass to execute method.
Returns
-------
Results Iterable
"""
if cur is None:
pandas_sql = pandasSQL_builder(con)
else:
pandas_sql = pandasSQL_builder(cur, is_cursor=True)
args = _convert_params(sql, params)
return pandas_sql.execute(*args)
# -----------------------------------------------------------------------------
# -- Read and write to DataFrames
def read_sql_table(table_name, con, schema=None, index_col=None,
coerce_float=True, parse_dates=None, columns=None,
chunksize=None):
"""Read SQL database table into a DataFrame.
Given a table name and an SQLAlchemy connectable, returns a DataFrame.
This function does not support DBAPI connections.
Parameters
----------
table_name : string
Name of SQL table in database
con : SQLAlchemy connectable (or database string URI)
Sqlite DBAPI connection mode not supported
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If None, use default schema (default).
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex)
coerce_float : boolean, default True
        Attempt to convert values of non-string, non-numeric objects (like
        decimal.Decimal) to floating point. Can result in loss of precision.
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
columns : list, default: None
List of column names to select from sql table
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number of
rows to include in each chunk.
Returns
-------
DataFrame
Notes
-----
Any datetime values with time zone information will be converted to UTC
See also
--------
read_sql_query : Read SQL query into a DataFrame.
read_sql
"""
con = _engine_builder(con)
if not _is_sqlalchemy_connectable(con):
raise NotImplementedError("read_sql_table only supported for "
"SQLAlchemy connectable.")
import sqlalchemy
from sqlalchemy.schema import MetaData
meta = MetaData(con, schema=schema)
try:
meta.reflect(only=[table_name], views=True)
except sqlalchemy.exc.InvalidRequestError:
raise ValueError("Table %s not found" % table_name)
pandas_sql = SQLDatabase(con, meta=meta)
table = pandas_sql.read_table(
table_name, index_col=index_col, coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns, chunksize=chunksize)
if table is not None:
return table
else:
raise ValueError("Table %s not found" % table_name, con)
def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
parse_dates=None, chunksize=None):
"""Read SQL query into a DataFrame.
Returns a DataFrame corresponding to the result set of the query
string. Optionally provide an `index_col` parameter to use one of the
columns as the index, otherwise default integer index will be used.
Parameters
----------
sql : string SQL query or SQLAlchemy Selectable (select or text object)
to be executed.
con : SQLAlchemy connectable(engine/connection) or database string URI
or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex)
coerce_float : boolean, default True
        Attempt to convert values of non-string, non-numeric objects (like
        decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number of
rows to include in each chunk.
Returns
-------
DataFrame
Notes
-----
Any datetime values with time zone information parsed via the `parse_dates`
parameter will be converted to UTC
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql
"""
pandas_sql = pandasSQL_builder(con)
return pandas_sql.read_query(
sql, index_col=index_col, params=params, coerce_float=coerce_float,
parse_dates=parse_dates, chunksize=chunksize)
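# A hedged usage sketch for read_sql_query (never called by this module); the
# sqlite3 in-memory connection and the 'demo' table are illustrative assumptions.
def _read_sql_query_usage_sketch(): # pragma: no cover
    import sqlite3
    conn = sqlite3.connect(':memory:')
    conn.execute("CREATE TABLE demo (id INTEGER, ts TEXT)")
    conn.execute("INSERT INTO demo VALUES (1, '2016-01-02 03:04:05')")
    return read_sql_query("SELECT * FROM demo", conn, parse_dates=['ts'])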
def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
parse_dates=None, columns=None, chunksize=None):
"""
Read SQL query or database table into a DataFrame.
Parameters
----------
sql : string SQL query or SQLAlchemy Selectable (select or text object)
to be executed, or database table name.
con : SQLAlchemy connectable(engine/connection) or database string URI
or DBAPI2 connection (fallback mode)
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex)
coerce_float : boolean, default True
        Attempt to convert values of non-string, non-numeric objects (like
        decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
columns : list, default: None
List of column names to select from sql table (only used when reading
a table).
chunksize : int, default None
If specified, return an iterator where `chunksize` is the
number of rows to include in each chunk.
Returns
-------
DataFrame
Notes
-----
    This function is a convenience wrapper around ``read_sql_table`` and
    ``read_sql_query`` (kept for backward compatibility) and will delegate
    to the specific function depending on the provided input (database
    table name or SQL query). The delegated function might have more specific
    notes about its functionality not listed here.
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql_query : Read SQL query into a DataFrame
"""
pandas_sql = pandasSQL_builder(con)
if isinstance(pandas_sql, SQLiteDatabase):
return pandas_sql.read_query(
sql, index_col=index_col, params=params,
coerce_float=coerce_float, parse_dates=parse_dates,
chunksize=chunksize)
try:
_is_table_name = pandas_sql.has_table(sql)
except:
_is_table_name = False
if _is_table_name:
pandas_sql.meta.reflect(only=[sql])
return pandas_sql.read_table(
sql, index_col=index_col, coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns, chunksize=chunksize)
else:
return pandas_sql.read_query(
sql, index_col=index_col, params=params,
coerce_float=coerce_float, parse_dates=parse_dates,
chunksize=chunksize)
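# A hedged sketch of the dispatch behaviour documented above (never called by
# this module): a table name delegates to read_sql_table, a query string to
# read_sql_query. The 'demo' table name is an illustrative assumption.
def _read_sql_dispatch_sketch(connectable): # pragma: no cover
    by_table = read_sql('demo', connectable)
    by_query = read_sql('SELECT * FROM demo', connectable)
    return by_table, by_query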
def to_sql(frame, name, con, flavor=None, schema=None, if_exists='fail',
index=True, index_label=None, chunksize=None, dtype=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
Name of SQL table
con : SQLAlchemy connectable(engine/connection) or database string URI
or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
flavor : 'sqlite', default None
DEPRECATED: this parameter will be removed in a future version
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, default None
If not None, then rows will be written in batches of this size at a
time. If None, all rows will be written at once.
dtype : single SQLtype or dict of column name to SQL type, default None
        Optionally, specify the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
If all columns are of the same type, one single value can be used.
"""
if if_exists not in ('fail', 'replace', 'append'):
raise ValueError("'{0}' is not valid for if_exists".format(if_exists))
pandas_sql = pandasSQL_builder(con, schema=schema, flavor=flavor)
if isinstance(frame, Series):
frame = frame.to_frame()
elif not isinstance(frame, DataFrame):
raise NotImplementedError("'frame' argument should be either a "
"Series or a DataFrame")
pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index,
index_label=index_label, schema=schema,
chunksize=chunksize, dtype=dtype)
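# A hedged usage sketch for to_sql (never called by this module); the sqlite3
# in-memory connection and the 'people' table name are illustrative assumptions.
def _to_sql_usage_sketch(): # pragma: no cover
    import sqlite3
    conn = sqlite3.connect(':memory:')
    frame = DataFrame({'name': ['a', 'b'], 'age': [30, 40]})
    to_sql(frame, 'people', conn, index=False, if_exists='replace')
    return read_sql_query("SELECT * FROM people", conn)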
def has_table(table_name, con, flavor=None, schema=None):
"""
Check if DataBase has named table.
Parameters
----------
table_name: string
Name of SQL table
con: SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
flavor : 'sqlite', default None
DEPRECATED: this parameter will be removed in a future version
schema : string, default None
Name of SQL schema in database to write to (if database flavor supports
this). If None, use default schema (default).
Returns
-------
boolean
"""
pandas_sql = pandasSQL_builder(con, flavor=flavor, schema=schema)
return pandas_sql.has_table(table_name)
table_exists = has_table
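# A hedged usage sketch (never called by this module): check for a table by name;
# the 'people' table name is an illustrative assumption, and table_exists above is
# simply an alias for has_table.
def _has_table_usage_sketch(con): # pragma: no cover
    return has_table('people', con)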
def _engine_builder(con):
"""
Returns a SQLAlchemy engine from a URI (if con is a string)
    else it just returns con without modifying it
"""
global _SQLALCHEMY_INSTALLED
if isinstance(con, string_types):
try:
import sqlalchemy
except ImportError:
_SQLALCHEMY_INSTALLED = False
else:
con = sqlalchemy.create_engine(con)
return con
return con
def pandasSQL_builder(con, flavor=None, schema=None, meta=None,
is_cursor=False):
"""
Convenience function to return the correct PandasSQL subclass based on the
provided parameters
"""
_validate_flavor_parameter(flavor)
# When support for DBAPI connections is removed,
# is_cursor should not be necessary.
con = _engine_builder(con)
if _is_sqlalchemy_connectable(con):
return SQLDatabase(con, schema=schema, meta=meta)
elif isinstance(con, string_types):
raise ImportError("Using URI string without sqlalchemy installed.")
else:
return SQLiteDatabase(con, is_cursor=is_cursor)
class SQLTable(PandasObject):
"""
For mapping Pandas tables to SQL tables.
    Uses the fact that the table is reflected by SQLAlchemy to
    do better type conversions.
Also holds various flags needed to avoid having to
pass them between functions all the time.
"""
# TODO: support for multiIndex
def __init__(self, name, pandas_sql_engine, frame=None, index=True,
if_exists='fail', prefix='pandas', index_label=None,
schema=None, keys=None, dtype=None):
self.name = name
self.pd_sql = pandas_sql_engine
self.prefix = prefix
self.frame = frame
self.index = self._index_name(index, index_label)
self.schema = schema
self.if_exists = if_exists
self.keys = keys
self.dtype = dtype
if frame is not None:
# We want to initialize based on a dataframe
self.table = self._create_table_setup()
else:
# no data provided, read-only mode
self.table = self.pd_sql.get_table(self.name, self.schema)
if self.table is None:
raise ValueError("Could not init table '%s'" % name)
def exists(self):
return self.pd_sql.has_table(self.name, self.schema)
def sql_schema(self):
from sqlalchemy.schema import CreateTable
return str(CreateTable(self.table).compile(self.pd_sql.connectable))
def _execute_create(self):
# Inserting table into database, add to MetaData object
self.table = self.table.tometadata(self.pd_sql.meta)
self.table.create()
def create(self):
if self.exists():
if self.if_exists == 'fail':
raise ValueError("Table '%s' already exists." % self.name)
elif self.if_exists == 'replace':
self.pd_sql.drop_table(self.name, self.schema)
self._execute_create()
elif self.if_exists == 'append':
pass
else:
raise ValueError(
"'{0}' is not valid for if_exists".format(self.if_exists))
else:
self._execute_create()
def insert_statement(self):
return self.table.insert()
def insert_data(self):
if self.index is not None:
temp = self.frame.copy()
temp.index.names = self.index
try:
temp.reset_index(inplace=True)
except ValueError as err:
raise ValueError(
"duplicate name in index/columns: {0}".format(err))
else:
temp = self.frame
column_names = list(map(text_type, temp.columns))
ncols = len(column_names)
data_list = [None] * ncols
blocks = temp._data.blocks
for i in range(len(blocks)):
b = blocks[i]
if b.is_datetime:
# convert to microsecond resolution so this yields
# datetime.datetime
d = b.values.astype('M8[us]').astype(object)
else:
d = np.array(b.get_values(), dtype=object)
# replace NaN with None
if b._can_hold_na:
mask = isnull(d)
d[mask] = None
for col_loc, col in zip(b.mgr_locs, d):
data_list[col_loc] = col
return column_names, data_list
def _execute_insert(self, conn, keys, data_iter):
data = [dict((k, v) for k, v in zip(keys, row)) for row in data_iter]
conn.execute(self.insert_statement(), data)
def insert(self, chunksize=None):
keys, data_list = self.insert_data()
nrows = len(self.frame)
if nrows == 0:
return
if chunksize is None:
chunksize = nrows
elif chunksize == 0:
raise ValueError('chunksize argument should be non-zero')
chunks = int(nrows / chunksize) + 1
with self.pd_sql.run_transaction() as conn:
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
chunk_iter = zip(*[arr[start_i:end_i] for arr in data_list])
self._execute_insert(conn, keys, chunk_iter)
def _query_iterator(self, result, chunksize, columns, coerce_float=True,
parse_dates=None):
"""Return generator through chunked result set"""
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
self.frame = DataFrame.from_records(
data, columns=columns, coerce_float=coerce_float)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
yield self.frame
def read(self, coerce_float=True, parse_dates=None, columns=None,
chunksize=None):
if columns is not None and len(columns) > 0:
from sqlalchemy import select
cols = [self.table.c[n] for n in columns]
if self.index is not None:
[cols.insert(0, self.table.c[idx]) for idx in self.index[::-1]]
sql_select = select(cols)
else:
sql_select = self.table.select()
result = self.pd_sql.execute(sql_select)
column_names = result.keys()
if chunksize is not None:
return self._query_iterator(result, chunksize, column_names,
coerce_float=coerce_float,
parse_dates=parse_dates)
else:
data = result.fetchall()
self.frame = DataFrame.from_records(
data, columns=column_names, coerce_float=coerce_float)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
return self.frame
def _index_name(self, index, index_label):
# for writing: index=True to include index in sql table
if index is True:
nlevels = self.frame.index.nlevels
# if index_label is specified, set this as index name(s)
if index_label is not None:
if not isinstance(index_label, list):
index_label = [index_label]
if len(index_label) != nlevels:
raise ValueError(
"Length of 'index_label' should match number of "
"levels, which is {0}".format(nlevels))
else:
return index_label
# return the used column labels for the index columns
if (nlevels == 1 and 'index' not in self.frame.columns and
self.frame.index.name is None):
return ['index']
else:
return [l if l is not None else "level_{0}".format(i)
for i, l in enumerate(self.frame.index.names)]
# for reading: index=(list of) string to specify column to set as index
elif isinstance(index, string_types):
return [index]
elif isinstance(index, list):
return index
else:
return None
def _get_column_names_and_types(self, dtype_mapper):
column_names_and_types = []
if self.index is not None:
for i, idx_label in enumerate(self.index):
idx_type = dtype_mapper(
self.frame.index.get_level_values(i))
column_names_and_types.append((idx_label, idx_type, True))
column_names_and_types += [
(text_type(self.frame.columns[i]),
dtype_mapper(self.frame.iloc[:, i]),
False)
for i in range(len(self.frame.columns))
]
return column_names_and_types
def _create_table_setup(self):
from sqlalchemy import Table, Column, PrimaryKeyConstraint
column_names_and_types = \
self._get_column_names_and_types(self._sqlalchemy_type)
columns = [Column(name, typ, index=is_index)
for name, typ, is_index in column_names_and_types]
if self.keys is not None:
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
pkc = PrimaryKeyConstraint(*keys, name=self.name + '_pk')
columns.append(pkc)
schema = self.schema or self.pd_sql.meta.schema
# At this point, attach to new metadata, only attach to self.meta
# once table is created.
from sqlalchemy.schema import MetaData
meta = MetaData(self.pd_sql, schema=schema)
return Table(self.name, meta, *columns, schema=schema)
def _harmonize_columns(self, parse_dates=None):
"""
Make the DataFrame's column types align with the SQL table
column types.
        Need to work around limited NA value support. Floats are always
        fine; ints must be converted to floats if there are null values.
        Booleans are hard because converting a bool column with None replaces
        all Nones with False. Therefore, only convert bool if there are no
        NA values.
        Datetimes should already be converted to np.datetime64 if supported,
        but here we also force conversion if required.
"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
if not hasattr(parse_dates, '__iter__'):
parse_dates = [parse_dates]
for sql_col in self.table.columns:
col_name = sql_col.name
try:
df_col = self.frame[col_name]
# the type the dataframe column should have
col_type = self._get_dtype(sql_col.type)
if (col_type is datetime or col_type is date or
col_type is DatetimeTZDtype):
self.frame[col_name] = _handle_date_column(df_col)
elif col_type is float:
# floats support NA, can always convert!
self.frame[col_name] = df_col.astype(col_type, copy=False)
elif len(df_col) == df_col.count():
# No NA values, can convert ints and bools
if col_type is np.dtype('int64') or col_type is bool:
self.frame[col_name] = df_col.astype(
col_type, copy=False)
# Handle date parsing
if col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
self.frame[col_name] = _handle_date_column(
df_col, format=fmt)
except KeyError:
pass # this column not in results
def _get_notnull_col_dtype(self, col):
"""
Infer datatype of the Series col. In case the dtype of col is 'object'
and it contains NA values, this infers the datatype of the not-NA
values. Needed for inserting typed data containing NULLs, GH8778.
"""
col_for_inference = col
if col.dtype == 'object':
notnulldata = col[~isnull(col)]
if len(notnulldata):
col_for_inference = notnulldata
return lib.infer_dtype(col_for_inference)
def _sqlalchemy_type(self, col):
dtype = self.dtype or {}
if col.name in dtype:
return self.dtype[col.name]
col_type = self._get_notnull_col_dtype(col)
from sqlalchemy.types import (BigInteger, Integer, Float,
Text, Boolean,
DateTime, Date, Time)
if col_type == 'datetime64' or col_type == 'datetime':
try:
tz = col.tzinfo # noqa
return DateTime(timezone=True)
except:
return DateTime
if col_type == 'timedelta64':
warnings.warn("the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the "
"database.", UserWarning, stacklevel=8)
return BigInteger
elif col_type == 'floating':
if col.dtype == 'float32':
return Float(precision=23)
else:
return Float(precision=53)
elif col_type == 'integer':
if col.dtype == 'int32':
return Integer
else:
return BigInteger
elif col_type == 'boolean':
return Boolean
elif col_type == 'date':
return Date
elif col_type == 'time':
return Time
elif col_type == 'complex':
raise ValueError('Complex datatypes not supported')
return Text
def _get_dtype(self, sqltype):
from sqlalchemy.types import (Integer, Float, Boolean, DateTime,
Date, TIMESTAMP)
if isinstance(sqltype, Float):
return float
elif isinstance(sqltype, Integer):
# TODO: Refine integer size.
return np.dtype('int64')
elif isinstance(sqltype, TIMESTAMP):
# we have a timezone capable type
if not sqltype.timezone:
return datetime
return DatetimeTZDtype
elif isinstance(sqltype, DateTime):
# Caution: np.datetime64 is also a subclass of np.number.
return datetime
elif isinstance(sqltype, Date):
return date
elif isinstance(sqltype, Boolean):
return bool
return object
class PandasSQL(PandasObject):
"""
    Subclasses should define read_sql and to_sql.
"""
def read_sql(self, *args, **kwargs):
raise ValueError("PandasSQL must be created with an SQLAlchemy "
"connectable or sqlite connection")
def to_sql(self, *args, **kwargs):
raise ValueError("PandasSQL must be created with an SQLAlchemy "
"connectable or sqlite connection")
class SQLDatabase(PandasSQL):
"""
    This class enables conversion between DataFrame and SQL databases
using SQLAlchemy to handle DataBase abstraction
Parameters
----------
engine : SQLAlchemy connectable
Connectable to connect with the database. Using SQLAlchemy makes it
possible to use any DB supported by that library.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
meta : SQLAlchemy MetaData object, default None
        If provided, this MetaData object is used instead of a newly
        created one. This allows specifying database-flavor-specific
        arguments in the MetaData object.
"""
def __init__(self, engine, schema=None, meta=None):
self.connectable = engine
if not meta:
from sqlalchemy.schema import MetaData
meta = MetaData(self.connectable, schema=schema)
self.meta = meta
@contextmanager
def run_transaction(self):
with self.connectable.begin() as tx:
if hasattr(tx, 'execute'):
yield tx
else:
yield self.connectable
def execute(self, *args, **kwargs):
"""Simple passthrough to SQLAlchemy connectable"""
return self.connectable.execute(*args, **kwargs)
def read_table(self, table_name, index_col=None, coerce_float=True,
parse_dates=None, columns=None, schema=None,
chunksize=None):
"""Read SQL database table into a DataFrame.
Parameters
----------
table_name : string
Name of SQL table in database
index_col : string, optional, default: None
Column to set as index
coerce_float : boolean, default True
            Attempt to convert values of non-string, non-numeric objects
(like decimal.Decimal) to floating point. This can result in
loss of precision.
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg}``, where the arg corresponds
to the keyword arguments of :func:`pandas.to_datetime`.
Especially useful with databases without native Datetime support,
such as SQLite
columns : list, default: None
List of column names to select from sql table
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If specified, this overwrites the default
schema of the SQLDatabase object.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See also
--------
pandas.read_sql_table
SQLDatabase.read_query
"""
table = SQLTable(table_name, self, index=index_col, schema=schema)
return table.read(coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns,
chunksize=chunksize)
@staticmethod
def _query_iterator(result, chunksize, columns, index_col=None,
coerce_float=True, parse_dates=None):
"""Return generator through chunked result set"""
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
yield _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
def read_query(self, sql, index_col=None, coerce_float=True,
parse_dates=None, params=None, chunksize=None):
"""Read SQL query into a DataFrame.
Parameters
----------
sql : string
SQL query to be executed
index_col : string, optional, default: None
Column name to use as index for the returned DataFrame object.
coerce_float : boolean, default True
            Attempt to convert values of non-string, non-numeric objects (like
            decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict
corresponds to the keyword arguments of
:func:`pandas.to_datetime` Especially useful with databases
without native Datetime support, such as SQLite
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql
"""
args = _convert_params(sql, params)
result = self.execute(*args)
columns = result.keys()
if chunksize is not None:
return self._query_iterator(result, chunksize, columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
else:
data = result.fetchall()
frame = _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
return frame
read_sql = read_query
def to_sql(self, frame, name, if_exists='fail', index=True,
index_label=None, schema=None, chunksize=None, dtype=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
Name of SQL table
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If specified, this overwrites the default
schema of the SQLDatabase object.
chunksize : int, default None
If not None, then rows will be written in batches of this size at a
time. If None, all rows will be written at once.
dtype : single type or dict of column name to SQL type, default None
            Optionally, specify the datatype for columns. The SQL type should
be a SQLAlchemy type. If all columns are of the same type, one
single value can be used.
"""
if dtype and not is_dict_like(dtype):
dtype = {col_name: dtype for col_name in frame}
if dtype is not None:
from sqlalchemy.types import to_instance, TypeEngine
for col, my_type in dtype.items():
if not isinstance(to_instance(my_type), TypeEngine):
raise ValueError('The type of %s is not a SQLAlchemy '
'type ' % col)
table = SQLTable(name, self, frame=frame, index=index,
if_exists=if_exists, index_label=index_label,
schema=schema, dtype=dtype)
table.create()
table.insert(chunksize)
if (not name.isdigit() and not name.islower()):
            # check for potential case-sensitivity issues (GH7815)
# Only check when name is not a number and name is not lower case
engine = self.connectable.engine
with self.connectable.connect() as conn:
table_names = engine.table_names(
schema=schema or self.meta.schema,
connection=conn,
)
if name not in table_names:
msg = (
"The provided table name '{0}' is not found exactly as "
"such in the database after writing the table, possibly "
"due to case sensitivity issues. Consider using lower "
"case table names."
).format(name)
warnings.warn(msg, UserWarning)
@property
def tables(self):
return self.meta.tables
def has_table(self, name, schema=None):
return self.connectable.run_callable(
self.connectable.dialect.has_table,
name,
schema or self.meta.schema,
)
def get_table(self, table_name, schema=None):
schema = schema or self.meta.schema
if schema:
tbl = self.meta.tables.get('.'.join([schema, table_name]))
else:
tbl = self.meta.tables.get(table_name)
# Avoid casting double-precision floats into decimals
from sqlalchemy import Numeric
for column in tbl.columns:
if isinstance(column.type, Numeric):
column.type.asdecimal = False
return tbl
def drop_table(self, table_name, schema=None):
schema = schema or self.meta.schema
if self.has_table(table_name, schema):
self.meta.reflect(only=[table_name], schema=schema)
self.get_table(table_name, schema).drop()
self.meta.clear()
def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
table = SQLTable(table_name, self, frame=frame, index=False, keys=keys,
dtype=dtype)
return str(table.sql_schema())
# ---- SQL without SQLAlchemy ---
# sqlite-specific sql strings and handler class
# dictionary used for readability purposes
_SQL_TYPES = {
'string': 'TEXT',
'floating': 'REAL',
'integer': 'INTEGER',
'datetime': 'TIMESTAMP',
'date': 'DATE',
'time': 'TIME',
'boolean': 'INTEGER',
}
def _get_unicode_name(name):
try:
uname = name.encode("utf-8", "strict").decode("utf-8")
except UnicodeError:
raise ValueError("Cannot convert identifier to UTF-8: '%s'" % name)
return uname
def _get_valid_sqlite_name(name):
# See http://stackoverflow.com/questions/6514274/how-do-you-escape-strings\
# -for-sqlite-table-column-names-in-python
# Ensure the string can be encoded as UTF-8.
# Ensure the string does not include any NUL characters.
# Replace all " with "".
# Wrap the entire thing in double quotes.
uname = _get_unicode_name(name)
if not len(uname):
raise ValueError("Empty table or column name specified")
nul_index = uname.find("\x00")
if nul_index >= 0:
raise ValueError('SQLite identifier cannot contain NULs')
return '"' + uname.replace('"', '""') + '"'
_SAFE_NAMES_WARNING = ("The spaces in these column names will not be changed. "
"In pandas versions < 0.14, spaces were converted to "
"underscores.")
class SQLiteTable(SQLTable):
"""
Patch the SQLTable for fallback support.
Instead of a table variable just use the Create Table statement.
"""
def __init__(self, *args, **kwargs):
# GH 8341
# register an adapter callable for datetime.time object
import sqlite3
# this will transform time(12,34,56,789) into '12:34:56.000789'
# (this is what sqlalchemy does)
sqlite3.register_adapter(time, lambda _: _.strftime("%H:%M:%S.%f"))
super(SQLiteTable, self).__init__(*args, **kwargs)
def sql_schema(self):
return str(";\n".join(self.table))
def _execute_create(self):
with self.pd_sql.run_transaction() as conn:
for stmt in self.table:
conn.execute(stmt)
def insert_statement(self):
names = list(map(text_type, self.frame.columns))
wld = '?' # wildcard char
escape = _get_valid_sqlite_name
if self.index is not None:
[names.insert(0, idx) for idx in self.index[::-1]]
bracketed_names = [escape(column) for column in names]
col_names = ','.join(bracketed_names)
wildcards = ','.join([wld] * len(names))
insert_statement = 'INSERT INTO %s (%s) VALUES (%s)' % (
escape(self.name), col_names, wildcards)
return insert_statement
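    # Hedged illustration (the frame is made up): for columns 'a' and 'b' with the
    # default index, the statement built above looks like
    #   INSERT INTO "tbl" ("index","a","b") VALUES (?,?,?)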
def _execute_insert(self, conn, keys, data_iter):
data_list = list(data_iter)
conn.executemany(self.insert_statement(), data_list)
def _create_table_setup(self):
"""
        Return a list of SQL statements that create a table reflecting the
structure of a DataFrame. The first entry will be a CREATE TABLE
statement while the rest will be CREATE INDEX statements
"""
column_names_and_types = \
self._get_column_names_and_types(self._sql_type_name)
        pat = re.compile(r'\s+')
column_names = [col_name for col_name, _, _ in column_names_and_types]
if any(map(pat.search, column_names)):
warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6)
escape = _get_valid_sqlite_name
create_tbl_stmts = [escape(cname) + ' ' + ctype
for cname, ctype, _ in column_names_and_types]
if self.keys is not None and len(self.keys):
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
cnames_br = ", ".join([escape(c) for c in keys])
create_tbl_stmts.append(
"CONSTRAINT {tbl}_pk PRIMARY KEY ({cnames_br})".format(
tbl=self.name, cnames_br=cnames_br))
create_stmts = ["CREATE TABLE " + escape(self.name) + " (\n" +
',\n '.join(create_tbl_stmts) + "\n)"]
ix_cols = [cname for cname, _, is_index in column_names_and_types
if is_index]
if len(ix_cols):
cnames = "_".join(ix_cols)
cnames_br = ",".join([escape(c) for c in ix_cols])
create_stmts.append(
"CREATE INDEX " + escape("ix_" + self.name + "_" + cnames) +
"ON " + escape(self.name) + " (" + cnames_br + ")")
return create_stmts
def _sql_type_name(self, col):
dtype = self.dtype or {}
if col.name in dtype:
return dtype[col.name]
col_type = self._get_notnull_col_dtype(col)
if col_type == 'timedelta64':
warnings.warn("the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the "
"database.", UserWarning, stacklevel=8)
col_type = "integer"
elif col_type == "datetime64":
col_type = "datetime"
elif col_type == "empty":
col_type = "string"
elif col_type == "complex":
raise ValueError('Complex datatypes not supported')
if col_type not in _SQL_TYPES:
col_type = "string"
return _SQL_TYPES[col_type]
class SQLiteDatabase(PandasSQL):
"""
Version of SQLDatabase to support sqlite connections (fallback without
sqlalchemy). This should only be used internally.
Parameters
----------
con : sqlite connection object
"""
def __init__(self, con, flavor=None, is_cursor=False):
_validate_flavor_parameter(flavor)
self.is_cursor = is_cursor
self.con = con
@contextmanager
def run_transaction(self):
cur = self.con.cursor()
try:
yield cur
self.con.commit()
except:
self.con.rollback()
raise
finally:
cur.close()
def execute(self, *args, **kwargs):
if self.is_cursor:
cur = self.con
else:
cur = self.con.cursor()
try:
if kwargs:
cur.execute(*args, **kwargs)
else:
cur.execute(*args)
return cur
except Exception as exc:
try:
self.con.rollback()
except Exception: # pragma: no cover
ex = DatabaseError("Execution failed on sql: %s\n%s\nunable"
" to rollback" % (args[0], exc))
raise_with_traceback(ex)
ex = DatabaseError(
"Execution failed on sql '%s': %s" % (args[0], exc))
raise_with_traceback(ex)
@staticmethod
def _query_iterator(cursor, chunksize, columns, index_col=None,
coerce_float=True, parse_dates=None):
"""Return generator through chunked result set"""
while True:
data = cursor.fetchmany(chunksize)
if type(data) == tuple:
data = list(data)
if not data:
cursor.close()
break
else:
yield _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
def read_query(self, sql, index_col=None, coerce_float=True, params=None,
parse_dates=None, chunksize=None):
args = _convert_params(sql, params)
cursor = self.execute(*args)
columns = [col_desc[0] for col_desc in cursor.description]
if chunksize is not None:
return self._query_iterator(cursor, chunksize, columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
else:
data = self._fetchall_as_list(cursor)
cursor.close()
frame = _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
return frame
def _fetchall_as_list(self, cur):
result = cur.fetchall()
if not isinstance(result, list):
result = list(result)
return result
def to_sql(self, frame, name, if_exists='fail', index=True,
index_label=None, schema=None, chunksize=None, dtype=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame: DataFrame
name: name of SQL table
if_exists: {'fail', 'replace', 'append'}, default 'fail'
            fail: If table exists, raise a ValueError.
replace: If table exists, drop it, recreate it, and insert data.
append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
schema : string, default None
            Ignored parameter included for compatibility with SQLAlchemy
version of ``to_sql``.
chunksize : int, default None
If not None, then rows will be written in batches of this
size at a time. If None, all rows will be written at once.
dtype : single type or dict of column name to SQL type, default None
            Optionally specify the datatype for columns. The SQL type should
be a string. If all columns are of the same type, one single value
can be used.
"""
if dtype and not is_dict_like(dtype):
dtype = {col_name: dtype for col_name in frame}
if dtype is not None:
for col, my_type in dtype.items():
if not isinstance(my_type, str):
raise ValueError('%s (%s) not a string' % (
col, str(my_type)))
table = SQLiteTable(name, self, frame=frame, index=index,
if_exists=if_exists, index_label=index_label,
dtype=dtype)
table.create()
table.insert(chunksize)
def has_table(self, name, schema=None):
# TODO(wesm): unused?
# escape = _get_valid_sqlite_name
# esc_name = escape(name)
wld = '?'
query = ("SELECT name FROM sqlite_master "
"WHERE type='table' AND name=%s;") % wld
return len(self.execute(query, [name, ]).fetchall()) > 0
def get_table(self, table_name, schema=None):
return None # not supported in fallback mode
def drop_table(self, name, schema=None):
drop_sql = "DROP TABLE %s" % _get_valid_sqlite_name(name)
self.execute(drop_sql)
def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
table = SQLiteTable(table_name, self, frame=frame, index=False,
keys=keys, dtype=dtype)
return str(table.sql_schema())
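# A minimal usage sketch of this sqlite fallback path (illustrative only; in
# practice it is reached through DataFrame.to_sql / read_sql when no
# SQLAlchemy connectable is given). The in-memory database, table and column
# names below are made up:
#
#     >>> import sqlite3
#     >>> import pandas as pd
#     >>> db = SQLiteDatabase(sqlite3.connect(':memory:'))
#     >>> db.to_sql(pd.DataFrame({'a': [1, 2]}), 'demo', index=False)
#     >>> db.has_table('demo')
#     True
#     >>> db.read_query('SELECT * FROM demo')   # returns a DataFrame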
def get_schema(frame, name, flavor=None, keys=None, con=None, dtype=None):
"""
Get the SQL db table schema for the given frame.
Parameters
----------
frame : DataFrame
name : string
name of SQL table
keys : string or sequence, default: None
        columns to use as a primary key
con: an open SQL database connection object or a SQLAlchemy connectable
Using SQLAlchemy makes it possible to use any DB supported by that
library, default: None
If a DBAPI2 object, only sqlite3 is supported.
flavor : 'sqlite', default None
DEPRECATED: this parameter will be removed in a future version
dtype : dict of column name to SQL type, default None
        Optionally specify the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
"""
pandas_sql = pandasSQL_builder(con=con, flavor=flavor)
return pandas_sql._create_sql_schema(frame, name, keys=keys, dtype=dtype)
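# A short sketch of get_schema (illustrative; the DataFrame, table name and
# key column are made up). With a plain sqlite3 connection the fallback
# SQLiteTable is used and a CREATE TABLE statement is returned as a string,
# including a "people_pk" primary-key constraint for the given key:
#
#     >>> import sqlite3
#     >>> import pandas as pd
#     >>> df = pd.DataFrame({'id': [1, 2], 'name': ['a', 'b']})
#     >>> print(get_schema(df, 'people', keys='id',
#     ...                  con=sqlite3.connect(':memory:')))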
|
mit
|
ronfung/incubator-airflow
|
tests/operators/hive_operator.py
|
40
|
14061
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import datetime
import os
import unittest
import mock
import nose
import six
from airflow import DAG, configuration, operators
configuration.load_test_config()
DEFAULT_DATE = datetime.datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
if 'AIRFLOW_RUNALL_TESTS' in os.environ:
import airflow.hooks.hive_hooks
import airflow.operators.presto_to_mysql
class HiveServer2Test(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
self.nondefault_schema = "nondefault"
def test_select_conn(self):
from airflow.hooks.hive_hooks import HiveServer2Hook
sql = "select 1"
hook = HiveServer2Hook()
hook.get_records(sql)
def test_multi_statements(self):
from airflow.hooks.hive_hooks import HiveServer2Hook
sqls = [
"CREATE TABLE IF NOT EXISTS test_multi_statements (i INT)",
"DROP TABLE test_multi_statements",
]
hook = HiveServer2Hook()
hook.get_records(sqls)
def test_get_metastore_databases(self):
if six.PY2:
from airflow.hooks.hive_hooks import HiveMetastoreHook
hook = HiveMetastoreHook()
hook.get_databases()
def test_to_csv(self):
from airflow.hooks.hive_hooks import HiveServer2Hook
sql = "select 1"
hook = HiveServer2Hook()
hook.to_csv(hql=sql, csv_filepath="/tmp/test_to_csv")
def connect_mock(self, host, port,
auth_mechanism, kerberos_service_name,
user, database):
self.assertEqual(database, self.nondefault_schema)
@mock.patch('HiveServer2Hook.connect', return_value="foo")
def test_select_conn_with_schema(self, connect_mock):
from airflow.hooks.hive_hooks import HiveServer2Hook
# Configure
hook = HiveServer2Hook()
# Run
hook.get_conn(self.nondefault_schema)
# Verify
self.assertTrue(connect_mock.called)
(args, kwargs) = connect_mock.call_args_list[0]
self.assertEqual(self.nondefault_schema, kwargs['database'])
def test_get_results_with_schema(self):
from airflow.hooks.hive_hooks import HiveServer2Hook
            from mock import MagicMock
            # Configure
            sql = "select 1"
            schema = self.nondefault_schema
            hook = HiveServer2Hook()
            # context-manager mocks must return themselves on __enter__
            cursor_mock = MagicMock(
                __exit__=None,
                execute=None,
                fetchall=[],
            )
            cursor_mock.__enter__.return_value = cursor_mock
            get_conn_mock = MagicMock(
                __exit__=None,
                cursor=cursor_mock,
            )
            get_conn_mock.__enter__.return_value = get_conn_mock
hook.get_conn = get_conn_mock
# Run
hook.get_results(sql, schema)
# Verify
get_conn_mock.assert_called_with(self.nondefault_schema)
        @mock.patch('airflow.hooks.hive_hooks.HiveServer2Hook.get_results',
                    return_value={'data': []})
def test_get_records_with_schema(self, get_results_mock):
from airflow.hooks.hive_hooks import HiveServer2Hook
# Configure
sql = "select 1"
hook = HiveServer2Hook()
# Run
hook.get_records(sql, self.nondefault_schema)
# Verify
            self.assertTrue(get_results_mock.called)
            (args, kwargs) = get_results_mock.call_args_list[0]
self.assertEqual(sql, args[0])
self.assertEqual(self.nondefault_schema, kwargs['schema'])
        @mock.patch('airflow.hooks.hive_hooks.HiveServer2Hook.get_results',
                    return_value={'data': []})
def test_get_pandas_df_with_schema(self, get_results_mock):
from airflow.hooks.hive_hooks import HiveServer2Hook
# Configure
sql = "select 1"
hook = HiveServer2Hook()
# Run
hook.get_pandas_df(sql, self.nondefault_schema)
# Verify
            self.assertTrue(get_results_mock.called)
            (args, kwargs) = get_results_mock.call_args_list[0]
self.assertEqual(sql, args[0])
self.assertEqual(self.nondefault_schema, kwargs['schema'])
class HivePrestoTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
dag = DAG('test_dag_id', default_args=args)
self.dag = dag
self.hql = """
USE airflow;
DROP TABLE IF EXISTS static_babynames_partitioned;
CREATE TABLE IF NOT EXISTS static_babynames_partitioned (
state string,
year string,
name string,
gender string,
num int)
PARTITIONED BY (ds string);
INSERT OVERWRITE TABLE static_babynames_partitioned
PARTITION(ds='{{ ds }}')
SELECT state, year, name, gender, num FROM static_babynames;
"""
def test_hive(self):
import airflow.operators.hive_operator
t = operators.hive_operator.HiveOperator(
task_id='basic_hql', hql=self.hql, dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive_queues(self):
import airflow.operators.hive_operator
t = operators.hive_operator.HiveOperator(
task_id='test_hive_queues', hql=self.hql,
mapred_queue='default', mapred_queue_priority='HIGH',
mapred_job_name='airflow.test_hive_queues',
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive_dryrun(self):
import airflow.operators.hive_operator
t = operators.hive_operator.HiveOperator(
task_id='dry_run_basic_hql', hql=self.hql, dag=self.dag)
t.dry_run()
def test_beeline(self):
import airflow.operators.hive_operator
t = operators.hive_operator.HiveOperator(
task_id='beeline_hql', hive_cli_conn_id='beeline_default',
hql=self.hql, dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_presto(self):
sql = """
SELECT count(1) FROM airflow.static_babynames_partitioned;
"""
import airflow.operators.presto_check_operator
t = operators.presto_check_operator.PrestoCheckOperator(
task_id='presto_check', sql=sql, dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_presto_to_mysql(self):
import airflow.operators.presto_to_mysql
t = operators.presto_to_mysql.PrestoToMySqlTransfer(
task_id='presto_to_mysql_check',
sql="""
SELECT name, count(*) as ccount
FROM airflow.static_babynames
GROUP BY name
""",
mysql_table='test_static_babynames',
mysql_preoperator='TRUNCATE TABLE test_static_babynames;',
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hdfs_sensor(self):
t = operators.sensors.HdfsSensor(
task_id='hdfs_sensor_check',
filepath='hdfs://user/hive/warehouse/airflow.db/static_babynames',
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_webhdfs_sensor(self):
t = operators.sensors.WebHdfsSensor(
task_id='webhdfs_sensor_check',
filepath='hdfs://user/hive/warehouse/airflow.db/static_babynames',
timeout=120,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_sql_sensor(self):
t = operators.sensors.SqlSensor(
task_id='hdfs_sensor_check',
conn_id='presto_default',
sql="SELECT 'x' FROM airflow.static_babynames LIMIT 1;",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive_stats(self):
import airflow.operators.hive_stats_operator
t = operators.hive_stats_operator.HiveStatsCollectionOperator(
task_id='hive_stats_check',
table="airflow.static_babynames_partitioned",
partition={'ds': DEFAULT_DATE_DS},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_named_hive_partition_sensor(self):
t = operators.sensors.NamedHivePartitionSensor(
task_id='hive_partition_check',
partition_names=[
"airflow.static_babynames_partitioned/ds={{ds}}"
],
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_named_hive_partition_sensor_succeeds_on_multiple_partitions(self):
t = operators.sensors.NamedHivePartitionSensor(
task_id='hive_partition_check',
partition_names=[
"airflow.static_babynames_partitioned/ds={{ds}}",
"airflow.static_babynames_partitioned/ds={{ds}}"
],
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_named_hive_partition_sensor_parses_partitions_with_periods(self):
t = operators.sensors.NamedHivePartitionSensor.parse_partition_name(
partition="schema.table/part1=this.can.be.an.issue/part2=ok")
self.assertEqual(t[0], "schema")
self.assertEqual(t[1], "table")
        self.assertEqual(t[2], "part1=this.can.be.an.issue/part2=ok")
@nose.tools.raises(airflow.exceptions.AirflowSensorTimeout)
def test_named_hive_partition_sensor_times_out_on_nonexistent_partition(self):
t = operators.sensors.NamedHivePartitionSensor(
task_id='hive_partition_check',
partition_names=[
"airflow.static_babynames_partitioned/ds={{ds}}",
"airflow.static_babynames_partitioned/ds=nonexistent"
],
poke_interval=0.1,
timeout=1,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive_partition_sensor(self):
t = operators.sensors.HivePartitionSensor(
task_id='hive_partition_check',
table='airflow.static_babynames_partitioned',
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive_metastore_sql_sensor(self):
t = operators.sensors.MetastorePartitionSensor(
task_id='hive_partition_check',
table='airflow.static_babynames_partitioned',
partition_name='ds={}'.format(DEFAULT_DATE_DS),
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive2samba(self):
import airflow.operators.hive_to_samba_operator
t = operators.hive_to_samba_operator.Hive2SambaOperator(
task_id='hive2samba_check',
samba_conn_id='tableau_samba',
hql="SELECT * FROM airflow.static_babynames LIMIT 10000",
destination_filepath='test_airflow.csv',
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive_to_mysql(self):
import airflow.operators.hive_to_mysql
t = operators.hive_to_mysql.HiveToMySqlTransfer(
mysql_conn_id='airflow_db',
task_id='hive_to_mysql_check',
create=True,
sql="""
SELECT name
FROM airflow.static_babynames
LIMIT 100
""",
mysql_table='test_static_babynames',
mysql_preoperator=[
'DROP TABLE IF EXISTS test_static_babynames;',
'CREATE TABLE test_static_babynames (name VARCHAR(500))',
],
dag=self.dag)
t.clear(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
|
apache-2.0
|
lorenzo-desantis/mne-python
|
mne/evoked.py
|
2
|
52124
|
# Authors: Alexandre Gramfort <[email protected]>
# Matti Hamalainen <[email protected]>
# Denis Engemann <[email protected]>
# Andrew Dykstra <[email protected]>
# Mads Jensen <[email protected]>
#
# License: BSD (3-clause)
from copy import deepcopy
import numpy as np
import warnings
from .baseline import rescale
from .channels.channels import (ContainsMixin, UpdateChannelsMixin,
SetChannelsMixin, InterpolationMixin,
equalize_channels)
from .filter import resample, detrend, FilterMixin
from .fixes import in1d
from .utils import check_fname, logger, verbose, object_hash, _time_mask
from .viz import (plot_evoked, plot_evoked_topomap, plot_evoked_field,
plot_evoked_image, plot_evoked_topo)
from .viz.evoked import _plot_evoked_white
from .externals.six import string_types
from .io.constants import FIFF
from .io.open import fiff_open
from .io.tag import read_tag
from .io.tree import dir_tree_find
from .io.pick import channel_type, pick_types
from .io.meas_info import read_meas_info, write_meas_info
from .io.proj import ProjMixin
from .io.write import (start_file, start_block, end_file, end_block,
write_int, write_string, write_float_matrix,
write_id)
from .io.base import ToDataFrameMixin
_aspect_dict = {'average': FIFF.FIFFV_ASPECT_AVERAGE,
'standard_error': FIFF.FIFFV_ASPECT_STD_ERR}
_aspect_rev = {str(FIFF.FIFFV_ASPECT_AVERAGE): 'average',
str(FIFF.FIFFV_ASPECT_STD_ERR): 'standard_error'}
class Evoked(ProjMixin, ContainsMixin, UpdateChannelsMixin,
SetChannelsMixin, InterpolationMixin, FilterMixin,
ToDataFrameMixin):
"""Evoked data
Parameters
----------
fname : string
Name of evoked/average FIF file to load.
If None no data is loaded.
condition : int, or str
Dataset ID number (int) or comment/name (str). Optional if there is
only one data set in file.
baseline : tuple or list of length 2, or None
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
        If baseline is equal to (None, None) all the time
interval is used. If None, no correction is applied.
proj : bool, optional
Apply SSP projection vectors
kind : str
Either 'average' or 'standard_error'. The type of data to read.
Only used if 'condition' is a str.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Attributes
----------
info : dict
Measurement info.
ch_names : list of string
List of channels' names.
nave : int
Number of averaged epochs.
kind : str
Type of data, either average or standard_error.
first : int
First time sample.
last : int
Last time sample.
comment : string
Comment on dataset. Can be the condition.
times : array
Array of time instants in seconds.
data : array of shape (n_channels, n_times)
Evoked response.
verbose : bool, str, int, or None.
See above.
"""
@verbose
def __init__(self, fname, condition=None, baseline=None, proj=True,
kind='average', verbose=None):
if fname is None:
raise ValueError('No evoked filename specified')
self.verbose = verbose
logger.info('Reading %s ...' % fname)
f, tree, _ = fiff_open(fname)
with f as fid:
if not isinstance(proj, bool):
raise ValueError(r"'proj' must be 'True' or 'False'")
# Read the measurement info
info, meas = read_meas_info(fid, tree)
info['filename'] = fname
# Locate the data of interest
processed = dir_tree_find(meas, FIFF.FIFFB_PROCESSED_DATA)
if len(processed) == 0:
raise ValueError('Could not find processed data')
evoked_node = dir_tree_find(meas, FIFF.FIFFB_EVOKED)
if len(evoked_node) == 0:
raise ValueError('Could not find evoked data')
# find string-based entry
if isinstance(condition, string_types):
if kind not in _aspect_dict.keys():
raise ValueError('kind must be "average" or '
'"standard_error"')
comments, aspect_kinds, t = _get_entries(fid, evoked_node)
goods = np.logical_and(in1d(comments, [condition]),
in1d(aspect_kinds,
[_aspect_dict[kind]]))
found_cond = np.where(goods)[0]
if len(found_cond) != 1:
raise ValueError('condition "%s" (%s) not found, out of '
'found datasets:\n %s'
% (condition, kind, t))
condition = found_cond[0]
if condition >= len(evoked_node) or condition < 0:
fid.close()
raise ValueError('Data set selector out of range')
my_evoked = evoked_node[condition]
# Identify the aspects
aspects = dir_tree_find(my_evoked, FIFF.FIFFB_ASPECT)
if len(aspects) > 1:
logger.info('Multiple aspects found. Taking first one.')
my_aspect = aspects[0]
# Now find the data in the evoked block
nchan = 0
sfreq = -1
chs = []
comment = None
for k in range(my_evoked['nent']):
my_kind = my_evoked['directory'][k].kind
pos = my_evoked['directory'][k].pos
if my_kind == FIFF.FIFF_COMMENT:
tag = read_tag(fid, pos)
comment = tag.data
elif my_kind == FIFF.FIFF_FIRST_SAMPLE:
tag = read_tag(fid, pos)
first = int(tag.data)
elif my_kind == FIFF.FIFF_LAST_SAMPLE:
tag = read_tag(fid, pos)
last = int(tag.data)
elif my_kind == FIFF.FIFF_NCHAN:
tag = read_tag(fid, pos)
nchan = int(tag.data)
elif my_kind == FIFF.FIFF_SFREQ:
tag = read_tag(fid, pos)
sfreq = float(tag.data)
elif my_kind == FIFF.FIFF_CH_INFO:
tag = read_tag(fid, pos)
chs.append(tag.data)
if comment is None:
comment = 'No comment'
# Local channel information?
if nchan > 0:
if chs is None:
raise ValueError('Local channel information was not found '
'when it was expected.')
if len(chs) != nchan:
raise ValueError('Number of channels and number of '
'channel definitions are different')
info['chs'] = chs
info['nchan'] = nchan
logger.info(' Found channel information in evoked data. '
'nchan = %d' % nchan)
if sfreq > 0:
info['sfreq'] = sfreq
nsamp = last - first + 1
logger.info(' Found the data of interest:')
logger.info(' t = %10.2f ... %10.2f ms (%s)'
% (1000 * first / info['sfreq'],
1000 * last / info['sfreq'], comment))
if info['comps'] is not None:
logger.info(' %d CTF compensation matrices available'
% len(info['comps']))
# Read the data in the aspect block
nave = 1
epoch = []
for k in range(my_aspect['nent']):
kind = my_aspect['directory'][k].kind
pos = my_aspect['directory'][k].pos
if kind == FIFF.FIFF_COMMENT:
tag = read_tag(fid, pos)
comment = tag.data
elif kind == FIFF.FIFF_ASPECT_KIND:
tag = read_tag(fid, pos)
aspect_kind = int(tag.data)
elif kind == FIFF.FIFF_NAVE:
tag = read_tag(fid, pos)
nave = int(tag.data)
elif kind == FIFF.FIFF_EPOCH:
tag = read_tag(fid, pos)
epoch.append(tag)
logger.info(' nave = %d - aspect type = %d'
% (nave, aspect_kind))
nepoch = len(epoch)
if nepoch != 1 and nepoch != info['nchan']:
raise ValueError('Number of epoch tags is unreasonable '
'(nepoch = %d nchan = %d)'
% (nepoch, info['nchan']))
if nepoch == 1:
# Only one epoch
all_data = epoch[0].data.astype(np.float)
# May need a transpose if the number of channels is one
if all_data.shape[1] == 1 and info['nchan'] == 1:
all_data = all_data.T.astype(np.float)
else:
# Put the old style epochs together
all_data = np.concatenate([e.data[None, :] for e in epoch],
axis=0).astype(np.float)
if all_data.shape[1] != nsamp:
raise ValueError('Incorrect number of samples (%d instead of '
' %d)' % (all_data.shape[1], nsamp))
# Calibrate
cals = np.array([info['chs'][k]['cal'] *
info['chs'][k].get('scale', 1.0)
for k in range(info['nchan'])])
all_data *= cals[:, np.newaxis]
times = np.arange(first, last + 1, dtype=np.float) / info['sfreq']
self.info = info
        # Put the rest together
self.nave = nave
self._aspect_kind = aspect_kind
self.kind = _aspect_rev.get(str(self._aspect_kind), 'Unknown')
self.first = first
self.last = last
self.comment = comment
self.times = times
        # bind info, proj, data to self so apply_proj can be used
        self.data = all_data
if proj:
self.apply_proj()
# Run baseline correction
self.data = rescale(self.data, times, baseline, 'mean', copy=False)
def save(self, fname):
"""Save dataset to file.
Parameters
----------
fname : string
Name of the file where to save the data.
"""
write_evokeds(fname, self)
def __repr__(self):
s = "comment : '%s'" % self.comment
s += ", time : [%f, %f]" % (self.times[0], self.times[-1])
s += ", n_epochs : %d" % self.nave
s += ", n_channels x n_times : %s x %s" % self.data.shape
return "<Evoked | %s>" % s
@property
def ch_names(self):
"""Channel names"""
return self.info['ch_names']
def crop(self, tmin=None, tmax=None, copy=False):
"""Crop data to a given time interval
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
copy : bool
If False epochs is cropped in place.
"""
inst = self if not copy else self.copy()
mask = _time_mask(inst.times, tmin, tmax)
inst.times = inst.times[mask]
inst.first = int(inst.times[0] * inst.info['sfreq'])
inst.last = len(inst.times) + inst.first - 1
inst.data = inst.data[:, mask]
return inst
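    # Example (illustrative): keep only the 0-300 ms window while leaving the
    # original instance untouched by requesting a copy:
    #
    #     >>> cropped = evoked.crop(tmin=0., tmax=0.3, copy=True)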
def shift_time(self, tshift, relative=True):
"""Shift time scale in evoked data
Parameters
----------
tshift : float
The amount of time shift to be applied if relative is True
            else the first time point. When relative is True, a positive value
            of tshift moves the data forward, while a negative value moves it
backward.
relative : bool
If true, move the time backwards or forwards by specified amount.
Else, set the starting time point to the value of tshift.
Notes
-----
Maximum accuracy of time shift is 1 / evoked.info['sfreq']
"""
times = self.times
sfreq = self.info['sfreq']
offset = self.first if relative else 0
self.first = int(tshift * sfreq) + offset
self.last = self.first + len(times) - 1
self.times = np.arange(self.first, self.last + 1,
dtype=np.float) / sfreq
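    # Example (illustrative): a relative shift moves the data by the given
    # amount, while relative=False re-defines the first time point:
    #
    #     >>> evoked.shift_time(0.01)                  # shift forward by 10 ms
    #     >>> evoked.shift_time(-0.1, relative=False)  # first sample at -100 ms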
def plot(self, picks=None, exclude='bads', unit=True, show=True, ylim=None,
xlim='tight', proj=False, hline=None, units=None, scalings=None,
titles=None, axes=None):
"""Plot evoked data as butterfly plots
Note: If bad channels are not excluded they are shown in red.
Parameters
----------
picks : array-like of int | None
The indices of channels to plot. If None show all.
exclude : list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the
bad channels are excluded.
unit : bool
Scale plot with channel (SI) unit.
show : bool
Call pyplot.show() at the end or not.
ylim : dict
ylim for plots. e.g. ylim = dict(eeg=[-200e-6, 200e-6])
Valid keys are eeg, mag, grad
xlim : 'tight' | tuple | None
xlim for plots.
proj : bool | 'interactive'
If true SSP projections are applied before display. If
'interactive', a check box for reversible selection of SSP
projection vectors will be shown.
hline : list of floats | None
            The values at which to show a horizontal line.
units : dict | None
            The units of the channel types used for axes labels. If None,
defaults to `dict(eeg='uV', grad='fT/cm', mag='fT')`.
scalings : dict | None
The scalings of the channel types to be applied for plotting.
If None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
titles : dict | None
The titles associated with the channels. If None, defaults to
`dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')`.
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channel types. If instance of
Axes, there must be only one channel type plotted.
"""
return plot_evoked(self, picks=picks, exclude=exclude, unit=unit,
show=show, ylim=ylim, proj=proj, xlim=xlim,
hline=hline, units=units, scalings=scalings,
titles=titles, axes=axes)
def plot_image(self, picks=None, exclude='bads', unit=True, show=True,
clim=None, xlim='tight', proj=False, units=None,
scalings=None, titles=None, axes=None, cmap='RdBu_r'):
"""Plot evoked data as images
Parameters
----------
picks : array-like of int | None
The indices of channels to plot. If None show all.
exclude : list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the
bad channels are excluded.
unit : bool
Scale plot with channel (SI) unit.
show : bool
Call pyplot.show() at the end or not.
clim : dict
            clim for images. e.g. clim = dict(eeg=[-200e-6, 200e-6])
Valid keys are eeg, mag, grad
xlim : 'tight' | tuple | None
xlim for plots.
proj : bool | 'interactive'
If true SSP projections are applied before display. If
'interactive', a check box for reversible selection of SSP
projection vectors will be shown.
units : dict | None
            The units of the channel types used for axes labels. If None,
defaults to `dict(eeg='uV', grad='fT/cm', mag='fT')`.
scalings : dict | None
The scalings of the channel types to be applied for plotting.
If None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
titles : dict | None
The titles associated with the channels. If None, defaults to
`dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')`.
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channel types. If instance of
Axes, there must be only one channel type plotted.
cmap : matplotlib colormap
Colormap.
"""
return plot_evoked_image(self, picks=picks, exclude=exclude, unit=unit,
show=show, clim=clim, proj=proj, xlim=xlim,
units=units, scalings=scalings,
titles=titles, axes=axes, cmap=cmap)
def plot_topo(self, layout=None, layout_scale=0.945, color=None,
border='none', ylim=None, scalings=None, title=None,
proj=False, vline=[0.0], fig_facecolor='k',
fig_background=None, axis_facecolor='k', font_color='w',
show=True):
"""Plot 2D topography of evoked responses.
Clicking on the plot of an individual sensor opens a new figure showing
the evoked response for the selected sensor.
Parameters
----------
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
layout_scale: float
Scaling factor for adjusting the relative size of the layout
on the canvas
color : list of color objects | color object | None
Everything matplotlib accepts to specify colors. If not list-like,
the color specified will be repeated. If None, colors are
automatically drawn.
border : str
matplotlib borders style to be used for each sensor plot.
ylim : dict | None
ylim for plots. The value determines the upper and lower subplot
            limits. e.g. ylim = dict(eeg=[-200e-6, 200e-6]). Valid keys are eeg,
mag, grad, misc. If None, the ylim parameter for each channel is
determined by the maximum absolute peak.
scalings : dict | None
The scalings of the channel types to be applied for plotting. If
None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
title : str
Title of the figure.
proj : bool | 'interactive'
If true SSP projections are applied before display. If
'interactive', a check box for reversible selection of SSP
projection vectors will be shown.
vline : list of floats | None
The values at which to show a vertical line.
fig_facecolor : str | obj
The figure face color. Defaults to black.
fig_background : None | numpy ndarray
A background image for the figure. This must work with a call to
plt.imshow. Defaults to None.
axis_facecolor : str | obj
The face color to be used for each sensor plot. Defaults to black.
font_color : str | obj
The color of text in the colorbar and title. Defaults to white.
show : bool
Show figure if True.
Returns
-------
fig : Instance of matplotlib.figure.Figure
Images of evoked responses at sensor locations
.. versionadded:: 0.10.0
"""
return plot_evoked_topo(self, layout=layout, layout_scale=layout_scale,
color=color, border=border, ylim=ylim,
scalings=scalings, title=title, proj=proj,
vline=vline, fig_facecolor=fig_facecolor,
fig_background=fig_background,
axis_facecolor=axis_facecolor,
font_color=font_color, show=show)
def plot_topomap(self, times=None, ch_type=None, layout=None, vmin=None,
vmax=None, cmap='RdBu_r', sensors=True, colorbar=True,
scale=None, scale_time=1e3, unit=None, res=64, size=1,
cbar_fmt="%3.1f", time_format='%01d ms', proj=False,
show=True, show_names=False, title=None, mask=None,
mask_params=None, outlines='head', contours=6,
image_interp='bilinear', average=None, head_pos=None,
axes=None):
"""Plot topographic maps of specific time points
Parameters
----------
times : float | array of floats | None.
The time point(s) to plot. If None, the number of ``axes``
determines the amount of time point(s). If ``axes`` is also None,
10 topographies will be shown with a regular time spacing between
the first and last time instant.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
            The channel type to plot. For 'grad', the gradiometers are
            collected in pairs and the RMS for each pair is plotted.
If None, then channels are chosen in the order given above.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct
layout file is inferred from the data; if no appropriate layout
file was found, the layout is automatically generated from the
sensor locations.
vmin : float | callable
            The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
            The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
but vmax is not, defaults to np.max(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap
Colormap. Defaults to 'RdBu_r'.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True, a circle
will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
scale : dict | float | None
Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
for grad and 1e15 for mag.
scale_time : float | None
Scale the time labels. Defaults to 1e3 (ms).
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : scalar
Side length of the topomaps in inches (only applies when plotting
multiple topomaps at a time).
cbar_fmt : str
String format for colorbar values.
time_format : str
String format for topomap values. Defaults to ``"%01d ms"``.
proj : bool | 'interactive'
If true SSP projections are applied before display. If
'interactive', a check box for reversible selection of SSP
projection vectors will be shown.
show : bool
Call pyplot.show() at the end.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function
lambda x: x.replace('MEG ', ''). If `mask` is not None, only
significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
            Indices set to `True` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals:
``dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)``.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw. If 0, no contours will be
drawn.
image_interp : str
The image interpolation to be used. All matplotlib options are
accepted.
average : float | None
The time window around a given time to be used for averaging
            (seconds). For example, 0.01 would translate into a window that
starts 5 ms before and ends 5 ms after a given time point.
Defaults to None, which means no averaging.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as ``times`` (unless ``times`` is None). If
instance of Axes, ``times`` must be a float or a list of one float.
Defaults to None.
"""
return plot_evoked_topomap(self, times=times, ch_type=ch_type,
layout=layout, vmin=vmin,
vmax=vmax, cmap=cmap, sensors=sensors,
colorbar=colorbar, scale=scale,
scale_time=scale_time,
unit=unit, res=res, proj=proj, size=size,
cbar_fmt=cbar_fmt, time_format=time_format,
show=show, show_names=show_names,
title=title, mask=mask,
mask_params=mask_params,
outlines=outlines, contours=contours,
image_interp=image_interp,
average=average, head_pos=head_pos,
axes=axes)
def plot_field(self, surf_maps, time=None, time_label='t = %0.0f ms',
n_jobs=1):
"""Plot MEG/EEG fields on head surface and helmet in 3D
Parameters
----------
surf_maps : list
The surface mapping information obtained with make_field_map.
time : float | None
The time point at which the field map shall be displayed. If None,
the average peak latency (across sensor types) is used.
time_label : str
How to print info about the time instant visualized.
n_jobs : int
Number of jobs to run in parallel.
Returns
-------
fig : instance of mlab.Figure
The mayavi figure.
"""
return plot_evoked_field(self, surf_maps, time=time,
time_label=time_label, n_jobs=n_jobs)
def plot_white(self, noise_cov, show=True):
"""Plot whitened evoked response
Plots the whitened evoked response and the whitened GFP as described in
[1]_. If one single covariance object is passed, the GFP panel (bottom)
will depict different sensor types. If multiple covariance objects are
passed as a list, the left column will display the whitened evoked
responses for each channel based on the whitener from the noise
        covariance that has the highest log-likelihood. The right column will
depict the whitened GFPs based on each estimator separately for each
sensor type. Instead of numbers of channels the GFP display shows the
estimated rank. The rank estimation will be printed by the logger for
each noise covariance estimator that is passed.
Parameters
----------
noise_cov : list | instance of Covariance | str
The noise covariance as computed by ``mne.cov.compute_covariance``.
show : bool
Whether to show the figure or not. Defaults to True.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure object containing the plot.
References
----------
.. [1] Engemann D. and Gramfort A. (2015) Automated model selection in
covariance estimation and spatial whitening of MEG and EEG
signals, vol. 108, 328-342, NeuroImage.
Notes
-----
.. versionadded:: 0.9.0
"""
return _plot_evoked_white(self, noise_cov=noise_cov, scalings=None,
rank=None, show=show)
def as_type(self, ch_type='grad', mode='fast'):
"""Compute virtual evoked using interpolated fields in mag/grad channels.
.. Warning:: Using virtual evoked to compute inverse can yield
unexpected results. The virtual channels have `'_virtual'` appended
at the end of the names to emphasize that the data contained in
them are interpolated.
Parameters
----------
ch_type : str
The destination channel type. It can be 'mag' or 'grad'.
mode : str
Either `'accurate'` or `'fast'`, determines the quality of the
Legendre polynomial expansion used. `'fast'` should be sufficient
for most applications.
Returns
-------
evoked : instance of mne.Evoked
The transformed evoked object containing only virtual channels.
Notes
-----
.. versionadded:: 0.9.0
"""
from .forward import _as_meg_type_evoked
return _as_meg_type_evoked(self, ch_type=ch_type, mode=mode)
def resample(self, sfreq, npad=100, window='boxcar'):
"""Resample data
This function operates in-place.
Parameters
----------
sfreq : float
New sample rate to use
npad : int
Amount to pad the start and end of the data.
window : string or tuple
Window to use in resampling. See scipy.signal.resample.
"""
o_sfreq = self.info['sfreq']
self.data = resample(self.data, sfreq, o_sfreq, npad, -1, window)
# adjust indirectly affected variables
self.info['sfreq'] = sfreq
self.times = (np.arange(self.data.shape[1], dtype=np.float) / sfreq +
self.times[0])
self.first = int(self.times[0] * self.info['sfreq'])
self.last = len(self.times) + self.first - 1
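    # Example (illustrative): downsample in place to 200 Hz; times, first and
    # last are updated accordingly:
    #
    #     >>> evoked.resample(200.)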
def detrend(self, order=1, picks=None):
"""Detrend data
This function operates in-place.
Parameters
----------
order : int
Either 0 or 1, the order of the detrending. 0 is a constant
(DC) detrend, 1 is a linear detrend.
picks : array-like of int | None
If None only MEG and EEG channels are detrended.
"""
if picks is None:
picks = pick_types(self.info, meg=True, eeg=True, ref_meg=False,
stim=False, eog=False, ecg=False, emg=False,
exclude='bads')
self.data[picks] = detrend(self.data[picks], order, axis=-1)
def copy(self):
"""Copy the instance of evoked
Returns
-------
evoked : instance of Evoked
"""
evoked = deepcopy(self)
return evoked
def __add__(self, evoked):
"""Add evoked taking into account number of epochs"""
out = combine_evoked([self, evoked])
out.comment = self.comment + " + " + evoked.comment
return out
def __sub__(self, evoked):
"""Add evoked taking into account number of epochs"""
this_evoked = deepcopy(evoked)
this_evoked.data *= -1.
out = combine_evoked([self, this_evoked])
if self.comment is None or this_evoked.comment is None:
warnings.warn('evoked.comment expects a string but is None')
out.comment = 'unknown'
else:
out.comment = self.comment + " - " + this_evoked.comment
return out
def __hash__(self):
return object_hash(dict(info=self.info, data=self.data))
def get_peak(self, ch_type=None, tmin=None, tmax=None, mode='abs',
time_as_index=False):
"""Get location and latency of peak amplitude
Parameters
----------
ch_type : {'mag', 'grad', 'eeg', 'misc', None}
The channel type to use. Defaults to None. If more than one sensor
            type is present in the data, the channel type has to be explicitly
set.
tmin : float | None
The minimum point in time to be considered for peak getting.
tmax : float | None
The maximum point in time to be considered for peak getting.
mode : {'pos', 'neg', 'abs'}
How to deal with the sign of the data. If 'pos' only positive
values will be considered. If 'neg' only negative values will
be considered. If 'abs' absolute values will be considered.
Defaults to 'abs'.
time_as_index : bool
Whether to return the time index instead of the latency in seconds.
Returns
-------
ch_name : str
The channel exhibiting the maximum response.
latency : float | int
The time point of the maximum response, either latency in seconds
or index.
"""
supported = ('mag', 'grad', 'eeg', 'misc', 'None')
data_picks = pick_types(self.info, meg=True, eeg=True, ref_meg=False)
types_used = set([channel_type(self.info, idx) for idx in data_picks])
if str(ch_type) not in supported:
raise ValueError('Channel type must be `{supported}`. You gave me '
'`{ch_type}` instead.'
.format(ch_type=ch_type,
supported='` or `'.join(supported)))
elif ch_type is not None and ch_type not in types_used:
raise ValueError('Channel type `{ch_type}` not found in this '
'evoked object.'.format(ch_type=ch_type))
elif len(types_used) > 1 and ch_type is None:
raise RuntimeError('More than one sensor type found. `ch_type` '
'must not be `None`, pass a sensor type '
'value instead')
meg, eeg, misc, picks = False, False, False, None
if ch_type == 'mag':
meg = ch_type
elif ch_type == 'grad':
meg = ch_type
elif ch_type == 'eeg':
eeg = True
elif ch_type == 'misc':
misc = True
if ch_type is not None:
picks = pick_types(self.info, meg=meg, eeg=eeg, misc=misc,
ref_meg=False)
data = self.data if picks is None else self.data[picks]
ch_idx, time_idx = _get_peak(data, self.times, tmin,
tmax, mode)
return (self.ch_names[ch_idx],
time_idx if time_as_index else self.times[time_idx])
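# Example (illustrative): Evoked.get_peak can be used to find the
# magnetometer with the largest absolute response between 50 and 150 ms:
#
#     >>> ch_name, latency = evoked.get_peak(ch_type='mag',
#     ...                                    tmin=0.05, tmax=0.15)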
class EvokedArray(Evoked):
"""Evoked object from numpy array
Parameters
----------
data : array of shape (n_channels, n_times)
The channels' evoked response.
info : instance of Info
Info dictionary. Consider using ``create_info`` to populate
this structure.
tmin : float
Start time before event.
comment : string
Comment on dataset. Can be the condition. Defaults to ''.
nave : int
Number of averaged epochs. Defaults to 1.
kind : str
Type of data, either average or standard_error. Defaults to 'average'.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to raw.verbose.
See Also
--------
EpochsArray, io.RawArray, create_info
"""
@verbose
def __init__(self, data, info, tmin, comment='', nave=1, kind='average',
verbose=None):
dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64
data = np.asanyarray(data, dtype=dtype)
if data.ndim != 2:
raise ValueError('Data must be a 2D array of shape (n_channels, '
'n_samples)')
if len(info['ch_names']) != np.shape(data)[0]:
raise ValueError('Info (%s) and data (%s) must have same number '
'of channels.' % (len(info['ch_names']),
np.shape(data)[0]))
self.data = data
# XXX: this should use round and be tested
self.first = int(tmin * info['sfreq'])
self.last = self.first + np.shape(data)[-1] - 1
self.times = np.arange(self.first, self.last + 1,
dtype=np.float) / info['sfreq']
self.info = info
self.nave = nave
self.kind = kind
self.comment = comment
self.picks = None
self.verbose = verbose
self._projector = None
if self.kind == 'average':
self._aspect_kind = _aspect_dict['average']
else:
self._aspect_kind = _aspect_dict['standard_error']
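# A minimal construction sketch for EvokedArray (channel names, sampling rate
# and values are made up; create_info comes from mne):
#
#     >>> import numpy as np
#     >>> from mne import create_info
#     >>> info = create_info(['EEG 001', 'EEG 002'], 1000., 'eeg')
#     >>> data = np.zeros((2, 500))
#     >>> evoked = EvokedArray(data, info, tmin=-0.1, comment='simulated')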
def _get_entries(fid, evoked_node):
"""Helper to get all evoked entries"""
comments = list()
aspect_kinds = list()
for ev in evoked_node:
for k in range(ev['nent']):
my_kind = ev['directory'][k].kind
pos = ev['directory'][k].pos
if my_kind == FIFF.FIFF_COMMENT:
tag = read_tag(fid, pos)
comments.append(tag.data)
my_aspect = dir_tree_find(ev, FIFF.FIFFB_ASPECT)[0]
for k in range(my_aspect['nent']):
my_kind = my_aspect['directory'][k].kind
pos = my_aspect['directory'][k].pos
if my_kind == FIFF.FIFF_ASPECT_KIND:
tag = read_tag(fid, pos)
aspect_kinds.append(int(tag.data))
comments = np.atleast_1d(comments)
aspect_kinds = np.atleast_1d(aspect_kinds)
if len(comments) != len(aspect_kinds) or len(comments) == 0:
fid.close()
raise ValueError('Dataset names in FIF file '
'could not be found.')
t = [_aspect_rev.get(str(a), 'Unknown') for a in aspect_kinds]
t = ['"' + c + '" (' + tt + ')' for tt, c in zip(t, comments)]
t = ' ' + '\n '.join(t)
return comments, aspect_kinds, t
def _get_evoked_node(fname):
"""Helper to get info in evoked file"""
f, tree, _ = fiff_open(fname)
with f as fid:
_, meas = read_meas_info(fid, tree)
evoked_node = dir_tree_find(meas, FIFF.FIFFB_EVOKED)
return evoked_node
def grand_average(all_evoked, interpolate_bads=True):
"""Make grand average of a list evoked data
The function interpolates bad channels based on `interpolate_bads`
parameter. If `interpolate_bads` is True, the grand average
file will contain good channels and the bad channels interpolated
from the good MEG/EEG channels.
    The grand_average.nave attribute will be equal to the number
of evoked datasets used to calculate the grand average.
Note: Grand average evoked shall not be used for source localization.
Parameters
----------
all_evoked : list of Evoked data
The evoked datasets.
interpolate_bads : bool
If True, bad MEG and EEG channels are interpolated.
Returns
-------
grand_average : Evoked
The grand average data.
Notes
-----
.. versionadded:: 0.9.0
"""
# check if all elements in the given list are evoked data
if not all(isinstance(e, Evoked) for e in all_evoked):
raise ValueError("Not all the elements in list are evoked data")
# Copy channels to leave the original evoked datasets intact.
all_evoked = [e.copy() for e in all_evoked]
# Interpolates if necessary
if interpolate_bads:
all_evoked = [e.interpolate_bads() if len(e.info['bads']) > 0
else e for e in all_evoked]
equalize_channels(all_evoked) # apply equalize_channels
# make grand_average object using combine_evoked
grand_average = combine_evoked(all_evoked, weights='equal')
# change the grand_average.nave to the number of Evokeds
grand_average.nave = len(all_evoked)
# change comment field
grand_average.comment = "Grand average (n = %d)" % grand_average.nave
return grand_average
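# Example (illustrative): average three subjects' evoked responses with equal
# weights; nave ends up being the number of inputs:
#
#     >>> ga = grand_average([evoked_s1, evoked_s2, evoked_s3])
#     >>> ga.nave
#     3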
def combine_evoked(all_evoked, weights='nave'):
"""Merge evoked data by weighted addition
Data should have the same channels and the same time instants.
Subtraction can be performed by passing negative weights (e.g., [1, -1]).
Parameters
----------
all_evoked : list of Evoked
The evoked datasets.
weights : list of float | str
The weights to apply to the data of each evoked instance.
Can also be ``'nave'`` to weight according to evoked.nave,
or ``"equal"`` to use equal weighting (each weighted as ``1/N``).
Returns
-------
evoked : Evoked
The new evoked data.
Notes
-----
.. versionadded:: 0.9.0
"""
evoked = all_evoked[0].copy()
if isinstance(weights, string_types):
if weights not in ('nave', 'equal'):
raise ValueError('weights must be a list of float, or "nave" or '
'"equal"')
if weights == 'nave':
weights = np.array([e.nave for e in all_evoked], float)
weights /= weights.sum()
else: # == 'equal'
weights = [1. / len(all_evoked)] * len(all_evoked)
weights = np.array(weights, float)
if weights.ndim != 1 or weights.size != len(all_evoked):
raise ValueError('weights must be the same size as all_evoked')
ch_names = evoked.ch_names
for e in all_evoked[1:]:
assert e.ch_names == ch_names, ValueError("%s and %s do not contain "
"the same channels"
% (evoked, e))
assert np.max(np.abs(e.times - evoked.times)) < 1e-7, \
ValueError("%s and %s do not contain the same time instants"
% (evoked, e))
# use union of bad channels
bads = list(set(evoked.info['bads']).union(*(ev.info['bads']
for ev in all_evoked[1:])))
evoked.info['bads'] = bads
evoked.data = sum(w * e.data for w, e in zip(weights, all_evoked))
evoked.nave = max(int(1. / sum(w ** 2 / e.nave
for w, e in zip(weights, all_evoked))), 1)
return evoked
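# Example (illustrative): a difference wave can be formed by passing signed
# weights, while 'nave' weighting pools conditions by their number of
# averaged epochs:
#
#     >>> diff = combine_evoked([evoked_a, evoked_b], weights=[1, -1])
#     >>> pooled = combine_evoked([evoked_a, evoked_b], weights='nave')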
@verbose
def read_evokeds(fname, condition=None, baseline=None, kind='average',
proj=True, verbose=None):
"""Read evoked dataset(s)
Parameters
----------
fname : string
The file name, which should end with -ave.fif or -ave.fif.gz.
condition : int or str | list of int or str | None
The index or list of indices of the evoked dataset to read. FIF files
can contain multiple datasets. If None, all datasets are returned as a
list.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction. If None do not apply
it. If baseline is (a, b) the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used and if b is None then
b is set to the end of the interval. If baseline is equal to
(None, None) all the time interval is used.
kind : str
Either 'average' or 'standard_error', the type of data to read.
proj : bool
If False, available projectors won't be applied to the data.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
evoked : Evoked (if condition is int or str) or list of Evoked (if
condition is None or list)
The evoked dataset(s).
See Also
--------
write_evokeds
"""
check_fname(fname, 'evoked', ('-ave.fif', '-ave.fif.gz'))
return_list = True
if condition is None:
evoked_node = _get_evoked_node(fname)
condition = range(len(evoked_node))
elif not isinstance(condition, list):
condition = [condition]
return_list = False
out = [Evoked(fname, c, baseline=baseline, kind=kind, proj=proj,
verbose=verbose) for c in condition]
return out if return_list else out[0]
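# Example (illustrative file name): with condition=None every dataset in the
# file is returned as a list; a single condition returns one Evoked:
#
#     >>> evokeds = read_evokeds('subject-ave.fif')
#     >>> aud = read_evokeds('subject-ave.fif', condition='Auditory',
#     ...                    baseline=(None, 0))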
def write_evokeds(fname, evoked):
"""Write an evoked dataset to a file
Parameters
----------
fname : string
The file name, which should end with -ave.fif or -ave.fif.gz.
evoked : Evoked instance, or list of Evoked instances
The evoked dataset, or list of evoked datasets, to save in one file.
Note that the measurement info from the first evoked instance is used,
so be sure that information matches.
See Also
--------
read_evokeds
"""
check_fname(fname, 'evoked', ('-ave.fif', '-ave.fif.gz'))
if not isinstance(evoked, list):
evoked = [evoked]
# Create the file and save the essentials
with start_file(fname) as fid:
start_block(fid, FIFF.FIFFB_MEAS)
write_id(fid, FIFF.FIFF_BLOCK_ID)
if evoked[0].info['meas_id'] is not None:
write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, evoked[0].info['meas_id'])
# Write measurement info
write_meas_info(fid, evoked[0].info)
# One or more evoked data sets
start_block(fid, FIFF.FIFFB_PROCESSED_DATA)
for e in evoked:
start_block(fid, FIFF.FIFFB_EVOKED)
# Comment is optional
if e.comment is not None and len(e.comment) > 0:
write_string(fid, FIFF.FIFF_COMMENT, e.comment)
# First and last sample
write_int(fid, FIFF.FIFF_FIRST_SAMPLE, e.first)
write_int(fid, FIFF.FIFF_LAST_SAMPLE, e.last)
# The epoch itself
start_block(fid, FIFF.FIFFB_ASPECT)
write_int(fid, FIFF.FIFF_ASPECT_KIND, e._aspect_kind)
write_int(fid, FIFF.FIFF_NAVE, e.nave)
decal = np.zeros((e.info['nchan'], 1))
for k in range(e.info['nchan']):
decal[k] = 1.0 / (e.info['chs'][k]['cal'] *
e.info['chs'][k].get('scale', 1.0))
write_float_matrix(fid, FIFF.FIFF_EPOCH, decal * e.data)
end_block(fid, FIFF.FIFFB_ASPECT)
end_block(fid, FIFF.FIFFB_EVOKED)
end_block(fid, FIFF.FIFFB_PROCESSED_DATA)
end_block(fid, FIFF.FIFFB_MEAS)
end_file(fid)
def _get_peak(data, times, tmin=None, tmax=None, mode='abs'):
"""Get feature-index and time of maximum signal from 2D array
Note. This is a 'getter', not a 'finder'. For non-evoked type
data and continuous signals, please use proper peak detection algorithms.
Parameters
----------
data : instance of numpy.ndarray (n_locations, n_times)
The data, either evoked in sensor or source space.
times : instance of numpy.ndarray (n_times)
The times in seconds.
tmin : float | None
The minimum point in time to be considered for peak getting.
tmax : float | None
The maximum point in time to be considered for peak getting.
mode : {'pos', 'neg', 'abs'}
How to deal with the sign of the data. If 'pos' only positive
values will be considered. If 'neg' only negative values will
be considered. If 'abs' absolute values will be considered.
Defaults to 'abs'.
Returns
-------
max_loc : int
The index of the feature with the maximum value.
max_time : int
The time point of the maximum response, index.
"""
modes = ('abs', 'neg', 'pos')
if mode not in modes:
raise ValueError('The `mode` parameter must be `{modes}`. You gave '
'me `{mode}`'.format(modes='` or `'.join(modes),
mode=mode))
if tmin is None:
tmin = times[0]
if tmax is None:
tmax = times[-1]
if tmin < times.min():
raise ValueError('The tmin value is out of bounds. It must be '
'within {0} and {1}'.format(times.min(), times.max()))
if tmax > times.max():
        raise ValueError('The tmax value is out of bounds. It must be '
'within {0} and {1}'.format(times.min(), times.max()))
if tmin >= tmax:
raise ValueError('The tmin must be smaller than tmax')
time_win = (times >= tmin) & (times <= tmax)
mask = np.ones_like(data).astype(np.bool)
mask[:, time_win] = False
maxfun = np.argmax
if mode == 'pos':
if not np.any(data > 0):
raise ValueError('No positive values encountered. Cannot '
'operate in pos mode.')
elif mode == 'neg':
if not np.any(data < 0):
raise ValueError('No negative values encountered. Cannot '
'operate in neg mode.')
maxfun = np.argmin
masked_index = np.ma.array(np.abs(data) if mode == 'abs' else data,
mask=mask)
max_loc, max_time = np.unravel_index(maxfun(masked_index), data.shape)
return max_loc, max_time
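# A small self-contained check of _get_peak (values are arbitrary): the
# second row holds the largest absolute value (|-4| at the second sample),
# so the returned indices are (1, 1):
#
#     >>> import numpy as np
#     >>> data = np.array([[0., 1., 3., 2.], [0., -4., 1., 0.]])
#     >>> times = np.array([0., 0.1, 0.2, 0.3])
#     >>> _get_peak(data, times)
#     (1, 1)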
|
bsd-3-clause
|
MJuddBooth/pandas
|
pandas/tests/series/test_arithmetic.py
|
2
|
6121
|
# -*- coding: utf-8 -*-
import operator
import numpy as np
import pytest
import pandas as pd
from pandas import Series, compat
from pandas.core.indexes.period import IncompatibleFrequency
import pandas.util.testing as tm
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestSeriesFlexArithmetic(object):
@pytest.mark.parametrize(
'ts',
[
(lambda x: x, lambda x: x * 2, False),
(lambda x: x, lambda x: x[::2], False),
(lambda x: x, lambda x: 5, True),
(lambda x: tm.makeFloatSeries(),
lambda x: tm.makeFloatSeries(),
True)
])
@pytest.mark.parametrize('opname', ['add', 'sub', 'mul', 'floordiv',
'truediv', 'div', 'pow'])
def test_flex_method_equivalence(self, opname, ts):
# check that Series.{opname} behaves like Series.__{opname}__,
tser = tm.makeTimeSeries().rename('ts')
series = ts[0](tser)
other = ts[1](tser)
check_reverse = ts[2]
        if opname == 'div' and compat.PY3:
            pytest.skip('div test is only valid on Py2')
        op = getattr(Series, opname)
        if opname == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
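# The equivalence exercised above, in short (illustrative): Series.add(other)
# gives the same result as the ``+`` operator, and Series.radd mirrors the
# reflected operator:
#
#     >>> import pandas as pd
#     >>> s = pd.Series([1., 2., 3.])
#     >>> s.add(s).equals(s + s)
#     True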
class TestSeriesArithmetic(object):
# Some of these may end up in tests/arithmetic, but are not yet sorted
def test_add_series_with_period_index(self):
rng = pd.period_range('1/1/2000', '1/1/2010', freq='A')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected[1::2] = np.nan
tm.assert_series_equal(result, expected)
result = ts + _permute(ts[::2])
tm.assert_series_equal(result, expected)
msg = "Input has different freq=D from PeriodIndex\\(freq=A-DEC\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
ts + ts.asfreq('D', how="end")
# ------------------------------------------------------------------
# Comparisons
class TestSeriesFlexComparison(object):
def test_comparison_flex_basic(self):
left = pd.Series(np.random.randn(10))
right = pd.Series(np.random.randn(10))
tm.assert_series_equal(left.eq(right), left == right)
tm.assert_series_equal(left.ne(right), left != right)
        tm.assert_series_equal(left.le(right), left <= right)
        tm.assert_series_equal(left.lt(right), left < right)
tm.assert_series_equal(left.gt(right), left > right)
tm.assert_series_equal(left.ge(right), left >= right)
# axis
for axis in [0, None, 'index']:
tm.assert_series_equal(left.eq(right, axis=axis), left == right)
tm.assert_series_equal(left.ne(right, axis=axis), left != right)
            tm.assert_series_equal(left.le(right, axis=axis), left <= right)
            tm.assert_series_equal(left.lt(right, axis=axis), left < right)
tm.assert_series_equal(left.gt(right, axis=axis), left > right)
tm.assert_series_equal(left.ge(right, axis=axis), left >= right)
#
msg = 'No axis named 1 for object type'
        for op in ['eq', 'ne', 'le', 'lt', 'gt', 'ge']:
with pytest.raises(ValueError, match=msg):
getattr(left, op)(right, axis=1)
class TestSeriesComparison(object):
def test_comparison_different_length(self):
a = Series(['a', 'b', 'c'])
b = Series(['b', 'a'])
with pytest.raises(ValueError):
a < b
a = Series([1, 2])
b = Series([2, 3, 4])
with pytest.raises(ValueError):
a == b
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_ser_flex_cmp_return_dtypes(self, opname):
# GH#15115
ser = Series([1, 3, 2], index=range(3))
const = 2
result = getattr(ser, opname)(const).get_dtype_counts()
tm.assert_series_equal(result, Series([1], ['bool']))
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_ser_flex_cmp_return_dtypes_empty(self, opname):
# GH#15115 empty Series case
ser = Series([1, 3, 2], index=range(3))
empty = ser.iloc[:0]
const = 2
result = getattr(empty, opname)(const).get_dtype_counts()
tm.assert_series_equal(result, Series([1], ['bool']))
@pytest.mark.parametrize('op', [operator.eq, operator.ne,
operator.le, operator.lt,
operator.ge, operator.gt])
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('baz', 'baz', 'baz')])
def test_ser_cmp_result_names(self, names, op):
# datetime64 dtype
dti = pd.date_range('1949-06-07 03:00:00',
freq='H', periods=5, name=names[0])
ser = Series(dti).rename(names[1])
result = op(ser, dti)
assert result.name == names[2]
# datetime64tz dtype
dti = dti.tz_localize('US/Central')
ser = Series(dti).rename(names[1])
result = op(ser, dti)
assert result.name == names[2]
# timedelta64 dtype
tdi = dti - dti.shift(1)
ser = Series(tdi).rename(names[1])
result = op(ser, tdi)
assert result.name == names[2]
# categorical
if op in [operator.eq, operator.ne]:
# categorical dtype comparisons raise for inequalities
cidx = tdi.astype('category')
ser = Series(cidx).rename(names[1])
result = op(ser, cidx)
assert result.name == names[2]
|
bsd-3-clause
|
cdr-stats/cdr-stats
|
cdr_stats/aggregator/pandas_cdr.py
|
3
|
8924
|
#
# CDR-Stats License
# http://www.cdr-stats.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2015 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <[email protected]>
#
from django.db import connection
from aggregator.aggregate_cdr import condition_switch_id, condition_user
import pandas as pd
from pandas.io import sql
import time
sqlquery = """
SELECT
#DATEDAY_FORMAT#,
#SECOND_INDEX#,
coalesce(nbcalls,0) AS nbcalls,
coalesce(duration,0) AS duration,
coalesce(billsec,0) AS billsec,
coalesce(buy_cost,0) AS buy_cost,
coalesce(sell_cost,0) AS sell_cost
FROM
generate_series(
date_trunc('#INTERVAL#', %(start_date)s),
date_trunc('#INTERVAL#', %(end_date)s),
'1 #INTERVAL#')
as dateday
LEFT OUTER JOIN (
SELECT
date_trunc('#INTERVAL#', starting_date) as time_interval,
#SECOND_INDEX# as #SECOND_INDEX#,
SUM(nbcalls) as nbcalls,
SUM(duration) as duration,
SUM(billsec) as billsec,
SUM(buy_cost) as buy_cost,
SUM(sell_cost) as sell_cost
FROM matv_voip_cdr_aggr_hour
WHERE
starting_date > date_trunc('#INTERVAL#', %(start_date)s) and
starting_date <= date_trunc('#INTERVAL#', %(end_date)s)
#USER_CONDITION#
#SWITCH_CONDITION#
#COUNTRY_CONDITION#
GROUP BY time_interval, #SECOND_INDEX#
) results
ON (dateday = results.time_interval)"""
def conv_timestamp(date):
"""
function to be used by map to convert list of datetime to timestamp
"""
return int(1000 * time.mktime(date.timetuple()))
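
# Illustrative usage (hypothetical date; the exact value depends on the local
# timezone because time.mktime() interprets naive datetimes as local time):
#
#   import datetime
#   conv_timestamp(datetime.datetime(2015, 1, 1))  # e.g. 1420070400000 on a UTC host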
def get_dataframe_query(query, user, interval, start_date, end_date, switch_id, country_id_list, second_index):
"""
build sql query return the dataframe
"""
upd_query = sqlquery
upd_query = upd_query.replace("#SECOND_INDEX#", second_index)
upd_query = upd_query.replace("#USER_CONDITION#", condition_user(user))
upd_query = upd_query.replace("#DATEDAY_FORMAT#", "dateday AS dateday")
upd_query = upd_query.replace("#SWITCH_CONDITION#", condition_switch_id(switch_id))
upd_query = upd_query.replace("#INTERVAL#", interval)
if country_id_list and len(country_id_list) > 0:
select_country = ", ".join(str(int(l)) for l in country_id_list)
upd_query = upd_query.replace("#COUNTRY_CONDITION#", "AND country_id IN (" + select_country + ")")
else:
upd_query = upd_query.replace("#COUNTRY_CONDITION#", "")
params = {
'start_date': start_date,
'end_date': end_date,
}
df = sql.read_sql_query(upd_query, connection, params=params)
return df
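
# Illustrative note (hypothetical arguments): with interval='day' and
# second_index='switch_id', every '#INTERVAL#' placeholder in sqlquery becomes
# 'day' and every '#SECOND_INDEX#' becomes 'switch_id' before the query is
# executed, e.g. generate_series(date_trunc('day', %(start_date)s), ...) with
# a switch_id column in both SELECT lists.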
def get_report_cdr_per_switch(user, interval, start_date, end_date, switch_id):
"""
Use pandas to prepare series to display cdr hour report per switch
**Attributes**:
- interval: this could be hour / day / week / month
- start_date: start date
- end_date: end date
- switch_id: id of the switch (0 is all)
"""
series = {}
df = get_dataframe_query(sqlquery, user, interval, start_date, end_date, switch_id,
country_id_list=[], second_index="switch_id")
df.update(df.switch_id.fillna(0))
table = pd.tools.pivot.pivot_table(df,
values=['nbcalls', 'duration', 'billsec', 'buy_cost', 'sell_cost'],
index=['dateday'],
columns=['switch_id'],
fill_value=0)
metric_list = ['nbcalls', 'duration', 'billsec', 'buy_cost', 'sell_cost']
    # build a series for each metric
for metric in metric_list:
series[metric] = {}
# list_columns, ie for switches [1.0, 2.0]
list_columns = table[metric].columns.tolist()
list_columns = map(int, list_columns)
# Transpose
ntable = table[metric].T
# Build the result dictionary
series[metric]['columns'] = list_columns
series[metric]['x_date'] = list(table.index)
# convert into timestamp value
series[metric]['x_timestamp'] = map(conv_timestamp, list(table.index))
series[metric]['values'] = {}
valsum = 0
for col in list_columns:
series[metric]['values'][str(col)] = list(ntable.loc[col])
# valsum += map(sum, list(ntable.loc[col]))
for i in list(ntable.loc[col]):
valsum += i
series[metric]['total'] = valsum
return series
def get_report_cdr_per_country(user, interval, start_date, end_date, switch_id, country_id_list):
"""
Use pandas to prepare series to display cdr hour report per country
**Attributes**:
- interval: this could be hour / day / week / month
- start_date: start date
- end_date: end date
- switch_id: id of the switch (0 is all)
"""
series = {}
df = get_dataframe_query(sqlquery, user, interval, start_date, end_date, switch_id,
country_id_list=country_id_list, second_index="country_id")
# print connection.queries
table = pd.tools.pivot.pivot_table(df,
values=['nbcalls', 'duration', 'billsec', 'buy_cost', 'sell_cost'],
index=['dateday'],
columns=['country_id'],
fill_value=0)
metric_list = ['nbcalls', 'duration', 'billsec', 'buy_cost', 'sell_cost']
    # build a series for each metric
for metric in metric_list:
series[metric] = {}
# list_columns, ie for switches [1.0, 2.0]
list_columns = table[metric].columns.tolist()
list_columns = map(int, list_columns)
# Transpose
ntable = table[metric].T
# Build the result dictionary
series[metric]['columns'] = list_columns
series[metric]['x_date'] = list(table.index)
# convert into timestamp value
series[metric]['x_timestamp'] = map(conv_timestamp, list(table.index))
series[metric]['values'] = {}
valsum = 0
for col in list_columns:
series[metric]['values'][str(col)] = list(ntable.loc[col])
# valsum += map(sum, list(ntable.loc[col]))
for i in list(ntable.loc[col]):
valsum += i
series[metric]['total'] = valsum
return series
def get_dataframe_query_cmp_day(query, user, interval, start_date, end_date, switch_id):
"""
build sql query return the dataframe
"""
upd_query = sqlquery
upd_query = upd_query.replace("#SECOND_INDEX#", "switch_id")
upd_query = upd_query.replace("#USER_CONDITION#", condition_user(user))
upd_query = upd_query.replace("#DATEDAY_FORMAT#", "extract(hour from dateday) as dateday")
upd_query = upd_query.replace("#SWITCH_CONDITION#", condition_switch_id(switch_id))
upd_query = upd_query.replace("#INTERVAL#", interval)
upd_query = upd_query.replace("#COUNTRY_CONDITION#", "")
params = {
'start_date': start_date,
'end_date': end_date,
}
# df = sql.read_sql_query(upd_query, connection, params=params, index_col=["dateday", "switch_id"])
df = sql.read_sql_query(upd_query, connection, params=params)
return df
def get_report_compare_cdr(user, interval, start_date, end_date, switch_id):
"""
Use pandas to prepare series to display cdr hour report per switch
**Attributes**:
- interval: this could be hour / day / week / month
- start_date: start date
- end_date: end date
- switch_id: id of the switch (0 is all)
"""
series = {}
df = get_dataframe_query_cmp_day(sqlquery, user, interval, start_date, end_date, switch_id)
df.update(df.switch_id.fillna(0))
df = df.set_index(["dateday", "switch_id"])
metric_list = ['nbcalls', 'duration', 'billsec', 'buy_cost', 'sell_cost']
    # build a series for each metric
for metric in metric_list:
unstack_df = df[metric].unstack(['switch_id']).fillna(0)
series[metric] = {}
# list_columns, ie for switches [1, 2]
list_columns = unstack_df.columns.values
list_columns = map(int, list_columns)
if 0 in list_columns:
list_columns.remove(0)
# Build the result dictionary
series[metric]['columns'] = list_columns
# series[metric]['x_date'] = unstack_df.index.tolist()
        series[metric]['x_date'] = range(0, 24)  # hours 0-23
series[metric]['values'] = {}
valsum = 0
for col in list_columns:
series[metric]['values'][str(col)] = list(unstack_df.T.loc[col])
valsum += unstack_df.T.loc[col].sum()
series[metric]['total'] = valsum
return series
|
mpl-2.0
|
SpaceGroupUCL/qgisSpaceSyntaxToolkit
|
esstoolkit/external/networkx/conftest.py
|
1
|
4681
|
import pytest
import networkx
import sys
import warnings
def pytest_addoption(parser):
parser.addoption(
"--runslow", action="store_true", default=False, help="run slow tests"
)
def pytest_configure(config):
config.addinivalue_line("markers", "slow: mark test as slow to run")
def pytest_collection_modifyitems(config, items):
if config.getoption("--runslow"):
# --runslow given in cli: do not skip slow tests
return
skip_slow = pytest.mark.skip(reason="need --runslow option to run")
for item in items:
if "slow" in item.keywords:
item.add_marker(skip_slow)
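
# Illustrative usage (not part of this conftest): a test decorated with the
# marker registered above is skipped unless pytest is invoked with --runslow.
#
#   @pytest.mark.slow
#   def test_expensive_graph_generation():
#       ...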
# TODO: The warnings below need to be dealt with, but for now we silence them.
@pytest.fixture(autouse=True)
def set_warnings():
warnings.filterwarnings(
"ignore",
category=DeprecationWarning,
message="literal_stringizer is deprecated*",
)
warnings.filterwarnings(
"ignore",
category=DeprecationWarning,
message="literal_destringizer is deprecated*",
)
warnings.filterwarnings(
"ignore", category=DeprecationWarning, message="is_string_like is deprecated*"
)
warnings.filterwarnings(
"ignore", category=DeprecationWarning, message="make_str is deprecated*"
)
warnings.filterwarnings(
"ignore",
category=DeprecationWarning,
message="context_manager reversed is deprecated*",
)
warnings.filterwarnings(
"ignore",
category=PendingDeprecationWarning,
message="the matrix subclass is not the recommended way*",
)
@pytest.fixture(autouse=True)
def add_nx(doctest_namespace):
doctest_namespace["nx"] = networkx
# What dependencies are installed?
try:
import numpy
has_numpy = True
except ImportError:
has_numpy = False
try:
import scipy
has_scipy = True
except ImportError:
has_scipy = False
try:
import matplotlib
has_matplotlib = True
except ImportError:
has_matplotlib = False
try:
import pandas
has_pandas = True
except ImportError:
has_pandas = False
try:
import pygraphviz
has_pygraphviz = True
except ImportError:
has_pygraphviz = False
try:
import yaml
has_yaml = True
except ImportError:
has_yaml = False
try:
import pydot
has_pydot = True
except ImportError:
has_pydot = False
try:
import ogr
has_ogr = True
except ImportError:
has_ogr = False
# List of files that pytest should ignore
collect_ignore = []
needs_numpy = [
"algorithms/centrality/current_flow_closeness.py",
"algorithms/node_classification/__init__.py",
"algorithms/non_randomness.py",
"algorithms/shortest_paths/dense.py",
"linalg/bethehessianmatrix.py",
"linalg/laplacianmatrix.py",
"utils/misc.py",
]
needs_scipy = [
"algorithms/assortativity/correlation.py",
"algorithms/assortativity/mixing.py",
"algorithms/assortativity/pairs.py",
"algorithms/bipartite/matrix.py",
"algorithms/bipartite/spectral.py",
"algorithms/centrality/current_flow_betweenness.py",
"algorithms/centrality/current_flow_betweenness_subset.py",
"algorithms/centrality/eigenvector.py",
"algorithms/centrality/katz.py",
"algorithms/centrality/second_order.py",
"algorithms/centrality/subgraph_alg.py",
"algorithms/communicability_alg.py",
"algorithms/link_analysis/hits_alg.py",
"algorithms/link_analysis/pagerank_alg.py",
"algorithms/node_classification/hmn.py",
"algorithms/node_classification/lgc.py",
"algorithms/similarity.py",
"convert_matrix.py",
"drawing/layout.py",
"generators/spectral_graph_forge.py",
"linalg/algebraicconnectivity.py",
"linalg/attrmatrix.py",
"linalg/graphmatrix.py",
"linalg/modularitymatrix.py",
"linalg/spectrum.py",
"utils/rcm.py",
]
needs_matplotlib = ["drawing/nx_pylab.py"]
needs_pandas = ["convert_matrix.py"]
needs_yaml = ["readwrite/nx_yaml.py"]
needs_pygraphviz = ["drawing/nx_agraph.py"]
needs_pydot = ["drawing/nx_pydot.py"]
needs_ogr = ["readwrite/nx_shp.py"]
if not has_numpy:
collect_ignore += needs_numpy
if not has_scipy:
collect_ignore += needs_scipy
if not has_matplotlib:
collect_ignore += needs_matplotlib
if not has_pandas:
collect_ignore += needs_pandas
if not has_yaml:
collect_ignore += needs_yaml
if not has_pygraphviz:
collect_ignore += needs_pygraphviz
if not has_pydot:
collect_ignore += needs_pydot
if not has_ogr:
collect_ignore += needs_ogr
# FIXME: This is to avoid errors on AppVeyor
if sys.platform.startswith("win"):
collect_ignore += ["readwrite/graph6.py", "readwrite/sparse6.py"]
|
gpl-3.0
|
ratnania/pigasus
|
python/gallery/poisson_nonlin.py
|
1
|
10940
|
# -*- coding: UTF-8 -*-
#! /usr/bin/python
import sys
import numpy as np
from scipy.sparse.linalg import spsolve
from .poisson import *
from pigasus.fem.basicPDE import *
from numpy import abs
__all__ = ['poisson_picard', 'poisson_newton']
class poisson_picard(poisson):
"""
    A multidimensional nonlinear Poisson solver class using the Picard algorithm.
>>> import caid.cad_geometry as cg
>>> from caid.cad_geometry import line
>>> import pylab as pl
"""
#: Doc comment for class attribute gallery.poisson.
#: It can have multiple lines.
def __init__(self, *args, **kwargs):
"""Creates a nonlinear poisson PDE solver based on Picard algorithm.
geometry:
The geometry must be an object cad_geometry.
Returns:
A PDE object.
.. note::
See also: :ref:`fem.gallery.poisson`.
"""
# ...
poisson.__init__(self, *args, **kwargs)
# ...
#-----------------------------------
#-----------------------------------
def initialize(self, u0=None):
U = self.unknown
if u0 is None:
U.set(np.zeros(U.size))
return
# self.project(u0, field=U)
self.interpolate(u0, field=U)
#-----------------------------------
#-----------------------------------
def assembly(self, f=None, update=False):
poisson.assembly(self, f=f, update=update)
#-----------------------------------
#-----------------------------------
def solve(self, F, u0=None, maxiter=100, rtol=1.e-6, rtol2=1.e-6 \
, verbose=False, update=False):
"""
        Solves the nonlinear Poisson equation using the Picard algorithm.
F:
the rhs. it can be any function F(U, gradU, ..., x,y)
u0:
this is the initial value for u. Default: all B-splines coeff = 0
maxiter:
the maximum number of iterations for the Picard algorithm. Default 100
rtol:
the relative tolerance. Default 1.e-6
verbose:
True => print the error for each iteration
Returns:
The residual error (as a numpy array)
"""
        # assemble the stiffness matrix and bc terms
poisson.assembly(self, update=update)
# project u0 onto the discrete vectorial space
self.initialize(u0=u0)
# ...
PDE = self
V = PDE.space
un = PDE.unknown
rhs = self.rhs
# ...
rhs.func = F
# ...
from time import time
list_Err = [1.e6]
list_ErrH1 = [1.e6]
un_values = un.get()
normH1_old = np.dot(PDE.dot(un.get()), un.get())
i = 0
if verbose:
tb = time()
while (list_Err[-1] > rtol) and (list_ErrH1[-1] > rtol2) and (i < maxiter):
U_old_values = un.get()
# print "-------"
# print "solve"
# import matplotlib.pyplot as plt
## Phi = PDE.G_W
# Phi = PDE.unknown_dirichlet
## Phi.plot(withpcolor=True) ; plt.colorbar() ; plt.show()
# Phi.fast_plot() ; plt.colorbar() ; plt.show()
# print "-------"
# assembly the right hand side
rhs.reset()
self.update()
# solve and update unew
poisson.solve(self, rhs)
U_values = un.get()
err = np.linalg.norm(U_values-U_old_values)
list_Err.append(err)
normH1 = np.dot(PDE.dot(un.get()), un.get())
list_ErrH1.append(np.abs(normH1-normH1_old))
normH1_old = normH1
i += 1
if verbose:
print(i, ": "," |F(x)| = ", list_Err[-1]," |DF(x)| = ", list_ErrH1[-1])
if verbose:
te = time()
print(">> Elapsed time ", te-tb)
list_Err = np.asarray(list_Err[1:])
list_ErrH1 = np.asarray(list_ErrH1[1:])
return list_Err, list_ErrH1
#-----------------------------------
class poisson_newton(poisson):
"""
    A multidimensional nonlinear Poisson solver class using the Newton algorithm.
>>> import caid.cad_geometry as cg
>>> from caid.cad_geometry import line
>>> import pylab as pl
"""
#: Doc comment for class attribute gallery.poisson.
#: It can have multiple lines.
def __init__(self, *args, **kwargs):
"""Creates a nonlinear poisson PDE solver based on Picard algorithm.
geometry:
The geometry must be an object cad_geometry.
Returns:
A PDE object.
.. note::
See also: :ref:`fem.gallery.poisson`.
"""
try:
geometry = kwargs['geometry']
except:
pass
# ...
dim = geometry.dim
if dim == 1:
func_one = lambda x : [ 1. ]
func_zero = lambda x : [ 0. ]
func_stiff = lambda x : [ 1. ]
if dim == 2:
func_one = lambda x,y : [ 1. ]
func_zero = lambda x,y : [ 0. ]
func_stiff = lambda x,y : [ 1., 0. \
, 0., 1. ]
if dim == 3:
func_one = lambda x,y,z : [ 1. ]
func_zero = lambda x,y,z : [ 0. ]
func_stiff = lambda x,y,z : [ 1., 0., 0. \
, 0., 1., 0. \
, 0., 0., 1. ]
# ...
# ...
tc_d = {}
tc_d['A'] = func_stiff
tc_d['b'] = func_zero
try:
tc_d['AllDirichlet'] = kwargs['AllDirichlet']
except:
pass
try:
tc_d['bc_dirichlet'] = kwargs['bc_dirichlet']
except:
pass
try:
tc_d['bc_neumann'] = kwargs['bc_neumann']
except:
pass
try:
tc_d['Metric'] = kwargs['Metric']
except:
pass
# ...
# ...
poisson.__init__(self, *args, **kwargs)
self.Dn = basicPDE(geometry=geometry, testcase=tc_d)
# ...
# ...
#-----------------------------------
# #-----------------------------------
# def __del__(self):
# self.Dn.__del__()
# poisson.__del__(self)
# #-----------------------------------
#-----------------------------------
def free(self):
self.Dn.free()
poisson.free(self)
#-----------------------------------
#-----------------------------------
def initialize(self, u0=None):
U = self.unknown
if u0 is None:
U.set(np.zeros(U.size))
return
# self.project(u0, field=U)
self.interpolate(u0, field=U)
#-----------------------------------
#-----------------------------------
def solve(self, F, dF, u0=None, maxiter=100, rtol=1.e-6 \
, verbose=False, update=False):
"""
        Solves the nonlinear Poisson equation using the Newton algorithm.
F:
the rhs. it can be any function F(U, gradU, ..., x,y)
u0:
this is the initial value for u. Default: all B-splines coeff = 0
maxiter:
            the maximum number of iterations for the Newton algorithm. Default 100
rtol:
the relative tolerance. Default 1.e-6
verbose:
True => print the error for each iteration
Returns:
The residual error (as a numpy array)
"""
        # assemble the stiffness matrix and bc terms
poisson.assembly(self, update=update)
self.Dn.assembly()
# project u0 onto the discrete vectorial space
self.initialize(u0=u0)
En = self
Dn = self.Dn
# ...
if En.Dirichlet:
U = En.unknown_dirichlet
else:
U = En.unknown
# ...
# ...
# current values
un = En.unknown
# unew-un
dn = Dn.unknown
# get the right hand side
rhs = En.rhs
# redefine the right hand side function
def rhs_func(x,y):
return F(U,x,y)
rhs.set_func(rhs_func)
def Mn_func(x,y):
return dF(U,x,y)
# get the mass operator
Mn = Dn.mass
# redefine the mass function
Mn.set_func (Mn_func)
# ...
# ...
from time import time
dn.reset()
list_Err = [1.e6]
list_ErrH1 = [1.e6]
un_values = un.get()
i = 0
tb = time()
while (list_Err[-1] > rtol) and (i < maxiter):
# assembly the right hand side
rhs.reset()
En.update()
Dn.assembly()
# compute the right hand side
g = rhs - En.dot (un)
# solve and update unew
Dn.solve (g)
un += dn
err = np.linalg.norm(dn.get())
list_Err.append(err)
err = np.dot(self.Dn.dot(dn.get()), dn.get())
list_ErrH1.append(abs(err))
i += 1
if verbose:
print(i, ": "," |F(x)| = ", list_Err[-1]," |DF(x)| = ", list_ErrH1[-1])
te = time()
print(">> Elapsed time ", te-tb)
list_Err = np.asarray(list_Err[1:])
list_ErrH1 = np.asarray(list_ErrH1[1:])
return list_Err, list_ErrH1
#-----------------------------------
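
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of pigasus): the Picard fixed-point pattern
# used by poisson_picard.solve above, reduced to plain numpy. The function
# name and the generic iteration map g are assumptions for illustration only.
def _picard_iteration_sketch(g, x0, maxiter=100, rtol=1.e-6):
    """Iterate x_{n+1} = g(x_n) until the update norm drops below rtol."""
    x = np.asarray(x0, dtype=float)
    errors = []
    for _ in range(maxiter):
        x_new = g(x)
        err = np.linalg.norm(x_new - x)
        errors.append(err)
        x = x_new
        if err < rtol:
            break
    return x, np.asarray(errors)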
if __name__ == '__main__':
from caid.cad_geometry import circle
from matplotlib import pylab as plt
sin = np.sin ; cos = np.cos ; exp = np.exp ; log = np.log ; sqrt = np.sqrt ; pi = np.pi
nx = 15 ; ny = 15
px = 2 ; py = 2
geo = circle(radius=1./sqrt(2), n=[nx,ny], p=[px,py])
# ...
u_exact = lambda x,y : [- 2.0 * log ( x**2 + y**2 + 0.5 )]
def F(U,x,y):
_U = U.evaluate()
return [4. * exp(_U)]
def dF (U,x, y):
_U = U.evaluate()
return[-4 * exp(_U)]
# ...
AllDirichlet = True
PDE_picard = poisson_picard( geometry=geo \
, AllDirichlet=AllDirichlet )
PDE_newton = poisson_newton( geometry=geo \
, AllDirichlet=AllDirichlet )
# ...
print(">>> Solving using Picard <<<")
# ...
PDE = PDE_picard
if PDE.Dirichlet:
U = PDE.unknown_dirichlet
else:
U = PDE.unknown
# ...
PDE_picard.solve(F, u0=None, maxiter=100, rtol=1.e-6, verbose=True)
print(">>> Solving using Newton <<<")
# ...
PDE = PDE_newton
if PDE.Dirichlet:
U = PDE.unknown_dirichlet
else:
U = PDE.unknown
# ...
PDE_newton.solve(F, dF, u0=None, maxiter=100, rtol=1.e-6, verbose=True)
print("norm using Picard ", PDE_picard.norm(exact=u_exact))
print("norm using Newton ", PDE_newton.norm(exact=u_exact))
# ...
|
mit
|
mhostetter/gnuradio
|
gr-fec/python/fec/polar/channel_construction_awgn.py
|
24
|
8560
|
#!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation, Inc.
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
'''
Based on 2 papers:
[1] Ido Tal, Alexander Vardy: 'How To Construct Polar Codes', 2013
for an in-depth description of a widely used algorithm for channel construction.
[2] Harish Vangala, Emanuele Viterbo, Yi Hong: 'A Comparative Study of Polar Code Constructions for the AWGN Channel', 2015
for an overview of different approaches
'''
from scipy.optimize import fsolve
from scipy.special import erfc
from helper_functions import *
from channel_construction_bec import bhattacharyya_bounds
def solver_equation(val, s):
cw_lambda = codeword_lambda_callable(s)
ic_lambda = instantanious_capacity_callable()
return lambda y: ic_lambda(cw_lambda(y)) - val
def solve_capacity(a, s):
eq = solver_equation(a, s)
res = fsolve(eq, 1)
return np.abs(res[0]) # only positive values needed.
def codeword_lambda_callable(s):
return lambda y: np.exp(-2 * y * np.sqrt(2 * s))
def codeword_lambda(y, s):
return codeword_lambda_callable(s)(y)
def instantanious_capacity_callable():
return lambda x : 1 - np.log2(1 + x) + (x * np.log2(x) / (1 + x))
def instantanious_capacity(x):
return instantanious_capacity_callable()(x)
def q_function(x):
    # Q(x) = (1 / sqrt(2 * pi)) * integral from x to inf of exp(-t ^ 2 / 2) dt
return .5 * erfc(x / np.sqrt(2))
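
# Worked example (illustrative): erfc(0) = 1, so q_function(0.0) returns 0.5,
# i.e. half of a zero-mean Gaussian lies above its mean.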
def discretize_awgn(mu, design_snr):
'''
needed for Binary-AWGN channels.
in [1] described in Section VI
in [2] described as a function of the same name.
in both cases the infinite output alphabet is reduced to a finite output alphabet for a given channel.
idea:
1. instantaneous capacity C(x) in interval [0, 1]
2. split into mu intervals.
3. find corresponding output alphabet values y of likelihood ratio function lambda(y) inserted into C(x)
4. Calculate the probability for each value given that a '0' or '1' was transmitted.
'''
s = 10 ** (design_snr / 10)
a = np.zeros(mu + 1, dtype=float)
a[-1] = np.inf
for i in range(1, mu):
a[i] = solve_capacity(1. * i / mu, s)
factor = np.sqrt(2 * s)
tpm = np.zeros((2, mu))
for j in range(mu):
tpm[0][j] = q_function(factor + a[j]) - q_function(factor + a[j + 1])
tpm[1][j] = q_function(-1. * factor + a[j]) - q_function(-1. * factor + a[j + 1])
tpm = tpm[::-1]
tpm[0] = tpm[0][::-1]
tpm[1] = tpm[1][::-1]
return tpm
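
# Illustrative usage (hypothetical parameters): discretize a Binary-AWGN
# channel at a design SNR of 0 dB into mu = 16 output symbols; each column of
# the returned 2 x mu matrix holds the two conditional probabilities of
# observing that symbol under the two channel inputs.
#
#   tpm = discretize_awgn(mu=16, design_snr=0.0)
#   assert tpm.shape == (2, 16)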
def instant_capacity_delta_callable():
return lambda a, b: -1. * (a + b) * np.log2((a + b) / 2) + a * np.log2(a) + b * np.log2(b)
def capacity_delta_callable():
c = instant_capacity_delta_callable()
return lambda a, b, at, bt: c(a, b) + c(at, bt) - c(a + at, b + bt)
def quantize_to_size(tpm, mu):
# This is a degrading merge, compare [1]
calculate_delta_I = capacity_delta_callable()
L = np.shape(tpm)[1]
if not mu < L:
print('WARNING: This channel gets too small!')
# lambda works on vectors just fine. Use Numpy vector awesomeness.
delta_i_vec = calculate_delta_I(tpm[0, 0:-1], tpm[1, 0:-1], tpm[0, 1:], tpm[1, 1:])
for i in range(L - mu):
d = np.argmin(delta_i_vec)
ap = tpm[0, d] + tpm[0, d + 1]
bp = tpm[1, d] + tpm[1, d + 1]
if d > 0:
delta_i_vec[d - 1] = calculate_delta_I(tpm[0, d - 1], tpm[1, d - 1], ap, bp)
if d < delta_i_vec.size - 1:
delta_i_vec[d + 1] = calculate_delta_I(ap, bp, tpm[0, d + 1], tpm[1, d + 1])
delta_i_vec = np.delete(delta_i_vec, d)
tpm = np.delete(tpm, d, axis=1)
tpm[0, d] = ap
tpm[1, d] = bp
return tpm
def upper_bound_z_params(z, block_size, design_snr):
upper_bound = bhattacharyya_bounds(design_snr, block_size)
z = np.minimum(z, upper_bound)
return z
def tal_vardy_tpm_algorithm(block_size, design_snr, mu):
mu = mu // 2 # make sure algorithm uses only as many bins as specified.
block_power = power_of_2_int(block_size)
channels = np.zeros((block_size, 2, mu))
channels[0] = discretize_awgn(mu, design_snr) * 2
print('Constructing polar code with Tal-Vardy algorithm')
    print('(block_size = {0}, design SNR = {1}, mu = {2})'.format(block_size, design_snr, 2 * mu))
show_progress_bar(0, block_size)
for j in range(0, block_power):
u = 2 ** j
for t in range(u):
show_progress_bar(u + t, block_size)
# print("(u={0}, t={1}) = {2}".format(u, t, u + t))
ch1 = upper_convolve(channels[t], mu)
ch2 = lower_convolve(channels[t], mu)
channels[t] = quantize_to_size(ch1, mu)
channels[u + t] = quantize_to_size(ch2, mu)
z = np.zeros(block_size)
for i in range(block_size):
z[i] = bhattacharyya_parameter(channels[i])
z = z[bit_reverse_vector(np.arange(block_size), block_power)]
z = upper_bound_z_params(z, block_size, design_snr)
show_progress_bar(block_size, block_size)
print('')
print('channel construction DONE')
return z
def merge_lr_based(q, mu):
lrs = q[0] / q[1]
vals, indices, inv_indices = np.unique(lrs, return_index=True, return_inverse=True)
# compare [1] (20). Ordering of representatives according to LRs.
temp = np.zeros((2, len(indices)), dtype=float)
if vals.size < mu:
return q
for i in range(len(indices)):
merge_pos = np.where(inv_indices == i)[0]
sum_items = q[:, merge_pos]
if merge_pos.size > 1:
sum_items = np.sum(q[:, merge_pos], axis=1)
temp[0, i] = sum_items[0]
temp[1, i] = sum_items[1]
return temp
def upper_convolve(tpm, mu):
q = np.zeros((2, mu ** 2))
idx = -1
for i in range(mu):
idx += 1
q[0, idx] = (tpm[0, i] ** 2 + tpm[1, i] ** 2) / 2
q[1, idx] = tpm[0, i] * tpm[1, i]
for j in range(i + 1, mu):
idx += 1
q[0, idx] = tpm[0, i] * tpm[0, j] + tpm[1, i] * tpm[1, j]
q[1, idx] = tpm[0, i] * tpm[1, j] + tpm[1, i] * tpm[0, j]
if q[0, idx] < q[1, idx]:
q[0, idx], q[1, idx] = swap_values(q[0, idx], q[1, idx])
idx += 1
q = np.delete(q, np.arange(idx, np.shape(q)[1]), axis=1)
q = merge_lr_based(q, mu)
q = normalize_q(q, tpm)
return q
def lower_convolve(tpm, mu):
q = np.zeros((2, mu * (mu + 1)))
idx = -1
for i in range(0, mu):
idx += 1
q[0, idx] = (tpm[0, i] ** 2) / 2
q[1, idx] = (tpm[1, i] ** 2) / 2
if q[0, idx] < q[1, idx]:
q[0, idx], q[1, idx] = swap_values(q[0, idx], q[1, idx])
idx += 1
q[0, idx] = tpm[0, i] * tpm[1, i]
q[1, idx] = q[0, idx]
for j in range(i + 1, mu):
idx += 1
q[0, idx] = tpm[0, i] * tpm[0, j]
q[1, idx] = tpm[1, i] * tpm[1, j]
if q[0, idx] < q[1, idx]:
q[0, idx], q[1, idx] = swap_values(q[0, idx], q[1, idx])
idx += 1
q[0, idx] = tpm[0, i] * tpm[1, j]
q[1, idx] = tpm[1, i] * tpm[0, j]
if q[0, idx] < q[1, idx]:
q[0, idx], q[1, idx] = swap_values(q[0, idx], q[1, idx])
idx += 1
q = np.delete(q, np.arange(idx, np.shape(q)[1]), axis=1)
q = merge_lr_based(q, mu)
q = normalize_q(q, tpm)
return q
def swap_values(first, second):
return second, first
def normalize_q(q, tpm):
original_factor = np.sum(tpm)
next_factor = np.sum(q)
factor = original_factor / next_factor
return q * factor
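
# Worked example (illustrative): if the original tpm sums to 2.0 and the
# convolved q sums to 4.0, normalize_q scales q by 0.5 so both transition
# probability tables carry the same total mass.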
def main():
    print('channel construction AWGN main')
n = 8
m = 2 ** n
design_snr = 0.0
mu = 16
z_params = tal_vardy_tpm_algorithm(m, design_snr, mu)
print(z_params)
if 0:
import matplotlib.pyplot as plt
plt.plot(z_params)
plt.show()
if __name__ == '__main__':
main()
|
gpl-3.0
|
ashhher3/scikit-learn
|
examples/ensemble/plot_adaboost_hastie_10_2.py
|
355
|
3576
|
"""
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>,
# Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
|
bsd-3-clause
|
koverholt/bayes-fire
|
Example_Cases/Correlation_Fire_Size/Scripts/pymc_heat_flux_3.py
|
1
|
1272
|
#!/usr/bin/env python
"""
PyMC Radiation Heat Flux Example Series
Example 3: PyMC simulation using different initial values.
In this example, we use the point source radiation model,
but we start from two different initial conditions to
see if they converge to the same posterior distribution.
"""
import matplotlib
matplotlib.use("Agg")
import pylab as pl
import pymc as mc
import models
import graphics
# Generate model and fit model
vars = models.point_source()
vars['theta'].value = [60]
m1 = mc.MCMC(vars)
m1.sample(iter=50000, burn=25000, thin=10)
# Generate model and fit model with different initial value
vars = models.point_source()
vars['theta'].value = [250]
m2 = mc.MCMC(vars)
m2.sample(iter=50000, burn=25000, thin=10)
# Plot traces and model with mean values
pl.figure(figsize=(12,9))
graphics.plot_hf_data()
graphics.plot_point_source_model(m1, color='g', label='Replicate 1')
pl.savefig('../Figures/example_heat_flux_3a.pdf')
graphics.plot_point_source_model(m2, color='b', label='Replicate 2')
pl.savefig('../Figures/example_heat_flux_3b.pdf')
# Plot resulting distributions and convergence diagnostics
mc.Matplot.plot(m1,
format='pdf',
path='../Figures/example_heat_flux_3',
common_scale=False)
|
bsd-3-clause
|
jlegendary/scikit-learn
|
benchmarks/bench_plot_approximate_neighbors.py
|
85
|
6377
|
"""
Benchmark for approximate nearest neighbor search using
locality sensitive hashing forest.
There are two types of benchmarks.
First, the accuracy of LSHForest queries is measured for various
hyper-parameters and index sizes.
Second, the speed up of LSHForest queries relative to the brute force
method of exact nearest neighbor search is measured for the
aforementioned settings. In general, the speed up increases as
the index size grows.
"""
from __future__ import division
import numpy as np
from tempfile import gettempdir
from time import time
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors.approximate import LSHForest
from sklearn.datasets import make_blobs
from sklearn.externals.joblib import Memory
m = Memory(cachedir=gettempdir())
@m.cache()
def make_data(n_samples, n_features, n_queries, random_state=0):
"""Create index and query data."""
print('Generating random blob-ish data')
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=100,
shuffle=True, random_state=random_state)
# Keep the last samples as held out query vectors: note since we used
# shuffle=True we have ensured that index and query vectors are
# samples from the same distribution (a mixture of 100 gaussians in this
# case)
return X[:n_samples], X[n_samples:]
def calc_exact_neighbors(X, queries, n_queries, n_neighbors):
"""Measures average times for exact neighbor queries."""
print ('Building NearestNeighbors for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
average_time = 0
t0 = time()
neighbors = nbrs.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time = (time() - t0) / n_queries
return neighbors, average_time
def calc_accuracy(X, queries, n_queries, n_neighbors, exact_neighbors,
average_time_exact, **lshf_params):
"""Calculates accuracy and the speed up of LSHForest."""
print('Building LSHForest for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
lshf = LSHForest(**lshf_params)
t0 = time()
lshf.fit(X)
lshf_build_time = time() - t0
print('Done in %0.3fs' % lshf_build_time)
accuracy = 0
t0 = time()
approx_neighbors = lshf.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time_approx = (time() - t0) / n_queries
for i in range(len(queries)):
accuracy += np.in1d(approx_neighbors[i], exact_neighbors[i]).mean()
accuracy /= n_queries
speed_up = average_time_exact / average_time_approx
print('Average time for lshf neighbor queries: %0.3fs' %
average_time_approx)
print ('Average time for exact neighbor queries: %0.3fs' %
average_time_exact)
print ('Average Accuracy : %0.2f' % accuracy)
print ('Speed up: %0.1fx' % speed_up)
return speed_up, accuracy
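
# Worked example (illustrative, hypothetical neighbor indices): if the exact
# neighbors of one query are [3, 7, 9] and LSHForest returns [7, 9, 21], then
# np.in1d([7, 9, 21], [3, 7, 9]).mean() gives 2/3 for that query before
# averaging over all queries.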
if __name__ == '__main__':
import matplotlib.pyplot as plt
# Initialize index sizes
n_samples = [int(1e3), int(1e4), int(1e5), int(1e6)]
n_features = int(1e2)
n_queries = 100
n_neighbors = 10
X_index, X_query = make_data(np.max(n_samples), n_features, n_queries,
random_state=0)
params_list = [{'n_estimators': 3, 'n_candidates': 50},
{'n_estimators': 5, 'n_candidates': 70},
{'n_estimators': 10, 'n_candidates': 100}]
accuracies = np.zeros((len(n_samples), len(params_list)), dtype=float)
speed_ups = np.zeros((len(n_samples), len(params_list)), dtype=float)
for i, sample_size in enumerate(n_samples):
print ('==========================================================')
print ('Sample size: %i' % sample_size)
print ('------------------------')
exact_neighbors, average_time_exact = calc_exact_neighbors(
X_index[:sample_size], X_query, n_queries, n_neighbors)
for j, params in enumerate(params_list):
print ('LSHF parameters: n_estimators = %i, n_candidates = %i' %
(params['n_estimators'], params['n_candidates']))
speed_ups[i, j], accuracies[i, j] = calc_accuracy(
X_index[:sample_size], X_query, n_queries, n_neighbors,
exact_neighbors, average_time_exact, random_state=0, **params)
print ('')
print ('==========================================================')
# Set labels for LSHForest parameters
colors = ['c', 'm', 'y']
p1 = plt.Rectangle((0, 0), 0.1, 0.1, fc=colors[0])
p2 = plt.Rectangle((0, 0), 0.1, 0.1, fc=colors[1])
p3 = plt.Rectangle((0, 0), 0.1, 0.1, fc=colors[2])
labels = ['n_estimators=' + str(params_list[0]['n_estimators']) +
', n_candidates=' + str(params_list[0]['n_candidates']),
'n_estimators=' + str(params_list[1]['n_estimators']) +
', n_candidates=' + str(params_list[1]['n_candidates']),
'n_estimators=' + str(params_list[2]['n_estimators']) +
', n_candidates=' + str(params_list[2]['n_candidates'])]
# Plot precision
plt.figure()
plt.legend((p1, p2, p3), (labels[0], labels[1], labels[2]),
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, accuracies[:, i], c=colors[i])
plt.plot(n_samples, accuracies[:, i], c=colors[i])
plt.ylim([0, 1.3])
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Precision@10")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Precision of first 10 neighbors with index size")
# Plot speed up
plt.figure()
plt.legend((p1, p2, p3), (labels[0], labels[1], labels[2]),
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, speed_ups[:, i], c=colors[i])
plt.plot(n_samples, speed_ups[:, i], c=colors[i])
plt.ylim(0, np.max(speed_ups))
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Speed up")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Relationship between Speed up and index size")
plt.show()
|
bsd-3-clause
|
cms-ttbarAC/CyMiniAna
|
python/hepPlotter/hepPlotterTools.py
|
1
|
11465
|
"""
Created: 1 September 2016
Last Updated: 16 February 2018
Dan Marley
[email protected]
Texas A&M University
-----
Simple functions to help with basic plots.
"""
import os
import ROOT
import numpy as np
from array import array
import matplotlib.pyplot as plt
from Analysis.CyMiniAna.util import Sample
def betterColors():
"""
Better colors for plotting.
In matplotlib 2.0, these are available by default:
> https://matplotlib.org/users/dflt_style_changes.html#colors-color-cycles-and-color-maps
"""
old_colors = [
(31, 119, 180), #blue
(214, 39, 40), #red
(44, 160, 44), #green
(255, 127, 14), #orange
(148, 103, 189), #purple
(227, 119, 194), #pink
(127, 127, 127), #teal
(188, 189, 34), #gray
(23, 190, 207), #green-gold
(140, 86, 75), #brown
# lighter versions
(174, 199, 232), #blue
(255, 152, 150), #red
(152, 223, 138), #green
(255, 187, 120), #orange
(197, 176, 213), #purple
(247, 182, 210), #pink
(158, 218, 229), #teal
(199, 199, 199), #gray
(219, 219, 141), #green-gold
(196, 156, 148), #brown
]
lc = []
for jj in old_colors:
new_color = [i/255. for i in jj]
lc.append(new_color)
ls = ['solid' for _ in lc]
ls += ['dashed' for _ in lc]
return {'linecolors':lc,'linestyles':ls}
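
# Illustrative usage (not part of the original module): cycle through the
# palette returned by betterColors() when drawing several curves.
#
#   style = betterColors()
#   for i, color in enumerate(style['linecolors'][:3]):
#       plt.plot([0, 1], [i, i + 1], color=color, linestyle=style['linestyles'][i])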
def getName(filename):
"""Given a root file full of histograms, return the sample name
example name: TTS_M500_XXX.root
can be customized by users depending on their files
best (future) solution: metadata in root file with "name" option
"""
name = filename.split(".root")[0].split("/")[-1]
return name
def getSampleType(name):
"""Given a sample name return the sample type"""
backgrounds = open("share/sampleNamesShort.txt").readlines()
backgrounds = [i.rstrip("\n") for i in backgrounds]
signal = ['TTS','BBS','TTD','BBD','XX','YY','zprime']
data = ['data']
sampletype = ''
if name=='data':
sampletype = 'data'
elif any(name.startswith(i) for i in signal):
sampletype = 'signal'
elif name in backgrounds:
sampletype = 'background'
else:
sampletype = ''
return sampletype
def getMetadata(metadata_file=None):
"""
Store the xsection & sum of weights using primary dataset as key
'PrimaryDataset XSection sumWeights KFactor NEvents'
"""
samples = {}
if metadata_file is None:
cma_dir = os.getenv("CYMINIANADIR")
metadata = open(cma_dir+"/config/sampleMetaData.txt","r")
else:
metadata = open(metadata_file,"r")
for line in metadata:
if line.startswith("#") or line=="\n": continue
l = line.split()
s = Sample()
s.sampleType = l[0]
s.primaryDataset = l[1]
s.xsection = float(l[2])
s.sumOfWeights = float(l[3])
s.nevents = int(l[5])
samples[l[1]] = s
## Add data (not in metadata files, usually)
names = ['data','mujets','ejets']
pds = ['data','SingleMuon','SingleElectron']
for (name,pd) in zip(names,pds):
data = Sample()
data.sampleType = name
data.primaryDataset = pd
samples[pd] = data
return samples
def hist1d(nbins,bin_low,bin_high):
"""
Set the binning for each histogram.
@param nbins Number of bins in histogram
@param bin_low Lower bin edge
@param bin_high Upper bin edge
"""
binsize = float(bin_high-bin_low)/nbins
arr = array('d', [i*binsize+bin_low for i in xrange(nbins+1)] )
return arr
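
# Worked example (illustrative): hist1d(4, 0.0, 1.0) returns
# array('d', [0.0, 0.25, 0.5, 0.75, 1.0]) -- five equally spaced edges for
# four bins.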
def data2list(data,weights=None,normed=False,binning=1):
"""Convert array of data into dictionary of information matching 'hist2list' """
data,bins = np.histogram(data,bins=binning,weights=weights,normed=normed)
results = {'data': data,
'error':np.sqrt(data),
'bins': bins,
'center':0.5*(bins[:-1]+bins[1:]),
'width': 0.5*(bins[:-1]-bins[1:])}
return results
def data2list2D(data,weights=None,normed=False,binning=1):
"""Convert array of data into dictionary of information matching 'hist2list' """
try:
x = data['x']
y = data['y']
except:
x = data[0]
y = data[1]
_,bins_x,bins_y = np.histogram2d(x, y, bins=binning,normed=normed,weights=weights)
binnsx = []
binnsy = []
for x in 0.5*(bins_x[:-1]+bins_x[1:]):
for y in 0.5*(bins_y[:-1]+bins_y[1:]):
binnsx.append(x)
binnsy.append(y)
results = {'data': weights,
'error': np.sqrt(weights),
'bins': {'x':bins_x,'y':bins_y},
'center':{'x':binnsx,
'y':binnsy},
'width': {'x':0.5*(bins_x[:-1]-bins_x[1:]),
'y':0.5*(bins_y[:-1]-bins_y[1:])}}
print results
return results
def hist2list(histo,name='',normed=False,reBin=1):
"""Convert ROOT histogram to (dictionary of) lists"""
if not histo.GetSumw2N():
histo.Sumw2()
bin_center = []
bin_content = []
bin_error = [] # just stat. uncertainty (symmetric)
binwidth = []
if normed:
histo.Scale(1./histo.Integral());
try:
histo.Rebin(reBin)
except TypeError:
newname = histo.GetName()+"_"+name
histo.Rebin( len(reBin)-1,newname,reBin)
histo = ROOT.gROOT.FindObject( newname )
binns = [histo.GetXaxis().GetBinLowEdge(1)]
# do one for loop instead of multiple list comprehensions
for i in xrange(1,histo.GetNbinsX()+1):
binns.append(histo.GetXaxis().GetBinUpEdge(i))
bin_center.append(histo.GetBinCenter(i))
bin_content.append(histo.GetBinContent(i))
bin_error.append(histo.GetBinError(i))
binwidth.append(histo.GetXaxis().GetBinWidth(i)/2.)
results = {'data': np.array(bin_content),
'error':np.array(bin_error),
'bins': binns,
'center':bin_center,
'width': binwidth}
return results
def hist2list2D(histo,name='',reBin=None,normed=False):
"""Convert ROOT histogram to list for 2D plots."""
if not histo.GetSumw2N():
histo.Sumw2()
bin_center = {'x':[],'y':[]}
bin_content = []
bin_error = [] # just stat. uncertainty (symmetric)
binwidth = {'x':[],'y':[]}
if normed:
histo.Scale(1./histo.Integral())
## -- Rebin
if reBin is not None:
try:
histo.Rebin2D(reBin,reBin)
except TypeError:
newname = histo.GetName()+"_"+name
old_histo = histo.Clone() # special rebinning, redefine histo
new_x = reBin['x']
new_y = reBin['y']
histo = ROOT.TH2F(old_histo.GetName()+newname,old_histo.GetTitle()+newname,len(new_x)-1,new_x,len(new_y)-1,new_y)
xaxis = old_histo.GetXaxis()
yaxis = old_histo.GetYaxis()
for i in xrange(1,xaxis.GetNbins()):
for j in xrange(1,yaxis.GetNbins()):
histo.Fill(xaxis.GetBinCenter(i),yaxis.GetBinCenter(j),old_histo.GetBinContent(i,j) )
binns = {'x':[histo.GetXaxis().GetBinLowEdge(1)],\
'y':[histo.GetYaxis().GetBinLowEdge(1)]}
# do one for loop instead of multiple list comprehensions
binns['x']+=[histo.GetXaxis().GetBinUpEdge(i) for i in xrange(1,histo.GetNbinsX()+1)]
binns['y']+=[histo.GetYaxis().GetBinUpEdge(j) for j in xrange(1,histo.GetNbinsY()+1)]
for i in xrange(1,histo.GetNbinsX()+1):
for j in xrange(1,histo.GetNbinsY()+1):
bin_center['x'].append(histo.GetXaxis().GetBinCenter(i))
bin_center['y'].append(histo.GetYaxis().GetBinCenter(j))
bin_content.append(histo.GetBinContent(i,j))
bin_error.append(histo.GetBinError(i,j))
binwidth['x'].append(histo.GetXaxis().GetBinWidth(i)/2.)
            binwidth['y'].append(histo.GetYaxis().GetBinWidth(j)/2.)
results = {'data': np.array(bin_content),
'error':np.array(bin_error),
'bins': binns,
'center':bin_center,
'width': binwidth}
return results
def TEfficiency2list(histo):
"""Convert TEfficiency to lists. Return dictionary of lists"""
h_histo = histo.GetPassedHistogram()
h_eff = []
h_eff_up = []
h_eff_dn = []
h_eff_mp = []
binwidth = []
binns = [h_histo.GetXaxis().GetBinLowEdge(1)]
for i in xrange(1,h_histo.GetNbinsX()+1):
h_eff.append(histo.GetEfficiency(i))
h_eff_up.append(histo.GetEfficiencyErrorUp(i))
h_eff_dn.append(histo.GetEfficiencyErrorLow(i))
h_eff_mp.append(h_histo.GetXaxis().GetBinCenter(i))
binns.append(h_histo.GetXaxis().GetBinUpEdge(i))
binwidth.append(h_histo.GetXaxis().GetBinWidth(1)/2.)
results = {'data': np.array(h_eff),
'error':[h_eff_dn,h_eff_up],
'bins': binns,
'center':h_eff_mp,
'width': binwidth}
return results
def TEfficiency2list2D(histo):
"""Convert 2D TEfficiency to lists"""
h_histo = histo.GetPassedHistogram()
bin_center = {'x':[],'y':[]}
bin_content = []
bin_error_up = [] # eff uncertainty up -- not necessarily symmetric
bin_error_dn = [] # eff uncertainty down
binwidth = {'x':[],'y':[]}
binns = {'x':[h_histo.GetXaxis().GetBinLowEdge(1)],\
'y':[h_histo.GetYaxis().GetBinLowEdge(1)]}
# do one for loop instead of multiple list comprehensions
binns['x']+=[h_histo.GetXaxis().GetBinUpEdge(i) for i in xrange(1,h_histo.GetNbinsX()+1)]
binns['y']+=[h_histo.GetYaxis().GetBinUpEdge(j) for j in xrange(1,h_histo.GetNbinsY()+1)]
for i in xrange(1,h_histo.GetNbinsX()+1):
for j in xrange(1,h_histo.GetNbinsY()+1):
bin_center['x'].append(h_histo.GetXaxis().GetBinCenter(i))
bin_center['y'].append(h_histo.GetYaxis().GetBinCenter(j))
this_bin = histo.GetGlobalBin(i,j)
bin_content.append(histo.GetEfficiency(this_bin))
bin_error_up.append(histo.GetEfficiencyErrorUp(this_bin))
bin_error_dn.append(histo.GetEfficiencyErrorLow(this_bin))
binwidth['x'].append(h_histo.GetXaxis().GetBinWidth(1)/2.)
binwidth['y'].append(h_histo.GetYaxis().GetBinWidth(1)/2.)
results = {'data': np.array(bin_content),
'error':[bin_error_up,bin_error_dn],
'bins': binns,
'center':bin_center,
'width': binwidth}
return results
def getDataStructure(h_data):
"""
Find the data structure determining the appropriate color scheme.
Only call if the self.colormap attribute is None.
@param h_data The histogram data
@param colorMap Current choice for colormap
"""
max_value = max(h_data)
min_value = min(h_data)
## linear (same sign)
if max_value*min_value > 0:
if max_value>0:
colormap = plt.cm.Reds # positive values
else:
colormap = plt.cm.Blues # negative values
## diverging
else:
colormap = plt.cm.bwr # blue2red map
return colormap
## THE END
|
mit
|
cython-testbed/pandas
|
pandas/tests/sparse/test_array.py
|
1
|
40065
|
from pandas.compat import range
import re
import operator
import pytest
import warnings
from numpy import nan
import numpy as np
import pandas as pd
from pandas.core.sparse.api import SparseArray, SparseSeries, SparseDtype
from pandas._libs.sparse import IntIndex
from pandas.util.testing import assert_almost_equal
import pandas.util.testing as tm
import pandas.util._test_decorators as td
@pytest.fixture(params=["integer", "block"])
def kind(request):
return request.param
class TestSparseArray(object):
def setup_method(self, method):
self.arr_data = np.array([nan, nan, 1, 2, 3, nan, 4, 5, nan, 6])
self.arr = SparseArray(self.arr_data)
self.zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
def test_constructor_dtype(self):
arr = SparseArray([np.nan, 1, 2, np.nan])
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert arr.dtype.subtype == np.float64
assert np.isnan(arr.fill_value)
arr = SparseArray([np.nan, 1, 2, np.nan], fill_value=0)
assert arr.dtype == SparseDtype(np.float64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=np.float64)
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert np.isnan(arr.fill_value)
arr = SparseArray([0, 1, 2, 4], dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
def test_constructor_dtype_str(self):
result = SparseArray([1, 2, 3], dtype='int')
expected = SparseArray([1, 2, 3], dtype=int)
tm.assert_sp_array_equal(result, expected)
def test_constructor_sparse_dtype(self):
result = SparseArray([1, 0, 0, 1], dtype=SparseDtype('int64', -1))
expected = SparseArray([1, 0, 0, 1], fill_value=-1, dtype=np.int64)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int64')
def test_constructor_sparse_dtype_str(self):
result = SparseArray([1, 0, 0, 1], dtype='Sparse[int32]')
expected = SparseArray([1, 0, 0, 1], dtype=np.int32)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int32')
def test_constructor_object_dtype(self):
# GH 11856
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object)
assert arr.dtype == SparseDtype(np.object)
assert np.isnan(arr.fill_value)
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object,
fill_value='A')
assert arr.dtype == SparseDtype(np.object, 'A')
assert arr.fill_value == 'A'
# GH 17574
data = [False, 0, 100.0, 0.0]
arr = SparseArray(data, dtype=np.object, fill_value=False)
assert arr.dtype == SparseDtype(np.object, False)
assert arr.fill_value is False
arr_expected = np.array(data, dtype=np.object)
it = (type(x) == type(y) and x == y for x, y in zip(arr, arr_expected))
assert np.fromiter(it, dtype=np.bool).all()
@pytest.mark.parametrize("dtype", [SparseDtype(int, 0), int])
def test_constructor_na_dtype(self, dtype):
with tm.assert_raises_regex(ValueError, "Cannot convert"):
SparseArray([0, 1, np.nan], dtype=dtype)
def test_constructor_spindex_dtype(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]))
# XXX: Behavior change: specifying SparseIndex no longer changes the
# fill_value
expected = SparseArray([0, 1, 2, 0], kind='integer')
tm.assert_sp_array_equal(arr, expected)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=np.int64, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=np.int64, fill_value=0)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=np.int64)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=np.int64)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=None, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize("sparse_index", [
None, IntIndex(1, [0]),
])
def test_constructor_spindex_dtype_scalar(self, sparse_index):
# scalar input
arr = SparseArray(data=1, sparse_index=sparse_index, dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=1, sparse_index=IntIndex(1, [0]), dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
def test_constructor_spindex_dtype_scalar_broadcasts(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=None)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize('data, fill_value', [
(np.array([1, 2]), 0),
(np.array([1.0, 2.0]), np.nan),
([True, False], False),
([pd.Timestamp('2017-01-01')], pd.NaT),
])
def test_constructor_inferred_fill_value(self, data, fill_value):
result = SparseArray(data).fill_value
if pd.isna(fill_value):
assert pd.isna(result)
else:
assert result == fill_value
@pytest.mark.parametrize('scalar,dtype', [
(False, SparseDtype(bool, False)),
(0.0, SparseDtype('float64', 0)),
(1, SparseDtype('int64', 1)),
('z', SparseDtype('object', 'z'))])
def test_scalar_with_index_infer_dtype(self, scalar, dtype):
# GH 19163
arr = SparseArray(scalar, index=[1, 2, 3], fill_value=scalar)
exp = SparseArray([scalar, scalar, scalar], fill_value=scalar)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == dtype
assert exp.dtype == dtype
@pytest.mark.parametrize("fill", [1, np.nan, 0])
def test_sparse_series_round_trip(self, kind, fill):
# see gh-13999
arr = SparseArray([np.nan, 1, np.nan, 2, 3],
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
arr = SparseArray([0, 0, 0, 1, 1, 2], dtype=np.int64,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr), dtype=np.int64)
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
@pytest.mark.parametrize("fill", [True, False, np.nan])
def test_sparse_series_round_trip2(self, kind, fill):
# see gh-13999
arr = SparseArray([True, False, True, True], dtype=np.bool,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
def test_get_item(self):
assert np.isnan(self.arr[1])
assert self.arr[2] == 1
assert self.arr[7] == 5
assert self.zarr[0] == 0
assert self.zarr[2] == 1
assert self.zarr[7] == 5
errmsg = re.compile("bounds")
tm.assert_raises_regex(IndexError, errmsg, lambda: self.arr[11])
tm.assert_raises_regex(IndexError, errmsg, lambda: self.arr[-11])
assert self.arr[-1] == self.arr[len(self.arr) - 1]
def test_take_scalar_raises(self):
msg = "'indices' must be an array, not a scalar '2'."
with tm.assert_raises_regex(ValueError, msg):
self.arr.take(2)
def test_take(self):
exp = SparseArray(np.take(self.arr_data, [2, 3]))
tm.assert_sp_array_equal(self.arr.take([2, 3]), exp)
exp = SparseArray(np.take(self.arr_data, [0, 1, 2]))
tm.assert_sp_array_equal(self.arr.take([0, 1, 2]), exp)
def test_take_fill_value(self):
data = np.array([1, np.nan, 0, 3, 0])
sparse = SparseArray(data, fill_value=0)
exp = SparseArray(np.take(data, [0]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([0]), exp)
exp = SparseArray(np.take(data, [1, 3, 4]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([1, 3, 4]), exp)
def test_take_negative(self):
exp = SparseArray(np.take(self.arr_data, [-1]))
tm.assert_sp_array_equal(self.arr.take([-1]), exp)
exp = SparseArray(np.take(self.arr_data, [-4, -3, -2]))
tm.assert_sp_array_equal(self.arr.take([-4, -3, -2]), exp)
def test_bad_take(self):
tm.assert_raises_regex(
IndexError, "bounds", lambda: self.arr.take([11]))
def test_take_filling(self):
# similar tests as GH 12631
sparse = SparseArray([np.nan, np.nan, 1, np.nan, 4])
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
# XXX: test change: fill_value=True -> allow_fill=True
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
expected = SparseArray([np.nan, np.nan, np.nan])
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
msg = ("Invalid value in 'indices'")
with tm.assert_raises_regex(ValueError, msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with tm.assert_raises_regex(ValueError, msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), allow_fill=True)
def test_take_filling_fill_value(self):
# same tests as GH 12631
sparse = SparseArray([np.nan, 0, 1, 0, 4], fill_value=0)
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# fill_value
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
# XXX: behavior change.
# the old way of filling self.fill_value doesn't follow EA rules.
# It's supposed to be self.dtype.na_value (nan in this case)
expected = SparseArray([0, np.nan, np.nan], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
msg = ("Invalid value in 'indices'.")
with tm.assert_raises_regex(ValueError, msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with tm.assert_raises_regex(ValueError, msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_take_filling_all_nan(self):
sparse = SparseArray([np.nan, np.nan, np.nan, np.nan, np.nan])
# XXX: did the default kind from take change?
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, np.nan], kind='block')
tm.assert_sp_array_equal(result, expected)
result = sparse.take(np.array([1, 0, -1]), fill_value=True)
expected = SparseArray([np.nan, np.nan, np.nan], kind='block')
tm.assert_sp_array_equal(result, expected)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_set_item(self):
def setitem():
self.arr[5] = 3
def setslice():
self.arr[1:5] = 2
tm.assert_raises_regex(TypeError, "item assignment", setitem)
tm.assert_raises_regex(TypeError, "item assignment", setslice)
def test_constructor_from_too_large_array(self):
tm.assert_raises_regex(TypeError, "expected dimension <= 1 data",
SparseArray, np.arange(10).reshape((2, 5)))
def test_constructor_from_sparse(self):
res = SparseArray(self.zarr)
assert res.fill_value == 0
assert_almost_equal(res.sp_values, self.zarr.sp_values)
def test_constructor_copy(self):
cp = SparseArray(self.arr, copy=True)
cp.sp_values[:3] = 0
assert not (self.arr.sp_values[:3] == 0).any()
not_copy = SparseArray(self.arr)
not_copy.sp_values[:3] = 0
assert (self.arr.sp_values[:3] == 0).all()
def test_constructor_bool(self):
# GH 10648
data = np.array([False, False, True, True, False, False])
arr = SparseArray(data, fill_value=False, dtype=bool)
assert arr.dtype == SparseDtype(bool)
tm.assert_numpy_array_equal(arr.sp_values, np.array([True, True]))
# Behavior change: np.asarray densifies.
# tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(arr.sp_index.indices,
np.array([2, 3], np.int32))
for dense in [arr.to_dense(), arr.values]:
assert dense.dtype == bool
tm.assert_numpy_array_equal(dense, data)
def test_constructor_bool_fill_value(self):
arr = SparseArray([True, False, True], dtype=None)
assert arr.dtype == SparseDtype(np.bool)
assert not arr.fill_value
arr = SparseArray([True, False, True], dtype=np.bool)
assert arr.dtype == SparseDtype(np.bool)
assert not arr.fill_value
arr = SparseArray([True, False, True], dtype=np.bool, fill_value=True)
assert arr.dtype == SparseDtype(np.bool, True)
assert arr.fill_value
def test_constructor_float32(self):
# GH 10648
data = np.array([1., np.nan, 3], dtype=np.float32)
arr = SparseArray(data, dtype=np.float32)
assert arr.dtype == SparseDtype(np.float32)
tm.assert_numpy_array_equal(arr.sp_values,
np.array([1, 3], dtype=np.float32))
# Behavior change: np.asarray densifies.
# tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(arr.sp_index.indices,
np.array([0, 2], dtype=np.int32))
for dense in [arr.to_dense(), arr.values]:
assert dense.dtype == np.float32
tm.assert_numpy_array_equal(dense, data)
def test_astype(self):
# float -> float
arr = SparseArray([None, None, 0, 2])
result = arr.astype("Sparse[float32]")
expected = SparseArray([None, None, 0, 2], dtype=np.dtype('float32'))
tm.assert_sp_array_equal(result, expected)
dtype = SparseDtype("float64", fill_value=0)
result = arr.astype(dtype)
expected = SparseArray._simple_new(np.array([0., 2.],
dtype=dtype.subtype),
IntIndex(4, [2, 3]),
dtype)
tm.assert_sp_array_equal(result, expected)
dtype = SparseDtype("int64", 0)
result = arr.astype(dtype)
expected = SparseArray._simple_new(np.array([0, 2], dtype=np.int64),
IntIndex(4, [2, 3]),
dtype)
tm.assert_sp_array_equal(result, expected)
arr = SparseArray([0, np.nan, 0, 1], fill_value=0)
with tm.assert_raises_regex(ValueError, 'NA'):
arr.astype('Sparse[i8]')
def test_astype_bool(self):
a = pd.SparseArray([1, 0, 0, 1], dtype=SparseDtype(int, 0))
result = a.astype(bool)
expected = SparseArray([True, 0, 0, True],
dtype=SparseDtype(bool, 0))
tm.assert_sp_array_equal(result, expected)
# update fill value
result = a.astype(SparseDtype(bool, False))
expected = SparseArray([True, False, False, True],
dtype=SparseDtype(bool, False))
tm.assert_sp_array_equal(result, expected)
def test_astype_all(self, any_real_dtype):
vals = np.array([1, 2, 3])
arr = SparseArray(vals, fill_value=1)
typ = np.dtype(any_real_dtype)
res = arr.astype(typ)
assert res.dtype == SparseDtype(typ, 1)
assert res.sp_values.dtype == typ
tm.assert_numpy_array_equal(np.asarray(res.values),
vals.astype(typ))
def test_set_fill_value(self):
arr = SparseArray([1., np.nan, 2.], fill_value=np.nan)
arr.fill_value = 2
assert arr.fill_value == 2
arr = SparseArray([1, 0, 2], fill_value=0, dtype=np.int64)
arr.fill_value = 2
assert arr.fill_value == 2
# XXX: this seems fine? You can construct an integer
# sparsearray with NaN fill value, why not update one?
# coerces to int
# msg = "unable to set fill_value 3\\.1 to int64 dtype"
# with tm.assert_raises_regex(ValueError, msg):
arr.fill_value = 3.1
assert arr.fill_value == 3.1
# msg = "unable to set fill_value nan to int64 dtype"
# with tm.assert_raises_regex(ValueError, msg):
arr.fill_value = np.nan
assert np.isnan(arr.fill_value)
arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool)
arr.fill_value = True
assert arr.fill_value
# coerces to bool
# msg = "unable to set fill_value 0 to bool dtype"
# with tm.assert_raises_regex(ValueError, msg):
arr.fill_value = 0
assert arr.fill_value == 0
# msg = "unable to set fill_value nan to bool dtype"
# with tm.assert_raises_regex(ValueError, msg):
arr.fill_value = np.nan
assert np.isnan(arr.fill_value)
@pytest.mark.parametrize("val", [[1, 2, 3], np.array([1, 2]), (1, 2, 3)])
def test_set_fill_invalid_non_scalar(self, val):
arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool)
msg = "fill_value must be a scalar"
with tm.assert_raises_regex(ValueError, msg):
arr.fill_value = val
def test_copy_shallow(self):
arr2 = self.arr.copy(deep=False)
assert arr2.sp_values is self.arr.sp_values
assert arr2.sp_index is self.arr.sp_index
def test_values_asarray(self):
assert_almost_equal(self.arr.values, self.arr_data)
assert_almost_equal(self.arr.to_dense(), self.arr_data)
@pytest.mark.parametrize('data,shape,dtype', [
([0, 0, 0, 0, 0], (5,), None),
([], (0,), None),
([0], (1,), None),
(['A', 'A', np.nan, 'B'], (4,), np.object)
])
def test_shape(self, data, shape, dtype):
# GH 21126
out = SparseArray(data, dtype=dtype)
assert out.shape == shape
def test_to_dense(self):
vals = np.array([1, np.nan, np.nan, 3, np.nan])
res = SparseArray(vals).to_dense()
tm.assert_numpy_array_equal(res, vals)
res = SparseArray(vals, fill_value=0).to_dense()
tm.assert_numpy_array_equal(res, vals)
vals = np.array([1, np.nan, 0, 3, 0])
res = SparseArray(vals).to_dense()
tm.assert_numpy_array_equal(res, vals)
res = SparseArray(vals, fill_value=0).to_dense()
tm.assert_numpy_array_equal(res, vals)
vals = np.array([np.nan, np.nan, np.nan, np.nan, np.nan])
res = SparseArray(vals).to_dense()
tm.assert_numpy_array_equal(res, vals)
res = SparseArray(vals, fill_value=0).to_dense()
tm.assert_numpy_array_equal(res, vals)
# see gh-14647
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
SparseArray(vals).to_dense(fill=2)
def test_getitem(self):
def _checkit(i):
assert_almost_equal(self.arr[i], self.arr.values[i])
for i in range(len(self.arr)):
_checkit(i)
_checkit(-i)
def test_getitem_arraylike_mask(self):
arr = SparseArray([0, 1, 2])
result = arr[[True, False, True]]
expected = SparseArray([0, 2])
tm.assert_sp_array_equal(result, expected)
def test_getslice(self):
result = self.arr[:-3]
exp = SparseArray(self.arr.values[:-3])
tm.assert_sp_array_equal(result, exp)
result = self.arr[-4:]
exp = SparseArray(self.arr.values[-4:])
tm.assert_sp_array_equal(result, exp)
# two corner cases from Series
result = self.arr[-12:]
exp = SparseArray(self.arr)
tm.assert_sp_array_equal(result, exp)
result = self.arr[:-12]
exp = SparseArray(self.arr.values[:0])
tm.assert_sp_array_equal(result, exp)
def test_getslice_tuple(self):
dense = np.array([np.nan, 0, 3, 4, 0, 5, np.nan, np.nan, 0])
sparse = SparseArray(dense)
res = sparse[4:, ]
exp = SparseArray(dense[4:, ])
tm.assert_sp_array_equal(res, exp)
sparse = SparseArray(dense, fill_value=0)
res = sparse[4:, ]
exp = SparseArray(dense[4:, ], fill_value=0)
tm.assert_sp_array_equal(res, exp)
with pytest.raises(IndexError):
sparse[4:, :]
with pytest.raises(IndexError):
# check numpy compat
dense[4:, :]
def test_boolean_slice_empty(self):
arr = pd.SparseArray([0, 1, 2])
res = arr[[False, False, False]]
assert res.dtype == arr.dtype
@pytest.mark.parametrize("op", ["add", "sub", "mul",
"truediv", "floordiv", "pow"])
def test_binary_operators(self, op):
op = getattr(operator, op)
data1 = np.random.randn(20)
data2 = np.random.randn(20)
data1[::2] = np.nan
data2[::3] = np.nan
arr1 = SparseArray(data1)
arr2 = SparseArray(data2)
data1[::2] = 3
data2[::3] = 3
farr1 = SparseArray(data1, fill_value=3)
farr2 = SparseArray(data2, fill_value=3)
def _check_op(op, first, second):
res = op(first, second)
exp = SparseArray(op(first.values, second.values),
fill_value=first.fill_value)
assert isinstance(res, SparseArray)
assert_almost_equal(res.values, exp.values)
res2 = op(first, second.values)
assert isinstance(res2, SparseArray)
tm.assert_sp_array_equal(res, res2)
res3 = op(first.values, second)
assert isinstance(res3, SparseArray)
tm.assert_sp_array_equal(res, res3)
res4 = op(first, 4)
assert isinstance(res4, SparseArray)
# Ignore this if the actual op raises (e.g. pow).
try:
exp = op(first.values, 4)
exp_fv = op(first.fill_value, 4)
except ValueError:
pass
else:
assert_almost_equal(res4.fill_value, exp_fv)
assert_almost_equal(res4.values, exp)
with np.errstate(all="ignore"):
for first_arr, second_arr in [(arr1, arr2), (farr1, farr2)]:
_check_op(op, first_arr, second_arr)
def test_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
tm.assert_sp_array_equal(unpickled, obj)
_check_roundtrip(self.arr)
_check_roundtrip(self.zarr)
def test_generator_warnings(self):
sp_arr = SparseArray([1, 2, 3])
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings(action='always',
category=DeprecationWarning)
warnings.filterwarnings(action='always',
category=PendingDeprecationWarning)
for _ in sp_arr:
pass
assert len(w) == 0
def test_fillna(self):
s = SparseArray([1, np.nan, np.nan, 3, np.nan])
res = s.fillna(-1)
exp = SparseArray([1, -1, -1, 3, -1], fill_value=-1, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0)
res = s.fillna(-1)
exp = SparseArray([1, -1, -1, 3, -1], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([1, np.nan, 0, 3, 0])
res = s.fillna(-1)
exp = SparseArray([1, -1, 0, 3, 0], fill_value=-1, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([1, np.nan, 0, 3, 0], fill_value=0)
res = s.fillna(-1)
exp = SparseArray([1, -1, 0, 3, 0], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([np.nan, np.nan, np.nan, np.nan])
res = s.fillna(-1)
exp = SparseArray([-1, -1, -1, -1], fill_value=-1, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([np.nan, np.nan, np.nan, np.nan], fill_value=0)
res = s.fillna(-1)
exp = SparseArray([-1, -1, -1, -1], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
# float dtype's fill_value is np.nan, replaced by -1
s = SparseArray([0., 0., 0., 0.])
res = s.fillna(-1)
exp = SparseArray([0., 0., 0., 0.], fill_value=-1)
tm.assert_sp_array_equal(res, exp)
# int dtype shouldn't have missing. No changes.
s = SparseArray([0, 0, 0, 0])
assert s.dtype == SparseDtype(np.int64)
assert s.fill_value == 0
res = s.fillna(-1)
tm.assert_sp_array_equal(res, s)
s = SparseArray([0, 0, 0, 0], fill_value=0)
assert s.dtype == SparseDtype(np.int64)
assert s.fill_value == 0
res = s.fillna(-1)
exp = SparseArray([0, 0, 0, 0], fill_value=0)
tm.assert_sp_array_equal(res, exp)
# fill_value can be nan if there is no missing hole.
# only fill_value will be changed
s = SparseArray([0, 0, 0, 0], fill_value=np.nan)
assert s.dtype == SparseDtype(np.int64, fill_value=np.nan)
assert np.isnan(s.fill_value)
res = s.fillna(-1)
exp = SparseArray([0, 0, 0, 0], fill_value=-1)
tm.assert_sp_array_equal(res, exp)
def test_fillna_overlap(self):
s = SparseArray([1, np.nan, np.nan, 3, np.nan])
# filling with existing value doesn't replace existing value with
# fill_value, i.e. existing 3 remains in sp_values
res = s.fillna(3)
exp = np.array([1, 3, 3, 3, 3], dtype=np.float64)
tm.assert_numpy_array_equal(res.to_dense(), exp)
s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0)
res = s.fillna(3)
exp = SparseArray([1, 3, 3, 3, 3], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
class TestSparseArrayAnalytics(object):
@pytest.mark.parametrize('data,pos,neg', [
([True, True, True], True, False),
([1, 2, 1], 1, 0),
([1.0, 2.0, 1.0], 1.0, 0.0)
])
def test_all(self, data, pos, neg):
# GH 17570
out = SparseArray(data).all()
assert out
out = SparseArray(data, fill_value=pos).all()
assert out
data[1] = neg
out = SparseArray(data).all()
assert not out
out = SparseArray(data, fill_value=pos).all()
assert not out
@pytest.mark.parametrize('data,pos,neg', [
([True, True, True], True, False),
([1, 2, 1], 1, 0),
([1.0, 2.0, 1.0], 1.0, 0.0)
])
@td.skip_if_np_lt_115 # prior didn't dispatch
def test_numpy_all(self, data, pos, neg):
# GH 17570
out = np.all(SparseArray(data))
assert out
out = np.all(SparseArray(data, fill_value=pos))
assert out
data[1] = neg
out = np.all(SparseArray(data))
assert not out
out = np.all(SparseArray(data, fill_value=pos))
assert not out
# raises with a different message on py2.
msg = "the \'out\' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.all,
SparseArray(data), out=np.array([]))
@pytest.mark.parametrize('data,pos,neg', [
([False, True, False], True, False),
([0, 2, 0], 2, 0),
([0.0, 2.0, 0.0], 2.0, 0.0)
])
def test_any(self, data, pos, neg):
# GH 17570
out = SparseArray(data).any()
assert out
out = SparseArray(data, fill_value=pos).any()
assert out
data[1] = neg
out = SparseArray(data).any()
assert not out
out = SparseArray(data, fill_value=pos).any()
assert not out
@pytest.mark.parametrize('data,pos,neg', [
([False, True, False], True, False),
([0, 2, 0], 2, 0),
([0.0, 2.0, 0.0], 2.0, 0.0)
])
@td.skip_if_np_lt_115 # prior didn't dispatch
def test_numpy_any(self, data, pos, neg):
# GH 17570
out = np.any(SparseArray(data))
assert out
out = np.any(SparseArray(data, fill_value=pos))
assert out
data[1] = neg
out = np.any(SparseArray(data))
assert not out
out = np.any(SparseArray(data, fill_value=pos))
assert not out
msg = "the \'out\' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.any,
SparseArray(data), out=out)
def test_sum(self):
data = np.arange(10).astype(float)
out = SparseArray(data).sum()
assert out == 45.0
data[5] = np.nan
out = SparseArray(data, fill_value=2).sum()
assert out == 40.0
out = SparseArray(data, fill_value=np.nan).sum()
assert out == 40.0
def test_numpy_sum(self):
data = np.arange(10).astype(float)
out = np.sum(SparseArray(data))
assert out == 45.0
data[5] = np.nan
out = np.sum(SparseArray(data, fill_value=2))
assert out == 40.0
out = np.sum(SparseArray(data, fill_value=np.nan))
assert out == 40.0
msg = "the 'dtype' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.sum,
SparseArray(data), dtype=np.int64)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.sum,
SparseArray(data), out=out)
@pytest.mark.parametrize("data,expected", [
(np.array([1, 2, 3, 4, 5], dtype=float), # non-null data
SparseArray(np.array([1.0, 3.0, 6.0, 10.0, 15.0]))),
(np.array([1, 2, np.nan, 4, 5], dtype=float), # null data
SparseArray(np.array([1.0, 3.0, np.nan, 7.0, 12.0])))
])
@pytest.mark.parametrize("numpy", [True, False])
def test_cumsum(self, data, expected, numpy):
cumsum = np.cumsum if numpy else lambda s: s.cumsum()
out = cumsum(SparseArray(data))
tm.assert_sp_array_equal(out, expected)
out = cumsum(SparseArray(data, fill_value=np.nan))
tm.assert_sp_array_equal(out, expected)
out = cumsum(SparseArray(data, fill_value=2))
tm.assert_sp_array_equal(out, expected)
if numpy: # numpy compatibility checks.
msg = "the 'dtype' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.cumsum,
SparseArray(data), dtype=np.int64)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.cumsum,
SparseArray(data), out=out)
else:
axis = 1 # SparseArray currently 1-D, so only axis = 0 is valid.
msg = "axis\\(={axis}\\) out of bounds".format(axis=axis)
with tm.assert_raises_regex(ValueError, msg):
SparseArray(data).cumsum(axis=axis)
def test_mean(self):
data = np.arange(10).astype(float)
out = SparseArray(data).mean()
assert out == 4.5
data[5] = np.nan
out = SparseArray(data).mean()
assert out == 40.0 / 9
def test_numpy_mean(self):
data = np.arange(10).astype(float)
out = np.mean(SparseArray(data))
assert out == 4.5
data[5] = np.nan
out = np.mean(SparseArray(data))
assert out == 40.0 / 9
msg = "the 'dtype' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.mean,
SparseArray(data), dtype=np.int64)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.mean,
SparseArray(data), out=out)
def test_ufunc(self):
# GH 13853 make sure ufunc is applied to fill_value
sparse = SparseArray([1, np.nan, 2, np.nan, -2])
result = SparseArray([1, np.nan, 2, np.nan, 2])
tm.assert_sp_array_equal(abs(sparse), result)
tm.assert_sp_array_equal(np.abs(sparse), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=1)
result = SparseArray([1, 2, 2], sparse_index=sparse.sp_index,
fill_value=1)
tm.assert_sp_array_equal(abs(sparse), result)
tm.assert_sp_array_equal(np.abs(sparse), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=-1)
result = SparseArray([1, 2, 2], sparse_index=sparse.sp_index,
fill_value=1)
tm.assert_sp_array_equal(abs(sparse), result)
tm.assert_sp_array_equal(np.abs(sparse), result)
sparse = SparseArray([1, np.nan, 2, np.nan, -2])
result = SparseArray(np.sin([1, np.nan, 2, np.nan, -2]))
tm.assert_sp_array_equal(np.sin(sparse), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=1)
result = SparseArray(np.sin([1, -1, 2, -2]), fill_value=np.sin(1))
tm.assert_sp_array_equal(np.sin(sparse), result)
sparse = SparseArray([1, -1, 0, -2], fill_value=0)
result = SparseArray(np.sin([1, -1, 0, -2]), fill_value=np.sin(0))
tm.assert_sp_array_equal(np.sin(sparse), result)
def test_ufunc_args(self):
# GH 13853 make sure ufunc is applied to fill_value, including its arg
sparse = SparseArray([1, np.nan, 2, np.nan, -2])
result = SparseArray([2, np.nan, 3, np.nan, -1])
tm.assert_sp_array_equal(np.add(sparse, 1), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=1)
result = SparseArray([2, 0, 3, -1], fill_value=2)
tm.assert_sp_array_equal(np.add(sparse, 1), result)
sparse = SparseArray([1, -1, 0, -2], fill_value=0)
result = SparseArray([2, 0, 1, -1], fill_value=1)
tm.assert_sp_array_equal(np.add(sparse, 1), result)
def test_nbytes_integer(self):
arr = SparseArray([1, 0, 0, 0, 2], kind='integer')
result = arr.nbytes
# (2 * 8) + 2 * 4
assert result == 24
def test_nbytes_block(self):
arr = SparseArray([1, 2, 0, 0, 0], kind='block')
result = arr.nbytes
# (2 * 8) + 4 + 4
        # sp_values, blocs, blengths
assert result == 24
def test_asarray_datetime64(self):
s = pd.SparseArray(
pd.to_datetime(['2012', None, None, '2013'])
)
np.asarray(s)
def test_setting_fill_value_fillna_still_works():
# This is why letting users update fill_value / dtype is bad
# astype has the same problem.
arr = SparseArray([1., np.nan, 1.0], fill_value=0.0)
arr.fill_value = np.nan
result = arr.isna()
# Can't do direct comparison, since the sp_index will be different
# So let's convert to ndarray and check there.
result = np.asarray(result)
expected = np.array([False, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_setting_fill_value_updates():
arr = SparseArray([0.0, np.nan], fill_value=0)
arr.fill_value = np.nan
# use private constructor to get the index right
# otherwise both nans would be un-stored.
expected = SparseArray._simple_new(
sparse_array=np.array([np.nan]),
sparse_index=IntIndex(2, [1]),
dtype=SparseDtype(float, np.nan),
)
tm.assert_sp_array_equal(arr, expected)
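# A minimal, hedged sketch (behavior inferred from the two tests above, not a
# test itself): re-assigning ``fill_value`` only relabels which value the
# unstored positions take; values already held in ``sp_values`` are untouched.
def _example_fill_value_update_sketch():
    arr = SparseArray([0.0, 1.0, 0.0], fill_value=0.0)   # only the 1.0 is stored
    arr.fill_value = np.nan                               # relabel the fill
    return np.asarray(arr)                                # -> [nan, 1.0, nan]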
@pytest.mark.parametrize("arr, loc", [
([None, 1, 2], 0),
([0, None, 2], 1),
([0, 1, None], 2),
([0, 1, 1, None, None], 3),
([1, 1, 1, 2], -1),
([], -1),
])
def test_first_fill_value_loc(arr, loc):
result = SparseArray(arr)._first_fill_value_loc()
assert result == loc
@pytest.mark.parametrize('arr', [
[1, 2, np.nan, np.nan],
[1, np.nan, 2, np.nan],
[1, 2, np.nan],
])
@pytest.mark.parametrize("fill_value", [
np.nan, 0, 1
])
def test_unique_na_fill(arr, fill_value):
a = pd.SparseArray(arr, fill_value=fill_value).unique()
b = pd.Series(arr).unique()
assert isinstance(a, SparseArray)
a = np.asarray(a)
tm.assert_numpy_array_equal(a, b)
def test_map():
arr = SparseArray([0, 1, 2])
expected = SparseArray([10, 11, 12], fill_value=10)
# dict
result = arr.map({0: 10, 1: 11, 2: 12})
tm.assert_sp_array_equal(result, expected)
# series
result = arr.map(pd.Series({0: 10, 1: 11, 2: 12}))
tm.assert_sp_array_equal(result, expected)
# function
result = arr.map(pd.Series({0: 10, 1: 11, 2: 12}))
expected = SparseArray([10, 11, 12], fill_value=10)
tm.assert_sp_array_equal(result, expected)
def test_map_missing():
arr = SparseArray([0, 1, 2])
expected = SparseArray([10, 11, None], fill_value=10)
result = arr.map({0: 10, 1: 11})
tm.assert_sp_array_equal(result, expected)
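# A hedged usage sketch (not a test): how the fillna / fill_value behavior
# exercised in the tests above looks from user code. The concrete values are
# illustrative only and mirror test_fillna.
def _example_fillna_sketch():
    arr = SparseArray([1.0, np.nan, 0.0, 3.0, 0.0], fill_value=0.0)
    filled = arr.fillna(-1)       # the NaN becomes -1; the fill_value stays 0
    return np.asarray(filled)     # -> [1.0, -1.0, 0.0, 3.0, 0.0]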
|
bsd-3-clause
|
clemkoa/scikit-learn
|
sklearn/decomposition/pca.py
|
5
|
29976
|
""" Principal Component Analysis
"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Michael Eickenberg <[email protected]>
# Giorgio Patrini <[email protected]>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from scipy.sparse import issparse
from scipy.sparse.linalg import svds
from ..externals import six
from .base import _BasePCA
from ..base import BaseEstimator, TransformerMixin
from ..utils import deprecated
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_logdet, randomized_svd, svd_flip
from ..utils.extmath import stable_cumsum
from ..utils.validation import check_is_fitted
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
The dataset is assumed to be embedded in gaussian noise of shape(n,
dimf) having spectrum ``spectrum``.
Parameters
----------
spectrum : array of shape (n)
Data spectrum.
rank : int
Tested rank value.
n_samples : int
Number of samples.
n_features : int
Number of features.
Returns
-------
ll : float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.) -
log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
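# A hedged, illustrative sketch (not part of the public API): how the two
# helpers above combine on a toy spectrum. The eigenvalues are made up; with a
# clear gap after the second one, the inferred rank should be small.
def _example_infer_dimension_sketch():
    spectrum = np.array([10., 4., 0.1, 0.09, 0.08, 0.07])
    return _infer_dimension_(spectrum, n_samples=200, n_features=6)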
class PCA(_BasePCA):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data to project it to a lower dimensional space.
It uses the LAPACK implementation of the full SVD or a randomized truncated
SVD by the method of Halko et al. 2009, depending on the shape of the input
data and the number of components to extract.
It can also use the scipy.sparse.linalg ARPACK implementation of the
truncated SVD.
Notice that this class does not support sparse input. See
:class:`TruncatedSVD` for an alternative with sparse data.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, float, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle' and svd_solver == 'full', Minka\'s MLE is used
to guess the dimension
if ``0 < n_components < 1`` and svd_solver == 'full', select the number
of components such that the amount of variance that needs to be
explained is greater than the percentage specified by n_components.
If svd_solver == 'arpack', the number of components must be strictly
less than the minimum of n_features and n_samples.
Hence, the None case results in:
n_components == min(n_samples, n_features) - 1
copy : bool (default True)
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional (default False)
When True (False by default) the `components_` vectors are multiplied
by the square root of n_samples and then divided by the singular values
to ensure uncorrelated outputs with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
svd_solver : string {'auto', 'full', 'arpack', 'randomized'}
auto :
the solver is selected by a default policy based on `X.shape` and
`n_components`: if the input data is larger than 500x500 and the
number of components to extract is lower than 80% of the smallest
dimension of the data, then the more efficient 'randomized'
method is enabled. Otherwise the exact full SVD is computed and
optionally truncated afterwards.
full :
run exact full SVD calling the standard LAPACK solver via
`scipy.linalg.svd` and select the components by postprocessing
arpack :
run SVD truncated to n_components calling ARPACK solver via
`scipy.sparse.linalg.svds`. It requires strictly
0 < n_components < min(X.shape)
randomized :
run randomized SVD by the method of Halko et al.
.. versionadded:: 0.18.0
tol : float >= 0, optional (default .0)
Tolerance for singular values computed by svd_solver == 'arpack'.
.. versionadded:: 0.18.0
iterated_power : int >= 0, or 'auto', (default 'auto')
Number of iterations for the power method computed by
svd_solver == 'randomized'.
.. versionadded:: 0.18.0
random_state : int, RandomState instance or None, optional (default None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Used when ``svd_solver`` == 'arpack' or 'randomized'.
.. versionadded:: 0.18.0
Attributes
----------
components_ : array, shape (n_components, n_features)
Principal axes in feature space, representing the directions of
maximum variance in the data. The components are sorted by
``explained_variance_``.
explained_variance_ : array, shape (n_components,)
The amount of variance explained by each of the selected components.
Equal to n_components largest eigenvalues
of the covariance matrix of X.
.. versionadded:: 0.18
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of the ratios is equal to 1.0.
singular_values_ : array, shape (n_components,)
The singular values corresponding to each of the selected components.
The singular values are equal to the 2-norms of the ``n_components``
variables in the lower-dimensional space.
mean_ : array, shape (n_features,)
Per-feature empirical mean, estimated from the training set.
Equal to `X.mean(axis=0)`.
n_components_ : int
The estimated number of components. When n_components is set
to 'mle' or a number between 0 and 1 (with svd_solver == 'full') this
number is estimated from input data. Otherwise it equals the parameter
n_components, or the lesser value of n_features and n_samples
if n_components is None.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
        compute the estimated data covariance and score samples.
Equal to the average of (min(n_features, n_samples) - n_components)
smallest eigenvalues of the covariance matrix of X.
References
----------
For n_components == 'mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
For svd_solver == 'arpack', refer to `scipy.sparse.linalg.svds`.
For svd_solver == 'randomized', see:
`Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
(arXiv:909)`
`A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, iterated_power='auto', n_components=2, random_state=None,
svd_solver='auto', tol=0.0, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
>>> print(pca.singular_values_) # doctest: +ELLIPSIS
[ 6.30061... 0.54980...]
>>> pca = PCA(n_components=2, svd_solver='full')
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
PCA(copy=True, iterated_power='auto', n_components=2, random_state=None,
svd_solver='full', tol=0.0, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
>>> print(pca.singular_values_) # doctest: +ELLIPSIS
[ 6.30061... 0.54980...]
>>> pca = PCA(n_components=1, svd_solver='arpack')
>>> pca.fit(X)
PCA(copy=True, iterated_power='auto', n_components=1, random_state=None,
svd_solver='arpack', tol=0.0, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244...]
>>> print(pca.singular_values_) # doctest: +ELLIPSIS
[ 6.30061...]
See also
--------
KernelPCA
SparsePCA
TruncatedSVD
IncrementalPCA
"""
def __init__(self, n_components=None, copy=True, whiten=False,
svd_solver='auto', tol=0.0, iterated_power='auto',
random_state=None):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
self.svd_solver = svd_solver
self.tol = tol
self.iterated_power = iterated_power
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
y : Ignored
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : Ignored
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0] - 1)
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Dispatch to the right submethod depending on the chosen solver."""
# Raise an error for sparse input.
# This is more informative than the generic one raised by check_array.
if issparse(X):
raise TypeError('PCA does not support sparse input. See '
'TruncatedSVD for a possible alternative.')
X = check_array(X, dtype=[np.float64, np.float32], ensure_2d=True,
copy=self.copy)
# Handle n_components==None
if self.n_components is None:
if self.svd_solver != 'arpack':
n_components = min(X.shape)
else:
n_components = min(X.shape) - 1
else:
n_components = self.n_components
# Handle svd_solver
svd_solver = self.svd_solver
if svd_solver == 'auto':
# Small problem, just call full PCA
if max(X.shape) <= 500:
svd_solver = 'full'
elif n_components >= 1 and n_components < .8 * min(X.shape):
svd_solver = 'randomized'
# This is also the case of n_components in (0,1)
else:
svd_solver = 'full'
# Call different fits for either full or truncated SVD
if svd_solver == 'full':
return self._fit_full(X, n_components)
elif svd_solver in ['arpack', 'randomized']:
return self._fit_truncated(X, n_components, svd_solver)
else:
raise ValueError("Unrecognized svd_solver='{0}'"
"".format(svd_solver))
def _fit_full(self, X, n_components):
"""Fit the model by computing full SVD on X"""
n_samples, n_features = X.shape
if n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
elif not 0 <= n_components <= min(n_samples, n_features):
raise ValueError("n_components=%r must be between 0 and "
"min(n_samples, n_features)=%r with "
"svd_solver='full'"
% (n_components, min(n_samples, n_features)))
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
# flip eigenvectors' sign to enforce deterministic output
U, V = svd_flip(U, V)
components_ = V
# Get variance explained by singular values
explained_variance_ = (S ** 2) / (n_samples - 1)
total_var = explained_variance_.sum()
explained_variance_ratio_ = explained_variance_ / total_var
singular_values_ = S.copy() # Store the singular values.
# Postprocess the number of components required
if n_components == 'mle':
n_components = \
_infer_dimension_(explained_variance_, n_samples, n_features)
elif 0 < n_components < 1.0:
            # number of components for which the cumulative explained
            # variance exceeds the desired threshold
ratio_cumsum = stable_cumsum(explained_variance_ratio_)
n_components = np.searchsorted(ratio_cumsum, n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < min(n_features, n_samples):
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
self.n_samples_, self.n_features_ = n_samples, n_features
self.components_ = components_[:n_components]
self.n_components_ = n_components
self.explained_variance_ = explained_variance_[:n_components]
self.explained_variance_ratio_ = \
explained_variance_ratio_[:n_components]
self.singular_values_ = singular_values_[:n_components]
return U, S, V
def _fit_truncated(self, X, n_components, svd_solver):
"""Fit the model by computing truncated SVD (by ARPACK or randomized)
on X
"""
n_samples, n_features = X.shape
if isinstance(n_components, six.string_types):
raise ValueError("n_components=%r cannot be a string "
"with svd_solver='%s'"
% (n_components, svd_solver))
elif not 1 <= n_components <= min(n_samples, n_features):
raise ValueError("n_components=%r must be between 1 and "
"min(n_samples, n_features)=%r with "
"svd_solver='%s'"
% (n_components, min(n_samples, n_features),
svd_solver))
elif svd_solver == 'arpack' and n_components == min(n_samples,
n_features):
raise ValueError("n_components=%r must be strictly less than "
"min(n_samples, n_features)=%r with "
"svd_solver='%s'"
% (n_components, min(n_samples, n_features),
svd_solver))
random_state = check_random_state(self.random_state)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if svd_solver == 'arpack':
# random init solution, as ARPACK does it internally
v0 = random_state.uniform(-1, 1, size=min(X.shape))
U, S, V = svds(X, k=n_components, tol=self.tol, v0=v0)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
S = S[::-1]
# flip eigenvectors' sign to enforce deterministic output
U, V = svd_flip(U[:, ::-1], V[::-1])
elif svd_solver == 'randomized':
# sign flipping is done inside
U, S, V = randomized_svd(X, n_components=n_components,
n_iter=self.iterated_power,
flip_sign=True,
random_state=random_state)
self.n_samples_, self.n_features_ = n_samples, n_features
self.components_ = V
self.n_components_ = n_components
# Get variance explained by singular values
self.explained_variance_ = (S ** 2) / (n_samples - 1)
total_var = np.var(X, ddof=1, axis=0)
self.explained_variance_ratio_ = \
self.explained_variance_ / total_var.sum()
self.singular_values_ = S.copy() # Store the singular values.
if self.n_components_ < min(n_features, n_samples):
self.noise_variance_ = (total_var.sum() -
self.explained_variance_.sum())
self.noise_variance_ /= min(n_features, n_samples) - n_components
else:
self.noise_variance_ = 0.
return U, S, V
def score_samples(self, X):
"""Return the log-likelihood of each sample.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X : array, shape(n_samples, n_features)
The data.
Returns
-------
ll : array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi) -
fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X : array, shape(n_samples, n_features)
The data.
y : Ignored
Returns
-------
ll : float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
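# A hedged usage sketch (illustration only, not part of this module's API):
# the relationship between the two likelihood methods defined above.
def _example_pca_score_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 5)
    pca = PCA(n_components=2).fit(X)
    per_sample = pca.score_samples(X)       # log-likelihood of each sample
    return per_sample.mean(), pca.score(X)  # the two values coincide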
@deprecated("RandomizedPCA was deprecated in 0.18 and will be removed in "
"0.20. "
"Use PCA(svd_solver='randomized') instead. The new implementation "
"DOES NOT store whiten ``components_``. Apply transform to get "
"them.")
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
.. deprecated:: 0.18
This class will be removed in 0.20.
Use :class:`PCA` with parameter svd_solver 'randomized' instead.
The new implementation DOES NOT store whiten ``components_``.
Apply transform to get them.
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Read more in the :ref:`User Guide <RandomizedPCA>`.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, default=2
Number of iterations for the power method.
.. versionchanged:: 0.18
whiten : bool, optional
When True (False by default) the `components_` vectors are multiplied
by the square root of (n_samples) and divided by the singular values to
ensure uncorrelated outputs with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int, RandomState instance or None, optional, default=None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
components_ : array, shape (n_components, n_features)
Components with maximum variance.
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
        If ``n_components`` is not set then all components are stored and the
        sum of explained variances is equal to 1.0.
singular_values_ : array, shape (n_components,)
The singular values corresponding to each of the selected components.
The singular values are equal to the 2-norms of the ``n_components``
variables in the lower-dimensional space.
mean_ : array, shape (n_features,)
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=2, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
>>> print(pca.singular_values_) # doctest: +ELLIPSIS
[ 6.30061... 0.54980...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
(arXiv:909)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=2,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
y : Ignored
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / (n_samples - 1)
full_var = np.var(X, ddof=1, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
self.singular_values_ = S # Store the singular values.
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X):
"""Apply dimensionality reduction on X.
        X is projected onto the first principal components previously
        extracted from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = np.dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
y : Ignored
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return np.dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
        X_original : array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
check_is_fitted(self, 'mean_')
X_original = np.dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
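# A hedged migration sketch (illustrative values only): the deprecation notice
# above points users at PCA with svd_solver='randomized'; this is what that
# replacement call looks like.
def _example_randomized_migration_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 8)
    pca = PCA(n_components=3, svd_solver='randomized', random_state=0)
    return pca.fit(X).explained_variance_ratio_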
|
bsd-3-clause
|
SeldonIO/seldon-server
|
python/seldon/xgb.py
|
3
|
8326
|
import sys
import numpy as np
import xgboost as xgb
from sklearn.datasets import load_svmlight_file
import scipy.sparse
import math
import pandas as pd
from sklearn.feature_extraction import DictVectorizer
from seldon.pipeline.pandas_pipelines import BasePandasEstimator
from collections import OrderedDict
import io
from sklearn.utils import check_X_y
from sklearn.utils import check_array
from sklearn.base import BaseEstimator,ClassifierMixin
import logging
logger = logging.getLogger(__name__)
class XGBoostClassifier(BasePandasEstimator,BaseEstimator,ClassifierMixin):
"""
Wrapper for XGBoost classifier with pandas support
XGBoost specific arguments follow https://github.com/dmlc/xgboost/blob/master/python-package/xgboost/sklearn.py
Parameters
----------
target : str
Target column
target_readable : str
More descriptive version of target variable
included : list str, optional
columns to include
excluded : list str, optional
columns to exclude
id_map : dict (int,str), optional
map of class ids to high level names
num_iterations : int
number of iterations over data to run vw
raw_predictions : str
file to push raw predictions from vw to
max_depth : int
Maximum tree depth for base learners.
learning_rate : float
Boosting learning rate (xgb's "eta")
n_estimators : int
Number of boosted trees to fit.
silent : boolean
Whether to print messages while running boosting.
objective : string
Specify the learning task and the corresponding learning objective.
nthread : int
Number of parallel threads used to run xgboost.
gamma : float
Minimum loss reduction required to make a further partition on a leaf node of the tree.
min_child_weight : int
Minimum sum of instance weight(hessian) needed in a child.
max_delta_step : int
Maximum delta step we allow each tree's weight estimation to be.
subsample : float
Subsample ratio of the training instance.
colsample_bytree : float
Subsample ratio of columns when constructing each tree.
colsample_bylevel : float
Subsample ratio of columns for each split, in each level.
    reg_alpha : float (xgb's alpha)
        L1 regularization term on weights
    reg_lambda : float (xgb's lambda)
        L2 regularization term on weights
scale_pos_weight : float
Balancing of positive and negative weights.
base_score:
The initial prediction score of all instances, global bias.
seed : int
Random number seed.
missing : float, optional
        Value in the data which needs to be treated as a missing value. If
        None, defaults to np.nan.
"""
def __init__(self, target=None, target_readable=None,included=None,excluded=None,clf=None,
id_map={},vectorizer=None,svmlight_feature=None,
max_depth=3, learning_rate=0.1, n_estimators=100,
silent=True, objective="reg:linear",
nthread=-1, gamma=0, min_child_weight=1, max_delta_step=0,
subsample=1, colsample_bytree=1, colsample_bylevel=1,
reg_alpha=0, reg_lambda=1, scale_pos_weight=1,
base_score=0.5, seed=0, missing=None):
super(XGBoostClassifier, self).__init__(target,target_readable,included,excluded,id_map)
self.vectorizer = vectorizer
self.clf = clf
self.max_depth=max_depth
self.learning_rate=learning_rate
self.n_estimators=n_estimators
self.silent=silent
self.objective=objective
self.nthread=nthread
self.gamma=gamma
self.min_child_weight=min_child_weight
self.max_delta_step=max_delta_step
self.subsample=subsample
self.colsample_bytree=colsample_bytree
self.colsample_bylevel=colsample_bylevel
self.reg_alpha=reg_alpha
self.reg_lambda=reg_lambda
self.scale_pos_weight=scale_pos_weight
self.base_score=base_score
self.seed=seed
self.missing=missing
#self.params = { "max_depth":max_depth,"learning_rate":learning_rate,"n_estimators":n_estimators,
# "silent":silent, "objective":objective,
# "nthread":nthread, "gamma":gamma, "min_child_weight":min_child_weight, "max_delta_step":max_delta_step,
# "subsample":subsample, "colsample_bytree":colsample_bytree, "colsample_bylevel":colsample_bylevel,
# "reg_alpha":reg_alpha, "reg_lambda":reg_lambda, "scale_pos_weight":scale_pos_weight,
# "base_score":base_score, "seed":seed, "missing":missing }
self.svmlight_feature = svmlight_feature
def _to_svmlight(self,row):
"""Convert a dataframe row containing a dict of id:val to svmlight line
"""
if self.target in row:
line = str(row[self.target])
else:
line = "1"
d = row[self.svmlight_feature]
for (k,v) in d:
line += (" "+str(k)+":"+str(v))
return line
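    # Hedged illustration of the conversion above (column values are
    # assumptions for the example): a row whose svmlight_feature column holds
    # [(3, 0.5), (7, 2.0)] and whose target column holds 1 becomes the line
    # "1 3:0.5 7:2.0"; the loop expects an iterable of (id, value) pairs,
    # e.g. dict.items().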
def _load_from_svmlight(self,df):
"""Load data from dataframe with dict of id:val into numpy matrix
"""
logger.info("loading from dictionary feature")
df_svm = df.apply(self._to_svmlight,axis=1)
output = io.BytesIO()
df_svm.to_csv(output,index=False,header=False)
output.seek(0)
(X,y) = load_svmlight_file(output,zero_based=False)
output.close()
return (X,y)
def fit(self,X,y=None):
"""Fit a model:
Parameters
----------
        X : pandas dataframe or array-like
            training samples. If a pandas dataframe, features can come from a
            dict column or be converted from a set of columns
        y : array-like, required for array-like X and not used presently for pandas dataframe
class labels
Returns
-------
self: object
"""
if isinstance(X,pd.DataFrame):
df = X
if not self.svmlight_feature is None:
if not self.target_readable is None:
self.create_class_id_map(df,self.target,self.target_readable)
(X,y) = self._load_from_svmlight(df)
num_class = len(np.unique(y))
else:
(X,y,self.vectorizer) = self.convert_numpy(df)
num_class = len(y.unique())
else:
check_X_y(X,y)
num_class = len(np.unique(y))
self.clf = xgb.XGBClassifier(max_depth=self.max_depth, learning_rate=self.learning_rate,
n_estimators=self.n_estimators,
silent=self.silent, objective=self.objective,
nthread=self.nthread, gamma=self.gamma,
min_child_weight=self.min_child_weight,
max_delta_step=self.max_delta_step,
subsample=self.subsample, colsample_bytree=self.colsample_bytree,
colsample_bylevel=self.colsample_bylevel,
reg_alpha=self.reg_alpha, reg_lambda=self.reg_lambda,
scale_pos_weight=self.scale_pos_weight,
base_score=self.base_score, seed=self.seed, missing=self.missing)
logger.info(self.clf.get_params(deep=True))
self.clf.fit(X,y,verbose=True)
return self
def predict_proba(self, X):
"""
Returns class probability estimates for the given test data.
        Parameters
        ----------
        X : pandas dataframe or array-like
            Test samples
Returns
-------
proba : array-like, shape = (n_samples, n_outputs)
Class probability estimates.
"""
if isinstance(X,pd.DataFrame):
df = X
if not self.svmlight_feature is None:
(X,_) = self._load_from_svmlight(df)
else:
(X,_,_) = self.convert_numpy(df)
else:
check_array(X)
return self.clf.predict_proba(X)
|
apache-2.0
|
mxjl620/scikit-learn
|
sklearn/neural_network/tests/test_rbm.py
|
225
|
6278
|
import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
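# BernoulliRBM models binary (or [0, 1]-valued) visible units, so the digit pixel
# intensities are rescaled to the [0, 1] range above.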
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
    # this many iterations are needed for the components to converge
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples([np.arange(1000) * 100])
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
|
bsd-3-clause
|
duncanwp/iris
|
docs/iris/example_code/General/orca_projection.py
|
12
|
1727
|
"""
Tri-Polar Grid Projected Plotting
=================================
This example demonstrates cell plots of data on the semi-structured ORCA2 model
grid.
First, the data is projected into the PlateCarree coordinate reference system.
Second, four pcolormesh plots are created from this projected dataset,
using different projections for the output image.
"""
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import iris
import iris.analysis.cartography
import iris.plot as iplt
import iris.quickplot as qplt
def main():
# Load data
filepath = iris.sample_data_path('orca2_votemper.nc')
cube = iris.load_cube(filepath)
# Choose plot projections
projections = {}
projections['Mollweide'] = ccrs.Mollweide()
projections['PlateCarree'] = ccrs.PlateCarree()
projections['NorthPolarStereo'] = ccrs.NorthPolarStereo()
projections['Orthographic'] = ccrs.Orthographic(central_longitude=-90,
central_latitude=45)
pcarree = projections['PlateCarree']
# Transform cube to target projection
new_cube, extent = iris.analysis.cartography.project(cube, pcarree,
nx=400, ny=200)
# Plot data in each projection
for name in sorted(projections):
fig = plt.figure()
fig.suptitle('ORCA2 Data Projected to {}'.format(name))
# Set up axes and title
ax = plt.subplot(projection=projections[name])
# Set limits
ax.set_global()
# plot with Iris quickplot pcolormesh
qplt.pcolormesh(new_cube)
# Draw coastlines
ax.coastlines()
iplt.show()
if __name__ == '__main__':
main()
|
lgpl-3.0
|
zangsir/sms-tools
|
software/models_interface/spsModel_function.py
|
1
|
3514
|
# function to call the main analysis/synthesis functions in software/models/spsModel.py
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import get_window
import spsModel as SPS
import utilFunctions as UF
def main(inputFile='../../sounds/bendir.wav', window='hamming', M=2001, N=2048, t=-80, minSineDur=0.02,
maxnSines=150, freqDevOffset=10, freqDevSlope=0.001, stocf=0.2):
"""
inputFile: input sound file (monophonic with sampling rate of 44100)
window: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris)
	M: analysis window size; N: fft size (power of two, greater than or equal to M)
t: magnitude threshold of spectral peaks; minSineDur: minimum duration of sinusoidal tracks
maxnSines: maximum number of parallel sinusoids
freqDevOffset: frequency deviation allowed in the sinusoids from frame to frame at frequency 0
freqDevSlope: slope of the frequency deviation, higher frequencies have bigger deviation
stocf: decimation factor used for the stochastic approximation
"""
# size of fft used in synthesis
Ns = 512
# hop size (has to be 1/4 of Ns)
H = 128
# read input sound
(fs, x) = UF.wavread(inputFile)
# compute analysis window
w = get_window(window, M)
	# perform sinusoidal+stochastic analysis
tfreq, tmag, tphase, stocEnv = SPS.spsModelAnal(x, fs, w, N, H, t, minSineDur, maxnSines, freqDevOffset, freqDevSlope, stocf)
# synthesize sinusoidal+stochastic model
y, ys, yst = SPS.spsModelSynth(tfreq, tmag, tphase, stocEnv, Ns, H, fs)
# output sound file (monophonic with sampling rate of 44100)
outputFileSines = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_spsModel_sines.wav'
outputFileStochastic = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_spsModel_stochastic.wav'
outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_spsModel.wav'
# write sounds files for sinusoidal, residual, and the sum
UF.wavwrite(ys, fs, outputFileSines)
UF.wavwrite(yst, fs, outputFileStochastic)
UF.wavwrite(y, fs, outputFile)
# create figure to plot
plt.figure(figsize=(12, 9))
# frequency range to plot
maxplotfreq = 10000.0
# plot the input sound
plt.subplot(3,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('input sound: x')
plt.subplot(3,1,2)
numFrames = int(stocEnv[:,0].size)
sizeEnv = int(stocEnv[0,:].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = (.5*fs)*np.arange(sizeEnv*maxplotfreq/(.5*fs))/sizeEnv
	plt.pcolormesh(frmTime, binFreq, np.transpose(stocEnv[:,:int(sizeEnv*maxplotfreq/(.5*fs))+1]))
plt.autoscale(tight=True)
# plot sinusoidal frequencies on top of stochastic component
if (tfreq.shape[1] > 0):
sines = tfreq*np.less(tfreq,maxplotfreq)
sines[sines==0] = np.nan
numFrames = int(sines[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.plot(frmTime, sines, color='k', ms=3, alpha=1)
plt.xlabel('time(s)')
plt.ylabel('Frequency(Hz)')
plt.autoscale(tight=True)
plt.title('sinusoidal + stochastic spectrogram')
# plot the output sound
plt.subplot(3,1,3)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('output sound: y')
plt.tight_layout()
plt.ion()
plt.show()
if __name__ == "__main__":
main()
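# A minimal usage sketch (not part of the original script): the analysis parameters
# documented in main() can be overridden directly, e.g. a larger FFT size and a lower
# magnitude threshold on the default bundled sound:
#
#   main(inputFile='../../sounds/bendir.wav', N=4096, t=-90, stocf=0.1)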
|
agpl-3.0
|
Tong-Chen/scikit-learn
|
sklearn/tree/tree.py
|
1
|
29287
|
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Licence: BSD 3 clause
from __future__ import division
import numbers
import numpy as np
from abc import ABCMeta, abstractmethod
from warnings import warn
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import array2d, check_random_state
from ..utils.fixes import unique
from ..utils.validation import check_arrays
from ._tree import Criterion, Splitter, Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE}
SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
max_features,
random_state):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.max_features = max_features
self.random_state = random_state
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_mask=None, X_argsorted=None, check_input=True,
sample_weight=None):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The training input samples. Use ``dtype=np.float32`` for maximum
efficiency.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (integers that correspond to classes in
classification, real numbers in regression).
Use ``dtype=np.float64`` and ``order='C'`` for maximum
efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
            Allows several input checks to be bypassed.
            Don't use this parameter unless you know what you are doing.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
# Deprecations
if sample_mask is not None:
warn("The sample_mask parameter is deprecated as of version 0.14 "
"and will be removed in 0.16.", DeprecationWarning)
if X_argsorted is not None:
warn("The X_argsorted parameter is deprecated as of version 0.14 "
"and will be removed in 0.16.", DeprecationWarning)
# Convert data
if check_input:
X, = check_arrays(X, dtype=DTYPE, sparse_format="dense",
check_ccontiguous=True)
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity;
            # indexing with [:, np.newaxis] does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
for k in xrange(self.n_outputs_):
classes_k, y[:, k] = unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = (2 ** 31) - 1 if self.max_depth is None else self.max_depth
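        # When max_depth is None it is replaced by a very large sentinel, so the tree is
        # effectively grown until the stopping criteria below apply.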
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
max_features = int(self.max_features * self.n_features_)
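        # For example (illustrative): with 10 features and max_features=0.3, the float
        # branch above yields int(0.3 * 10) = 3 features considered at each split.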
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
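        # For example (illustrative): min_samples_split=2 with min_samples_leaf=5 is
        # raised to max(2, 2 * 5) = 10, so a node is only split if both children can
        # satisfy the leaf-size constraint.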
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_,
self.n_outputs_, splitter, max_depth,
min_samples_split, self.min_samples_leaf)
self.tree_.build(X, y, sample_weight=sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def predict(self, X):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
X = array2d(X, dtype=DTYPE)
n_samples, n_features = X.shape
if self.tree_ is None:
raise Exception("Tree not initialized. Perform a fit first")
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
proba = self.tree_.predict(X)
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in xrange(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
`tree_` : Tree object
The underlying Tree object.
`max_features_` : int,
        The inferred value of max_features.
`classes_` : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
`n_classes_` : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
`feature_importances_` : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
max_features=None,
random_state=None,
min_density=None,
compute_importances=None):
super(DecisionTreeClassifier, self).__init__(criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
max_features,
random_state)
if min_density is not None:
warn("The min_density parameter is deprecated as of version 0.14 "
"and will be removed in 0.16.", DeprecationWarning)
if compute_importances is not None:
warn("Setting compute_importances is no longer required as "
"version 0.14. Variable importances are now computed on the "
"fly when accessing the feature_importances_ attribute. "
"This parameter will be removed in 0.16.",
DeprecationWarning)
def predict_proba(self, X):
"""Predict class probabilities of the input samples X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by arithmetical order.
"""
if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
X = array2d(X, dtype=DTYPE)
n_samples, n_features = X.shape
if self.tree_ is None:
raise Exception("Tree not initialized. Perform a fit first.")
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
proba = self.tree_.predict(X)
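        # proba holds the (weighted) class counts stored at each sample's leaf; the
        # blocks below normalize each row so that the counts become probabilities.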
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in xrange(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. Classes are
ordered by arithmetical order.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in xrange(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A tree regressor.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
`tree_` : Tree object
The underlying Tree object.
`max_features_` : int,
        The inferred value of max_features.
`feature_importances_` : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
max_features=None,
random_state=None,
min_density=None,
compute_importances=None):
super(DecisionTreeRegressor, self).__init__(criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
max_features,
random_state)
if min_density is not None:
warn("The min_density parameter is deprecated as of version 0.14 "
"and will be removed in 0.16.", DeprecationWarning)
if compute_importances is not None:
warn("Setting compute_importances is no longer required as "
"version 0.14. Variable importances are now computed on the "
"fly when accessing the feature_importances_ attribute. "
"This parameter will be removed in 0.16.",
DeprecationWarning)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
max_features="auto",
random_state=None,
min_density=None,
compute_importances=None):
super(ExtraTreeClassifier, self).__init__(criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
max_features,
random_state)
if min_density is not None:
warn("The min_density parameter is deprecated as of version 0.14 "
"and will be removed in 0.16.", DeprecationWarning)
if compute_importances is not None:
warn("Setting compute_importances is no longer required as "
"version 0.14. Variable importances are now computed on the "
"fly when accessing the feature_importances_ attribute. "
"This parameter will be removed in 0.16.",
DeprecationWarning)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
max_features="auto",
random_state=None,
min_density=None,
compute_importances=None):
super(ExtraTreeRegressor, self).__init__(criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
max_features,
random_state)
if min_density is not None:
warn("The min_density parameter is deprecated as of version 0.14 "
"and will be removed in 0.16.", DeprecationWarning)
if compute_importances is not None:
warn("Setting compute_importances is no longer required as "
"version 0.14. Variable importances are now computed on the "
"fly when accessing the feature_importances_ attribute. "
"This parameter will be removed in 0.16.",
DeprecationWarning)
|
bsd-3-clause
|
hagne/atm-py
|
atmPy/atmosphere/standards.py
|
1
|
5525
|
# -*- coding: utf-8 -*-
"""
This module contains atmospheric constants and standards.
@author: Hagen
"""
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
class Standard_Atmosphere(object):
def __init__(self, standard='international'):
"""Returns pressure, temperature, and/or altitude as a function of pressure, or altitude for the standard international atmosphere
columns=['Altitude_meter', 'Pressure_mbar', 'Temperature_K'])
Arguments
---------
value: float or ndarray.
Depending on the keyword "quantity" this is:
- altitude in meters.
- pressure in mbar.
quantity: 'altitude' or 'pressure'.
            quantity of the argument value.
standard: 'US' or 'international'.
defines which standard is used.
return_standard: bool, optional.
if True argument "value" and "quantity" are ignored and a pandas dataTable with the standard is returned.
Returns
-------
        tuple of two floats or two ndarrays, depending on the type of the input:
            The first element is pressure in mbar or altitude in meters, the second is temperature in Kelvin.
"""
if standard == 'international':
self.altitude_standard = np.array([-610, 11000, 20000, 32000, 47000, 51000, 71000, 84852]).astype(float)
self.pressure_standard = np.array([108900, 22632, 5474.9, 868.02, 110.91, 66.939, 3.9564, 0.3734]) / 100.
self.temp_standard = np.array([19, -56.5, -56.5, -44.5, -2.5, -2.5, -58.5, -86.28]) + 273.15
elif standard == 'US':
self.altitude_standard = np.array([0, 11000, 20000, 32000, 47000, 51000, 71000]).astype(float)
self.pressure_standard = np.array([101325, 22632.1, 5474.89, 868.019, 110.906, 66.9389, 3.95642]) / 100.
self.temp_standard = np.array([288.15, 216.65, 216.65, 228.65, 270.65, 270.65, 214.65])
else:
raise TypeError('No standard with the name "%s" is defined' % standard)
        self.updated()  # initialise the cached pressure/temperature/density/altitude values
def updated(self):
self.__pressure = None
self.__temp = None
self.__density = None
self.__altitude = None
# if quantity == 'altitude':
@property
def pressure(self):
if np.any(self.__altitude):
pressure_int = interp1d(self.altitude_standard, np.log(self.pressure_standard), kind='cubic')
self.__pressure = np.exp(pressure_int(self.__altitude))
else:
            raise ValueError('altitude must be set before the pressure can be interpolated')
return self.__pressure
@pressure.setter
def pressure(self, value):
self.updated()
self.__pressure = value
# elif quantity == 'pressure':
# alt_int = interp1d(np.log(pressure), alt, kind='cubic')
# alt_n = alt_int(np.log(value))
# out = alt_n
# value = alt_n
# else:
# raise TypeError('Quantity "$s$" is not an option' % quantity)
#
# tmp_int = interp1d(alt, tmp, kind='linear')
# tmp_n = tmp_int(value)
# return out, tmp_n
def standard_atmosphere(value, quantity='altitude', standard='international', return_standard=False):
"""Returns pressure, temperature, and/or altitude as a function of pressure, or altitude for the standard international atmosphere
Arguments
---------
value: float or ndarray.
Depending on the keyword "quantity" this is:
- altitude in meters.
- pressure in mbar.
quantity: 'altitude' or 'pressure'.
        quantity of the argument value.
standard: 'US' or 'international'.
defines which standard is used.
return_standard: bool, optional.
if True argument "value" and "quantity" are ignored and a pandas dataTable with the standard is returned.
Returns
-------
    tuple of two floats or two ndarrays, depending on the type of the input:
        The first element is pressure in mbar or altitude in meters, the second is temperature in Kelvin.
"""
if standard == 'international':
alt = np.array([-610, 11000, 20000, 32000, 47000, 51000, 71000, 84852]).astype(float)
pressure = np.array([108900, 22632, 5474.9, 868.02, 110.91, 66.939, 3.9564, 0.3734]) / 100.
tmp = np.array([19, -56.5, -56.5, -44.5, -2.5, -2.5, -58.5, -86.28]) + 273.15
elif standard == 'US':
alt = np.array([0, 11000, 20000, 32000, 47000, 51000, 71000]).astype(float)
pressure = np.array([101325, 22632.1, 5474.89, 868.019, 110.906, 66.9389, 3.95642]) / 100.
tmp = np.array([288.15, 216.65, 216.65, 228.65, 270.65, 270.65, 214.65])
else:
raise TypeError('No standard with the name "%s" is defined' % standard)
if return_standard:
return pd.DataFrame(np.array([alt, pressure, tmp]).transpose(),
columns=['Altitude_meter', 'Pressure_mbar', 'Temperature_K'])
if quantity == 'altitude':
pressure_int = interp1d(alt, np.log(pressure), kind='cubic')
press_n = np.exp(pressure_int(value))
out = press_n
elif quantity == 'pressure':
value.loc[value < pressure.min()] = pressure.min()
alt_int = interp1d(np.log(pressure), alt, kind='cubic')
alt_n = alt_int(np.log(value))
out = alt_n
value = alt_n
else:
raise TypeError('Quantity "$s$" is not an option' % quantity)
tmp_int = interp1d(alt, tmp, kind='linear')
tmp_n = tmp_int(value)
return out, tmp_n
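# A minimal usage sketch (not part of the original module): query the international
# standard atmosphere at 5000 m. Pressure and temperature are interpolated from the
# tables defined above.
if __name__ == '__main__':
    p_mbar, t_kelvin = standard_atmosphere(5000.0, quantity='altitude')
    print('pressure [mbar]:', p_mbar, 'temperature [K]:', t_kelvin)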
|
mit
|
djpine/pyman
|
Book/chap9/Supporting Materials/specFuncPlots.py
|
3
|
2545
|
import numpy as np
import scipy.special
import matplotlib.pyplot as plt
# create a figure window
fig = plt.figure(1, figsize=(9,8))
# create arrays for a few Bessel functions and plot them
x = np.linspace(0, 20, 256)
j0 = scipy.special.jn(0, x)
j1 = scipy.special.jn(1, x)
y0 = scipy.special.yn(0, x)
y1 = scipy.special.yn(1, x)
ax1 = fig.add_subplot(321)
ax1.plot(x,j0, x,j1, x,y0, x,y1)
ax1.axhline(color="grey", ls="--", zorder=-1)
ax1.set_ylim(-1,1)
ax1.text(0.5, 0.95,'Bessel', ha='center', va='top',
transform = ax1.transAxes)
# gamma function
x = np.linspace(-3.5, 6., 3601)
g = scipy.special.gamma(x)
g = np.ma.masked_outside(g, -100, 400)
ax2 = fig.add_subplot(322)
ax2.plot(x,g)
ax2.set_xlim(-3.5, 6)
ax2.axhline(color="grey", ls="--", zorder=-1)
ax2.axvline(color="grey", ls="--", zorder=-1)
ax2.set_ylim(-20, 100)
ax2.text(0.5, 0.95,'Gamma', ha='center', va='top',
transform = ax2.transAxes)
# error function
x = np.linspace(0, 2.5, 256)
ef = scipy.special.erf(x)
ax3 = fig.add_subplot(323)
ax3.plot(x,ef)
ax3.set_ylim(0,1.1)
ax3.text(0.5, 0.95,'Error', ha='center', va='top',
transform = ax3.transAxes)
# Airy function
x = np.linspace(-15, 4, 256)
ai, aip, bi, bip = scipy.special.airy(x)
ax4 = fig.add_subplot(324)
ax4.plot(x,ai, x,bi)
ax4.axhline(color="grey", ls="--", zorder=-1)
ax4.axvline(color="grey", ls="--", zorder=-1)
ax4.set_xlim(-15,4)
ax4.set_ylim(-0.5,0.6)
ax4.text(0.5, 0.95,'Airy', ha='center', va='top',
transform = ax4.transAxes)
# Legendre polynomials
x = np.linspace(-1, 1, 256)
lp0 = np.polyval(scipy.special.legendre(0),x)
lp1 = np.polyval(scipy.special.legendre(1),x)
lp2 = np.polyval(scipy.special.legendre(2),x)
lp3 = np.polyval(scipy.special.legendre(3),x)
ax5 = fig.add_subplot(325)
ax5.plot(x,lp0, x,lp1, x,lp2, x,lp3)
ax5.axhline(color="grey", ls="--", zorder=-1)
ax5.axvline(color="grey", ls="--", zorder=-1)
ax5.set_ylim(-1,1.1)
ax5.text(0.5, 0.9,'Legendre', ha='center', va='top',
transform = ax5.transAxes)
# Laguerre polynomials
x = np.linspace(-5, 8, 256)
lg0 = np.polyval(scipy.special.laguerre(0),x)
lg1 = np.polyval(scipy.special.laguerre(1),x)
lg2 = np.polyval(scipy.special.laguerre(2),x)
lg3 = np.polyval(scipy.special.laguerre(3),x)
ax6 = fig.add_subplot(326)
ax6.plot(x,lg0, x,lg1, x,lg2, x,lg3)
ax6.axhline(color="grey", ls="--", zorder=-1)
ax6.axvline(color="grey", ls="--", zorder=-1)
ax6.set_xlim(-5,8)
ax6.set_ylim(-5,10)
ax6.text(0.5, 0.9,'Laguerre', ha='center', va='top',
transform = ax6.transAxes)
plt.savefig("specFuncPlots.pdf")
plt.show()
|
cc0-1.0
|
moutai/scikit-learn
|
sklearn/gaussian_process/tests/test_kernels.py
|
24
|
11602
|
"""Testing for kernels for Gaussian processes."""
# Author: Jan Hendrik Metzen <[email protected]>
# Licence: BSD 3 clause
from collections import Hashable
from sklearn.externals.funcsigs import signature
import numpy as np
from sklearn.gaussian_process.kernels import _approx_fprime
from sklearn.metrics.pairwise \
import PAIRWISE_KERNEL_FUNCTIONS, euclidean_distances, pairwise_kernels
from sklearn.gaussian_process.kernels \
import (RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct,
ConstantKernel, WhiteKernel, PairwiseKernel, KernelOperator,
Exponentiation)
from sklearn.base import clone
from sklearn.utils.testing import (assert_equal, assert_almost_equal,
assert_not_equal, assert_array_equal,
assert_array_almost_equal)
X = np.random.RandomState(0).normal(0, 1, (5, 2))
Y = np.random.RandomState(0).normal(0, 1, (6, 2))
kernel_white = RBF(length_scale=2.0) + WhiteKernel(noise_level=3.0)
kernels = [RBF(length_scale=2.0), RBF(length_scale_bounds=(0.5, 2.0)),
ConstantKernel(constant_value=10.0),
2.0 * RBF(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * RBF(length_scale=0.5), kernel_white,
2.0 * RBF(length_scale=[0.5, 2.0]),
2.0 * Matern(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * Matern(length_scale=0.5, nu=0.5),
2.0 * Matern(length_scale=1.5, nu=1.5),
2.0 * Matern(length_scale=2.5, nu=2.5),
2.0 * Matern(length_scale=[0.5, 2.0], nu=0.5),
3.0 * Matern(length_scale=[2.0, 0.5], nu=1.5),
4.0 * Matern(length_scale=[0.5, 0.5], nu=2.5),
RationalQuadratic(length_scale=0.5, alpha=1.5),
ExpSineSquared(length_scale=0.5, periodicity=1.5),
DotProduct(sigma_0=2.0), DotProduct(sigma_0=2.0) ** 2]
for metric in PAIRWISE_KERNEL_FUNCTIONS:
if metric in ["additive_chi2", "chi2"]:
continue
kernels.append(PairwiseKernel(gamma=1.0, metric=metric))
def test_kernel_gradient():
""" Compare analytic and numeric gradient of kernels. """
for kernel in kernels:
K, K_gradient = kernel(X, eval_gradient=True)
assert_equal(K_gradient.shape[0], X.shape[0])
assert_equal(K_gradient.shape[1], X.shape[0])
assert_equal(K_gradient.shape[2], kernel.theta.shape[0])
def eval_kernel_for_theta(theta):
kernel_clone = kernel.clone_with_theta(theta)
K = kernel_clone(X, eval_gradient=False)
return K
K_gradient_approx = \
_approx_fprime(kernel.theta, eval_kernel_for_theta, 1e-10)
assert_almost_equal(K_gradient, K_gradient_approx, 4)
def test_kernel_theta():
""" Check that parameter vector theta of kernel is set correctly. """
for kernel in kernels:
if isinstance(kernel, KernelOperator) \
or isinstance(kernel, Exponentiation): # skip non-basic kernels
continue
theta = kernel.theta
_, K_gradient = kernel(X, eval_gradient=True)
# Determine kernel parameters that contribute to theta
init_sign = signature(kernel.__class__.__init__).parameters.values()
args = [p.name for p in init_sign if p.name != 'self']
theta_vars = map(lambda s: s.rstrip("_bounds"),
filter(lambda s: s.endswith("_bounds"), args))
assert_equal(
set(hyperparameter.name
for hyperparameter in kernel.hyperparameters),
set(theta_vars))
# Check that values returned in theta are consistent with
# hyperparameter values (being their logarithms)
for i, hyperparameter in enumerate(kernel.hyperparameters):
assert_equal(theta[i],
np.log(getattr(kernel, hyperparameter.name)))
# Fixed kernel parameters must be excluded from theta and gradient.
for i, hyperparameter in enumerate(kernel.hyperparameters):
# create copy with certain hyperparameter fixed
params = kernel.get_params()
params[hyperparameter.name + "_bounds"] = "fixed"
kernel_class = kernel.__class__
new_kernel = kernel_class(**params)
# Check that theta and K_gradient are identical with the fixed
# dimension left out
_, K_gradient_new = new_kernel(X, eval_gradient=True)
assert_equal(theta.shape[0], new_kernel.theta.shape[0] + 1)
assert_equal(K_gradient.shape[2], K_gradient_new.shape[2] + 1)
if i > 0:
assert_equal(theta[:i], new_kernel.theta[:i])
assert_array_equal(K_gradient[..., :i],
K_gradient_new[..., :i])
if i + 1 < len(kernel.hyperparameters):
assert_equal(theta[i+1:], new_kernel.theta[i:])
assert_array_equal(K_gradient[..., i+1:],
K_gradient_new[..., i:])
# Check that values of theta are modified correctly
for i, hyperparameter in enumerate(kernel.hyperparameters):
theta[i] = np.log(42)
kernel.theta = theta
assert_almost_equal(getattr(kernel, hyperparameter.name), 42)
setattr(kernel, hyperparameter.name, 43)
assert_almost_equal(kernel.theta[i], np.log(43))
def test_auto_vs_cross():
""" Auto-correlation and cross-correlation should be consistent. """
for kernel in kernels:
if kernel == kernel_white:
continue # Identity is not satisfied on diagonal
K_auto = kernel(X)
K_cross = kernel(X, X)
assert_almost_equal(K_auto, K_cross, 5)
def test_kernel_diag():
""" Test that diag method of kernel returns consistent results. """
for kernel in kernels:
K_call_diag = np.diag(kernel(X))
K_diag = kernel.diag(X)
assert_almost_equal(K_call_diag, K_diag, 5)
def test_kernel_operator_commutative():
""" Adding kernels and multiplying kernels should be commutative. """
# Check addition
assert_almost_equal((RBF(2.0) + 1.0)(X),
(1.0 + RBF(2.0))(X))
# Check multiplication
assert_almost_equal((3.0 * RBF(2.0))(X),
(RBF(2.0) * 3.0)(X))
def test_kernel_anisotropic():
""" Anisotropic kernel should be consistent with isotropic kernels."""
kernel = 3.0 * RBF([0.5, 2.0])
K = kernel(X)
X1 = np.array(X)
X1[:, 0] *= 4
K1 = 3.0 * RBF(2.0)(X1)
assert_almost_equal(K, K1)
X2 = np.array(X)
X2[:, 1] /= 4
K2 = 3.0 * RBF(0.5)(X2)
assert_almost_equal(K, K2)
# Check getting and setting via theta
kernel.theta = kernel.theta + np.log(2)
assert_array_equal(kernel.theta, np.log([6.0, 1.0, 4.0]))
assert_array_equal(kernel.k2.length_scale, [1.0, 4.0])
def test_kernel_stationary():
""" Test stationarity of kernels."""
for kernel in kernels:
if not kernel.is_stationary():
continue
K = kernel(X, X + 1)
assert_almost_equal(K[0, 0], np.diag(K))
def test_kernel_clone():
""" Test that sklearn's clone works correctly on kernels. """
for kernel in kernels:
kernel_cloned = clone(kernel)
assert_equal(kernel, kernel_cloned)
assert_not_equal(id(kernel), id(kernel_cloned))
for attr in kernel.__dict__.keys():
attr_value = getattr(kernel, attr)
attr_value_cloned = getattr(kernel_cloned, attr)
if attr.startswith("hyperparameter_"):
assert_equal(attr_value.name, attr_value_cloned.name)
assert_equal(attr_value.value_type,
attr_value_cloned.value_type)
assert_array_equal(attr_value.bounds,
attr_value_cloned.bounds)
assert_equal(attr_value.n_elements,
attr_value_cloned.n_elements)
elif np.iterable(attr_value):
for i in range(len(attr_value)):
if np.iterable(attr_value[i]):
assert_array_equal(attr_value[i],
attr_value_cloned[i])
else:
assert_equal(attr_value[i], attr_value_cloned[i])
else:
assert_equal(attr_value, attr_value_cloned)
if not isinstance(attr_value, Hashable):
# modifiable attributes must not be identical
assert_not_equal(id(attr_value), id(attr_value_cloned))
def test_matern_kernel():
""" Test consistency of Matern kernel for special values of nu. """
K = Matern(nu=1.5, length_scale=1.0)(X)
# the diagonal elements of a matern kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(X.shape[0]))
# matern kernel for coef0==0.5 is equal to absolute exponential kernel
K_absexp = np.exp(-euclidean_distances(X, X, squared=False))
K = Matern(nu=0.5, length_scale=1.0)(X)
assert_array_almost_equal(K, K_absexp)
# test that special cases of matern kernel (coef0 in [0.5, 1.5, 2.5])
# result in nearly identical results as the general case for coef0 in
# [0.5 + tiny, 1.5 + tiny, 2.5 + tiny]
tiny = 1e-10
for nu in [0.5, 1.5, 2.5]:
K1 = Matern(nu=nu, length_scale=1.0)(X)
K2 = Matern(nu=nu + tiny, length_scale=1.0)(X)
assert_array_almost_equal(K1, K2)
def test_kernel_versus_pairwise():
"""Check that GP kernels can also be used as pairwise kernels."""
for kernel in kernels:
# Test auto-kernel
if kernel != kernel_white:
# For WhiteKernel: k(X) != k(X,X). This is assumed by
# pairwise_kernels
K1 = kernel(X)
K2 = pairwise_kernels(X, metric=kernel)
assert_array_almost_equal(K1, K2)
# Test cross-kernel
K1 = kernel(X, Y)
K2 = pairwise_kernels(X, Y, metric=kernel)
assert_array_almost_equal(K1, K2)
def test_set_get_params():
"""Check that set_params()/get_params() is consistent with kernel.theta."""
for kernel in kernels:
# Test get_params()
index = 0
params = kernel.get_params()
for hyperparameter in kernel.hyperparameters:
if hyperparameter.bounds is "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
assert_almost_equal(np.exp(kernel.theta[index:index+size]),
params[hyperparameter.name])
index += size
else:
assert_almost_equal(np.exp(kernel.theta[index]),
params[hyperparameter.name])
index += 1
# Test set_params()
index = 0
value = 10 # arbitrary value
for hyperparameter in kernel.hyperparameters:
if hyperparameter.bounds is "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
kernel.set_params(**{hyperparameter.name: [value]*size})
assert_almost_equal(np.exp(kernel.theta[index:index+size]),
[value]*size)
index += size
else:
kernel.set_params(**{hyperparameter.name: value})
assert_almost_equal(np.exp(kernel.theta[index]), value)
index += 1
|
bsd-3-clause
|
stiphyMT/plantcv
|
tests/tests.py
|
1
|
284568
|
#!/usr/bin/env python
import pytest
import os
import shutil
import json
import numpy as np
import cv2
import sys
import pandas as pd
from plotnine import ggplot
from plantcv import plantcv as pcv
import plantcv.learn
import plantcv.parallel
import plantcv.utils
# Import matplotlib and use a null Template to block plotting to screen
# This will let us test debug = "plot"
import matplotlib
import matplotlib.pyplot as plt
import dask
from dask.distributed import Client
PARALLEL_TEST_DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parallel_data")
TEST_TMPDIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", ".cache")
TEST_IMG_DIR = "images"
TEST_IMG_DIR2 = "images_w_date"
TEST_SNAPSHOT_DIR = "snapshots"
TEST_PIPELINE = os.path.join(PARALLEL_TEST_DATA, "plantcv-script.py")
META_FIELDS = {"imgtype": 0, "camera": 1, "frame": 2, "zoom": 3, "lifter": 4, "gain": 5, "exposure": 6, "id": 7}
VALID_META = {
# Camera settings
"camera": {
"label": "camera identifier",
"datatype": "<class 'str'>",
"value": "none"
},
"imgtype": {
"label": "image type",
"datatype": "<class 'str'>",
"value": "none"
},
"zoom": {
"label": "camera zoom setting",
"datatype": "<class 'str'>",
"value": "none"
},
"exposure": {
"label": "camera exposure setting",
"datatype": "<class 'str'>",
"value": "none"
},
"gain": {
"label": "camera gain setting",
"datatype": "<class 'str'>",
"value": "none"
},
"frame": {
"label": "image series frame identifier",
"datatype": "<class 'str'>",
"value": "none"
},
"lifter": {
"label": "imaging platform height setting",
"datatype": "<class 'str'>",
"value": "none"
},
# Date-Time
"timestamp": {
"label": "datetime of image",
"datatype": "<class 'datetime.datetime'>",
"value": None
},
# Sample attributes
"id": {
"label": "image identifier",
"datatype": "<class 'str'>",
"value": "none"
},
"plantbarcode": {
"label": "plant barcode identifier",
"datatype": "<class 'str'>",
"value": "none"
},
"treatment": {
"label": "treatment identifier",
"datatype": "<class 'str'>",
"value": "none"
},
"cartag": {
"label": "plant carrier identifier",
"datatype": "<class 'str'>",
"value": "none"
},
# Experiment attributes
"measurementlabel": {
"label": "experiment identifier",
"datatype": "<class 'str'>",
"value": "none"
},
# Other
"other": {
"label": "other identifier",
"datatype": "<class 'str'>",
"value": "none"
}
}
METADATA_COPROCESS = {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117770',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none',
'coimg': 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'
},
'NIR_SV_0_z1_h1_g0_e65_117779.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'),
'camera': 'SV',
'imgtype': 'NIR',
'zoom': 'z1',
'exposure': 'e65',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117779',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none'
}
}
METADATA_VIS_ONLY = {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117770',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none'
}
}
METADATA_NIR_ONLY = {
'NIR_SV_0_z1_h1_g0_e65_117779.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'),
'camera': 'SV',
'imgtype': 'NIR',
'zoom': 'z1',
'exposure': 'e65',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117779',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none'
}
}
# Set the temp directory for dask
dask.config.set(temporary_directory=TEST_TMPDIR)
# ##########################
# Tests setup function
# ##########################
def setup_function():
if not os.path.exists(TEST_TMPDIR):
os.mkdir(TEST_TMPDIR)
# ##############################
# Tests for the parallel subpackage
# ##############################
def test_plantcv_parallel_workflowconfig_save_config_file():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_save_config_file")
os.mkdir(cache_dir)
# Define output path/filename
template_file = os.path.join(cache_dir, "config.json")
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Save template file
config.save_config(config_file=template_file)
assert os.path.exists(template_file)
def test_plantcv_parallel_workflowconfig_import_config_file():
# Define input path/filename
config_file = os.path.join(PARALLEL_TEST_DATA, "workflow_config_template.json")
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# import config file
config.import_config(config_file=config_file)
assert config.cluster == "LocalCluster"
def test_plantcv_parallel_workflowconfig_validate_config():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_validate_config")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Set valid values in config
config.input_dir = os.path.join(PARALLEL_TEST_DATA, "images")
config.json = os.path.join(cache_dir, "valid_config.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.img_outdir = cache_dir
# Validate config
assert config.validate_config()
def test_plantcv_parallel_workflowconfig_invalid_startdate():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_invalid_startdate")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Set valid values in config
config.input_dir = os.path.join(PARALLEL_TEST_DATA, "images")
config.json = os.path.join(cache_dir, "valid_config.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.img_outdir = cache_dir
config.start_date = "2020-05-10"
# Validate config
assert not config.validate_config()
def test_plantcv_parallel_workflowconfig_invalid_enddate():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_invalid_enddate")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Set valid values in config
config.input_dir = os.path.join(PARALLEL_TEST_DATA, "images")
config.json = os.path.join(cache_dir, "valid_config.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.img_outdir = cache_dir
config.end_date = "2020-05-10"
config.timestampformat = "%Y%m%d"
# Validate config
assert not config.validate_config()
def test_plantcv_parallel_workflowconfig_invalid_metadata_terms():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_invalid_metadata_terms")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Set invalid values in config
# input_dir and json are not defined by default, but are required
# Set an incorrect metadata term
config.filename_metadata.append("invalid")
# Validate config
assert not config.validate_config()
def test_plantcv_parallel_workflowconfig_invalid_filename_metadata():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_invalid_filename_metadata")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Set invalid values in config
# input_dir and json are not defined by default, but are required
# Do not set required filename_metadata
# Validate config
assert not config.validate_config()
def test_plantcv_parallel_workflowconfig_invalid_cluster():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_invalid_cluster")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Set invalid values in config
# input_dir and json are not defined by default, but are required
# Set invalid cluster type
config.cluster = "MyCluster"
# Validate config
assert not config.validate_config()
def test_plantcv_parallel_metadata_parser_snapshots():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_snapshots", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS", "camera": "SV"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == METADATA_VIS_ONLY
def test_plantcv_parallel_metadata_parser_snapshots_coimg():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_snapshots_coimg", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.coprocess = "FAKE"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == METADATA_VIS_ONLY
def test_plantcv_parallel_metadata_parser_images():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "2014"
config.end_date = "2014"
    config.timestampformat = '%Y'  # No date in the filenames, so the date-range check and date format are ignored
config.imgformat = "jpg"
meta = plantcv.parallel.metadata_parser(config=config)
expected = {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'images', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': None,
'id': '117770',
'plantbarcode': 'none',
'treatment': 'none',
'cartag': 'none',
'measurementlabel': 'none',
'other': 'none'}
}
assert meta == expected
config.include_all_subdirs = False
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == expected
def test_plantcv_parallel_metadata_parser_regex():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.delimiter = r'(VIS)_(SV)_(\d+)_(z1)_(h1)_(g0)_(e82)_(\d+)'
meta = plantcv.parallel.metadata_parser(config=config)
expected = {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'images', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': None,
'id': '117770',
'plantbarcode': 'none',
'treatment': 'none',
'cartag': 'none',
'measurementlabel': 'none',
'other': 'none'}
}
assert meta == expected
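# A minimal standalone sketch (not invoked by the test suite) of how a regular-expression
# delimiter such as the one above maps capture groups onto the filename_metadata fields;
# it uses only the standard library, and PlantCV's parser fills the remaining fields with 'none'.
def _example_regex_delimiter_groups():
    import re
    fields = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
    pattern = r'(VIS)_(SV)_(\d+)_(z1)_(h1)_(g0)_(e82)_(\d+)'
    match = re.search(pattern, 'VIS_SV_0_z1_h1_g0_e82_117770.jpg')
    # Pair each capture group with its metadata field name
    metadata = dict(zip(fields, match.groups()))
    assert metadata["camera"] == "SV" and metadata["id"] == "117770"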
def test_plantcv_parallel_metadata_parser_images_outside_daterange():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR2)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images_outside_daterange",
"output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "timestamp"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "NIR"}
config.start_date = "1970-01-01 00_00_00"
config.end_date = "1970-01-01 00_00_00"
config.timestampformat = "%Y-%m-%d %H_%M_%S"
config.imgformat = "jpg"
config.delimiter = r"(NIR)_(SV)_(\d)_(z1)_(h1)_(g0)_(e65)_(\d{4}-\d{2}-\d{2} \d{2}_\d{2}_\d{2})"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == {}
def test_plantcv_parallel_metadata_parser_no_default_dates():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_no_default_dates", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS", "camera": "SV", "id": "117770"}
config.start_date = None
config.end_date = None
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == METADATA_VIS_ONLY
def test_plantcv_parallel_workflowconfig_subdaily_timestampformat():
    '''
    Timestamp formats with only hours and smaller units of time were failing if the script was run
    earlier in the day than the images were taken. This was fixed by setting end_date to 23:59:59
    when the year, month, and day are not part of the timestamp format.
    '''
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR2)
config.json = os.path.join(TEST_IMG_DIR2, "test_plantcv_parallel_metadata_parser_subdaily_timestampformat", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "timestamp"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "NIR", "camera": "SV"}
config.start_date = None
config.end_date = None
config.timestampformat = "%H_%M_%S"
config.imgformat = "jpg"
config.delimiter = r"(NIR)_(SV)_(\d)_(z1)_(h1)_(g0)_(e65)_(\d{2}_\d{2}_\d{2})"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == {
'NIR_SV_0_z1_h1_g0_e65_23_59_59.jpg': {
            'path': os.path.join(PARALLEL_TEST_DATA, 'images_w_date', 'NIR_SV_0_z1_h1_g0_e65_23_59_59.jpg'),
'imgtype': 'NIR',
'camera': 'SV',
'frame': '0',
'zoom': 'z1',
'lifter': 'h1',
'gain': 'g0',
'exposure': 'e65',
'timestamp': '23_59_59',
'measurementlabel': 'none',
            'cartag': 'none',
'id': 'none',
'treatment': 'none',
'plantbarcode': 'none',
'other': 'none'
}
}
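# A minimal standalone sketch (not invoked by the test suite) of the behaviour described in the
# docstring above: with a time-only format, datetime.strptime() fills in a default date of
# 1900-01-01, so every image parses onto the same day and a same-day upper bound of 23:59:59
# keeps all of them inside the date range.
def _example_subdaily_timestamp_parsing():
    from datetime import datetime
    parsed = datetime.strptime("23_59_59", "%H_%M_%S")
    # No %Y/%m/%d directives, so the date part defaults to 1900-01-01
    assert parsed == datetime(1900, 1, 1, 23, 59, 59)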
def test_plantcv_parallel_check_date_range_wrongdateformat():
start_date = 10
end_date = 10
img_time = '2010-10-10'
with pytest.raises(SystemExit, match=r'does not match format'):
date_format = '%Y%m%d'
_ = plantcv.parallel.check_date_range(
start_date, end_date, img_time, date_format)
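# A minimal standalone sketch (not invoked by the test suite) of the failure mode matched above.
# The "does not match format" message presumably originates from datetime.strptime, which raises
# when the timestamp string and the format string disagree.
def _example_strptime_format_mismatch():
    from datetime import datetime
    import pytest
    with pytest.raises(ValueError, match="does not match format"):
        datetime.strptime("2010-10-10", "%Y%m%d")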
def test_plantcv_parallel_metadata_parser_snapshot_outside_daterange():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_snapshot_outside_daterange",
"output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "1970-01-01 00:00:00.0"
config.end_date = "1970-01-01 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == {}
def test_plantcv_parallel_metadata_parser_fail_images():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_fail_images", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"cartag": "VIS"}
config.start_date = "1970-01-01 00:00:00.0"
config.end_date = "1970-01-01 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.coprocess = "NIR"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == METADATA_NIR_ONLY
def test_plantcv_parallel_metadata_parser_images_with_frame():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images_with_frame", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.coprocess = "NIR"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117770',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none',
'coimg': 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'
},
'NIR_SV_0_z1_h1_g0_e65_117779.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'),
'camera': 'SV',
'imgtype': 'NIR',
'zoom': 'z1',
'exposure': 'e65',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117779',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none'
}
}
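# A minimal standalone sketch (not invoked by the test suite, with made-up filenames) of how the
# 'coimg' key in the metadata asserted above links a VIS image to its co-processed NIR image.
def _example_coimg_pairing():
    meta = {
        'VIS_SV_0.jpg': {'imgtype': 'VIS', 'coimg': 'NIR_SV_0.jpg'},
        'NIR_SV_0.jpg': {'imgtype': 'NIR'},
    }
    nir_name = meta['VIS_SV_0.jpg']['coimg']
    assert meta[nir_name]['imgtype'] == 'NIR'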
def test_plantcv_parallel_metadata_parser_images_no_frame():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images_no_frame",
"output.json")
config.filename_metadata = ["imgtype", "camera", "X", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.coprocess = "NIR"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': 'none',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117770',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none',
'coimg': 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'
},
'NIR_SV_0_z1_h1_g0_e65_117779.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'),
'camera': 'SV',
'imgtype': 'NIR',
'zoom': 'z1',
'exposure': 'e65',
'gain': 'g0',
'frame': 'none',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117779',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none'
}
}
def test_plantcv_parallel_metadata_parser_images_no_camera():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images_no_frame", "output.json")
config.filename_metadata = ["imgtype", "X", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.coprocess = "NIR"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'none',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117770',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none',
'coimg': 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'
},
'NIR_SV_0_z1_h1_g0_e65_117779.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'),
'camera': 'none',
'imgtype': 'NIR',
'zoom': 'z1',
'exposure': 'e65',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117779',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none'
}
}
def test_plantcv_parallel_job_builder_single_image():
# Create cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_job_builder_single_image")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(cache_dir, "output.json")
config.tmp_dir = cache_dir
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.img_outdir = cache_dir
config.metadata_filters = {"imgtype": "VIS", "camera": "SV"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.other_args = ["--other", "on"]
config.writeimg = True
jobs = plantcv.parallel.job_builder(meta=METADATA_VIS_ONLY, config=config)
image_name = list(METADATA_VIS_ONLY.keys())[0]
result_file = os.path.join(cache_dir, image_name + '.txt')
expected = ['python', TEST_PIPELINE, '--image', METADATA_VIS_ONLY[image_name]['path'], '--outdir',
cache_dir, '--result', result_file, '--writeimg', '--other', 'on']
    assert len(jobs[0]) == len(expected)
    assert all(i == j for i, j in zip(jobs[0], expected))
def test_plantcv_parallel_job_builder_coprocess():
# Create cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_job_builder_coprocess")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(cache_dir, "output.json")
config.tmp_dir = cache_dir
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.img_outdir = cache_dir
config.metadata_filters = {"imgtype": "VIS", "camera": "SV"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.other_args = ["--other", "on"]
config.writeimg = True
config.coprocess = "NIR"
jobs = plantcv.parallel.job_builder(meta=METADATA_COPROCESS, config=config)
img_names = list(METADATA_COPROCESS.keys())
vis_name = img_names[0]
vis_path = METADATA_COPROCESS[vis_name]['path']
result_file = os.path.join(cache_dir, vis_name + '.txt')
nir_name = img_names[1]
coresult_file = os.path.join(cache_dir, nir_name + '.txt')
expected = ['python', TEST_PIPELINE, '--image', vis_path, '--outdir', cache_dir, '--result', result_file,
'--coresult', coresult_file, '--writeimg', '--other', 'on']
    assert len(jobs[0]) == len(expected)
    assert all(i == j for i, j in zip(jobs[0], expected))
def test_plantcv_parallel_multiprocess_create_dask_cluster_local():
client = plantcv.parallel.create_dask_cluster(cluster="LocalCluster", cluster_config={})
status = client.status
client.shutdown()
assert status == "running"
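# A minimal standalone sketch (not invoked by the test suite) of the LocalCluster path exercised
# above, assuming dask.distributed is available: create a lightweight in-process client, check
# its status, and shut it down.
def _example_local_dask_client():
    from dask.distributed import Client
    client = Client(n_workers=1, processes=False)  # threads only, to keep the sketch lightweight
    try:
        assert client.status == "running"
    finally:
        client.shutdown()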
def test_plantcv_parallel_multiprocess_create_dask_cluster():
client = plantcv.parallel.create_dask_cluster(cluster="HTCondorCluster", cluster_config={"cores": 1,
"memory": "1GB",
"disk": "1GB"})
status = client.status
client.shutdown()
assert status == "running"
def test_plantcv_parallel_multiprocess_create_dask_cluster_invalid_cluster():
with pytest.raises(ValueError):
_ = plantcv.parallel.create_dask_cluster(cluster="Skynet", cluster_config={})
def test_plantcv_parallel_convert_datetime_to_unixtime():
unix_time = plantcv.parallel.convert_datetime_to_unixtime(timestamp_str="1970-01-01", date_format="%Y-%m-%d")
assert unix_time == 0
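# A minimal standalone sketch (not invoked by the test suite) of the unix-time arithmetic behind
# the assertion above, using only the standard library: the epoch maps to 0 and one day later
# maps to 86400 seconds.
def _example_unixtime_arithmetic():
    from datetime import datetime
    epoch = datetime.strptime("1970-01-01", "%Y-%m-%d")
    next_day = datetime.strptime("1970-01-02", "%Y-%m-%d")
    assert (next_day - epoch).total_seconds() == 86400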
def test_plantcv_parallel_convert_datetime_to_unixtime_bad_strptime():
with pytest.raises(SystemExit):
_ = plantcv.parallel.convert_datetime_to_unixtime(timestamp_str="1970-01-01", date_format="%Y-%m")
def test_plantcv_parallel_multiprocess():
image_name = list(METADATA_VIS_ONLY.keys())[0]
image_path = os.path.join(METADATA_VIS_ONLY[image_name]['path'], image_name)
result_file = os.path.join(TEST_TMPDIR, image_name + '.txt')
jobs = [['python', TEST_PIPELINE, '--image', image_path, '--outdir', TEST_TMPDIR, '--result', result_file,
'--writeimg', '--other', 'on']]
# Create a dask LocalCluster client
client = Client(n_workers=1)
plantcv.parallel.multiprocess(jobs, client=client)
assert os.path.exists(result_file)
def test_plantcv_parallel_process_results():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_process_results")
os.mkdir(cache_dir)
plantcv.parallel.process_results(job_dir=os.path.join(PARALLEL_TEST_DATA, "results"),
json_file=os.path.join(cache_dir, 'appended_results.json'))
plantcv.parallel.process_results(job_dir=os.path.join(PARALLEL_TEST_DATA, "results"),
json_file=os.path.join(cache_dir, 'appended_results.json'))
# Assert that the output JSON file matches the expected output JSON file
    with open(os.path.join(cache_dir, "appended_results.json"), "r") as result_file:
        results = json.load(result_file)
    with open(os.path.join(PARALLEL_TEST_DATA, "appended_results.json"), "r") as expected_file:
        expected = json.load(expected_file)
assert results == expected
def test_plantcv_parallel_process_results_new_output():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_process_results_new_output")
os.mkdir(cache_dir)
plantcv.parallel.process_results(job_dir=os.path.join(PARALLEL_TEST_DATA, "results"),
json_file=os.path.join(cache_dir, 'new_result.json'))
# Assert output matches expected values
    with open(os.path.join(cache_dir, "new_result.json"), "r") as result_file:
        results = json.load(result_file)
    with open(os.path.join(PARALLEL_TEST_DATA, "new_result.json"), "r") as expected_file:
        expected = json.load(expected_file)
assert results == expected
def test_plantcv_parallel_process_results_valid_json():
# Test when the file is a valid json file but doesn't contain expected keys
with pytest.raises(RuntimeError):
plantcv.parallel.process_results(job_dir=os.path.join(PARALLEL_TEST_DATA, "results"),
json_file=os.path.join(PARALLEL_TEST_DATA, "valid.json"))
def test_plantcv_parallel_process_results_invalid_json():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_process_results_invalid_json")
os.mkdir(cache_dir)
# Move the test data to the tmp directory
shutil.copytree(os.path.join(PARALLEL_TEST_DATA, "bad_results"), os.path.join(cache_dir, "bad_results"))
with pytest.raises(RuntimeError):
plantcv.parallel.process_results(job_dir=os.path.join(cache_dir, "bad_results"),
json_file=os.path.join(cache_dir, "bad_results", "invalid.txt"))
# ####################################################################################################################
# ########################################### PLANTCV MAIN PACKAGE ###################################################
matplotlib.use('Template')
TEST_DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
HYPERSPECTRAL_TEST_DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "hyperspectral_data")
HYPERSPECTRAL_DATA = "darkReference"
HYPERSPECTRAL_WHITE = "darkReference_whiteReference"
HYPERSPECTRAL_DARK = "darkReference_darkReference"
HYPERSPECTRAL_HDR = "darkReference.hdr"
HYPERSPECTRAL_MASK = "darkReference_mask.png"
HYPERSPECTRAL_DATA_NO_DEFAULT = "darkReference2"
HYPERSPECTRAL_HDR_NO_DEFAULT = "darkReference2.hdr"
HYPERSPECTRAL_DATA_APPROX_PSEUDO = "darkReference3"
HYPERSPECTRAL_HDR_APPROX_PSEUDO = "darkReference3.hdr"
HYPERSPECTRAL_HDR_SMALL_RANGE = {'description': '{[HEADWALL Hyperspec III]}', 'samples': '800', 'lines': '1',
'bands': '978', 'header offset': '0', 'file type': 'ENVI Standard',
'interleave': 'bil', 'sensor type': 'Unknown', 'byte order': '0',
'default bands': '159,253,520', 'wavelength units': 'nm',
'wavelength': ['379.027', '379.663', '380.3', '380.936', '381.573', '382.209']}
FLUOR_TEST_DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "photosynthesis_data")
FLUOR_IMG = "PSII_PSD_supopt_temp_btx623_22_rep1.DAT"
TEST_COLOR_DIM = (2056, 2454, 3)
TEST_GRAY_DIM = (2056, 2454)
TEST_BINARY_DIM = TEST_GRAY_DIM
TEST_INPUT_COLOR = "input_color_img.jpg"
TEST_INPUT_GRAY = "input_gray_img.jpg"
TEST_INPUT_GRAY_SMALL = "input_gray_img_small.jpg"
TEST_INPUT_BINARY = "input_binary_img.png"
# Image from http://www.libpng.org/pub/png/png-OwlAlpha.html
# This image may be used, edited and reproduced freely.
TEST_INPUT_RGBA = "input_rgba.png"
TEST_INPUT_BAYER = "bayer_img.png"
TEST_INPUT_ROI_CONTOUR = "input_roi_contour.npz"
TEST_INPUT_ROI_HIERARCHY = "input_roi_hierarchy.npz"
TEST_INPUT_CONTOURS = "input_contours.npz"
TEST_INPUT_OBJECT_CONTOURS = "input_object_contours.npz"
TEST_INPUT_OBJECT_HIERARCHY = "input_object_hierarchy.npz"
TEST_VIS = "VIS_SV_0_z300_h1_g0_e85_v500_93054.png"
TEST_NIR = "NIR_SV_0_z300_h1_g0_e15000_v500_93059.png"
TEST_VIS_TV = "VIS_TV_0_z300_h1_g0_e85_v500_93054.png"
TEST_NIR_TV = "NIR_TV_0_z300_h1_g0_e15000_v500_93059.png"
TEST_INPUT_MASK = "input_mask_binary.png"
TEST_INPUT_MASK_OOB = "mask_outbounds.png"
TEST_INPUT_MASK_RESIZE = "input_mask_resize.png"
TEST_INPUT_NIR_MASK = "input_nir.png"
TEST_INPUT_FDARK = "FLUO_TV_dark.png"
TEST_INPUT_FDARK_LARGE = "FLUO_TV_DARK_large"
TEST_INPUT_FMIN = "FLUO_TV_min.png"
TEST_INPUT_FMAX = "FLUO_TV_max.png"
TEST_INPUT_FMASK = "FLUO_TV_MASK.png"
TEST_INPUT_GREENMAG = "input_green-magenta.jpg"
TEST_INPUT_MULTI = "multi_ori_image.jpg"
TEST_INPUT_MULTI_MASK = "multi_ori_mask.jpg"
TEST_INPUT_MULTI_OBJECT = "roi_objects.npz"
TEST_INPUT_MULTI_CONTOUR = "multi_contours.npz"
TEST_INPUT_ClUSTER_CONTOUR = "clusters_i.npz"
TEST_INPUT_MULTI_HIERARCHY = "multi_hierarchy.npz"
TEST_INPUT_VISUALIZE_CONTOUR = "roi_objects_visualize.npz"
TEST_INPUT_VISUALIZE_HIERARCHY = "roi_obj_hierarchy_visualize.npz"
TEST_INPUT_VISUALIZE_CLUSTERS = "clusters_i_visualize.npz"
TEST_INPUT_VISUALIZE_BACKGROUND = "visualize_background_img.png"
TEST_INPUT_GENOTXT = "cluster_names.txt"
TEST_INPUT_GENOTXT_TOO_MANY = "cluster_names_too_many.txt"
TEST_INPUT_CROPPED = 'cropped_img.jpg'
TEST_INPUT_CROPPED_MASK = 'cropped-mask.png'
TEST_INPUT_MARKER = 'seed-image.jpg'
TEST_INPUT_SKELETON = 'input_skeleton.png'
TEST_INPUT_SKELETON_PRUNED = 'input_pruned_skeleton.png'
TEST_FOREGROUND = "TEST_FOREGROUND.jpg"
TEST_BACKGROUND = "TEST_BACKGROUND.jpg"
TEST_PDFS = "naive_bayes_pdfs.txt"
TEST_PDFS_BAD = "naive_bayes_pdfs_bad.txt"
TEST_VIS_SMALL = "setaria_small_vis.png"
TEST_MASK_SMALL = "setaria_small_mask.png"
TEST_VIS_COMP_CONTOUR = "setaria_composed_contours.npz"
TEST_ACUTE_RESULT = np.asarray([[[119, 285]], [[151, 280]], [[168, 267]], [[168, 262]], [[171, 261]], [[224, 269]],
[[246, 271]], [[260, 277]], [[141, 248]], [[183, 194]], [[188, 237]], [[173, 240]],
[[186, 260]], [[147, 244]], [[163, 246]], [[173, 268]], [[170, 272]], [[151, 320]],
[[195, 289]], [[228, 272]], [[210, 272]], [[209, 247]], [[210, 232]]])
TEST_VIS_SMALL_PLANT = "setaria_small_plant_vis.png"
TEST_MASK_SMALL_PLANT = "setaria_small_plant_mask.png"
TEST_VIS_COMP_CONTOUR_SMALL_PLANT = "setaria_small_plant_composed_contours.npz"
TEST_SAMPLED_RGB_POINTS = "sampled_rgb_points.txt"
TEST_TARGET_IMG = "target_img.png"
TEST_TARGET_IMG_WITH_HEXAGON = "target_img_w_hexagon.png"
TEST_TARGET_IMG_TRIANGLE = "target_img copy.png"
TEST_SOURCE1_IMG = "source1_img.png"
TEST_SOURCE2_IMG = "source2_img.png"
TEST_TARGET_MASK = "mask_img.png"
TEST_TARGET_IMG_COLOR_CARD = "color_card_target.png"
TEST_SOURCE2_MASK = "mask2_img.png"
TEST_TARGET_MATRIX = "target_matrix.npz"
TEST_SOURCE1_MATRIX = "source1_matrix.npz"
TEST_SOURCE2_MATRIX = "source2_matrix.npz"
TEST_MATRIX_B1 = "matrix_b1.npz"
TEST_MATRIX_B2 = "matrix_b2.npz"
TEST_TRANSFORM1 = "transformation_matrix1.npz"
TEST_MATRIX_M1 = "matrix_m1.npz"
TEST_MATRIX_M2 = "matrix_m2.npz"
TEST_S1_CORRECTED = "source_corrected.png"
TEST_SKELETON_OBJECTS = "skeleton_objects.npz"
TEST_SKELETON_HIERARCHIES = "skeleton_hierarchies.npz"
TEST_THERMAL_ARRAY = "thermal_img.npz"
TEST_THERMAL_IMG_MASK = "thermal_img_mask.png"
TEST_INPUT_THERMAL_CSV = "FLIR2600.csv"
# TEST_BAD_MASK = "bad_mask_test.pkl"
# TEST_IM_BAD_NONE = "bad_mask_none.pkl"
# TEST_IM_BAD_BOTH = "bad_mask_both.pkl"
# TEST_IM_BAD_NAN = "bad_mask_nan.pkl"
# TEST_IM_BAD_INF = "bad_mask_inf.pkl"
PIXEL_VALUES = "pixel_inspector_rgb_values.txt"
# ##########################
# Tests for the main package
# ##########################
@pytest.mark.parametrize("debug", ["print", "plot"])
def test_plantcv_debug(debug, tmpdir):
from plantcv.plantcv._debug import _debug
# Create a test tmp directory
img_outdir = tmpdir.mkdir("sub")
pcv.params.debug = debug
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
_debug(visual=img, filename=os.path.join(img_outdir, TEST_INPUT_COLOR))
assert True
@pytest.mark.parametrize("datatype,value", [[list, []], [int, 2], [float, 2.2], [bool, True], [str, "2"], [dict, {}],
[tuple, ()], [None, None]])
def test_plantcv_outputs_add_observation(datatype, value):
# Create output instance
outputs = pcv.Outputs()
outputs.add_observation(sample='default', variable='test', trait='test variable', method='type', scale='none',
datatype=datatype, value=value, label=[])
assert outputs.observations["default"]["test"]["value"] == value
def test_plantcv_outputs_add_observation_invalid_type():
# Create output instance
outputs = pcv.Outputs()
with pytest.raises(RuntimeError):
outputs.add_observation(sample='default', variable='test', trait='test variable', method='type', scale='none',
datatype=list, value=np.array([2]), label=[])
def test_plantcv_outputs_save_results_json_newfile(tmpdir):
# Create a test tmp directory
cache_dir = tmpdir.mkdir("sub")
outfile = os.path.join(cache_dir, "results.json")
# Create output instance
outputs = pcv.Outputs()
outputs.add_observation(sample='default', variable='test', trait='test variable', method='test', scale='none',
datatype=str, value="test", label="none")
outputs.save_results(filename=outfile, outformat="json")
with open(outfile, "r") as fp:
results = json.load(fp)
assert results["observations"]["default"]["test"]["value"] == "test"
def test_plantcv_outputs_save_results_json_existing_file(tmpdir):
# Create a test tmp directory
cache_dir = tmpdir.mkdir("sub")
outfile = os.path.join(cache_dir, "data_results.txt")
shutil.copyfile(os.path.join(TEST_DATA, "data_results.txt"), outfile)
# Create output instance
outputs = pcv.Outputs()
outputs.add_observation(sample='default', variable='test', trait='test variable', method='test', scale='none',
datatype=str, value="test", label="none")
outputs.save_results(filename=outfile, outformat="json")
with open(outfile, "r") as fp:
results = json.load(fp)
assert results["observations"]["default"]["test"]["value"] == "test"
def test_plantcv_outputs_save_results_csv(tmpdir):
# Create a test tmp directory
cache_dir = tmpdir.mkdir("sub")
outfile = os.path.join(cache_dir, "results.csv")
testfile = os.path.join(TEST_DATA, "data_results.csv")
# Create output instance
outputs = pcv.Outputs()
outputs.add_observation(sample='default', variable='string', trait='string variable', method='string', scale='none',
datatype=str, value="string", label="none")
outputs.add_observation(sample='default', variable='boolean', trait='boolean variable', method='boolean',
scale='none', datatype=bool, value=True, label="none")
outputs.add_observation(sample='default', variable='list', trait='list variable', method='list',
scale='none', datatype=list, value=[1, 2, 3], label=[1, 2, 3])
outputs.add_observation(sample='default', variable='tuple', trait='tuple variable', method='tuple',
scale='none', datatype=tuple, value=(1, 2), label=(1, 2))
outputs.add_observation(sample='default', variable='tuple_list', trait='list of tuples variable',
method='tuple_list', scale='none', datatype=list, value=[(1, 2), (3, 4)], label=[1, 2])
outputs.save_results(filename=outfile, outformat="csv")
with open(outfile, "r") as fp:
results = fp.read()
with open(testfile, "r") as fp:
test_results = fp.read()
assert results == test_results
def test_plantcv_transform_warp_smaller():
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
    bimg = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    # Resize then re-binarize; the resize interpolation does not preserve the binary values
    bimg_small = cv2.resize(bimg, (200, 300))
    bimg_small[bimg_small > 0] = 255
mrow, mcol = bimg_small.shape
vrow, vcol, vdepth = img.shape
pcv.params.debug = None
    mask_warped = pcv.transform.warp(bimg_small, img[:, :, 2],
                                     pts=[(0, 0), (mcol - 1, 0), (mcol - 1, mrow - 1), (0, mrow - 1)],
                                     refpts=[(0, 0), (vcol - 1, 0), (vcol - 1, vrow - 1), (0, vrow - 1)])
    pcv.params.debug = 'plot'
    mask_warped_plot = pcv.transform.warp(bimg_small, img[:, :, 2],
                                          pts=[(0, 0), (mcol - 1, 0), (mcol - 1, mrow - 1), (0, mrow - 1)],
                                          refpts=[(0, 0), (vcol - 1, 0), (vcol - 1, vrow - 1), (0, vrow - 1)])
    assert np.count_nonzero(mask_warped) == 93142
    assert np.count_nonzero(mask_warped_plot) == 93142
def test_plantcv_transform_warp_larger():
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
    gimg = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    gimg_large = cv2.resize(gimg, (5000, 7000))
    mrow, mcol = gimg_large.shape
    vrow, vcol, vdepth = img.shape
    pcv.params.debug = 'print'
    mask_warped_print = pcv.transform.warp(gimg_large, img,
                                           pts=[(0, 0), (mcol - 1, 0), (mcol - 1, mrow - 1), (0, mrow - 1)],
                                           refpts=[(0, 0), (vcol - 1, 0), (vcol - 1, vrow - 1), (0, vrow - 1)])
    assert np.sum(mask_warped_print) == 83103814
def test_plantcv_transform_warp_rgbimgerror():
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
    gimg = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    gimg_large = cv2.resize(gimg, (5000, 7000))
    mrow, mcol = gimg_large.shape
    vrow, vcol, vdepth = img.shape
    with pytest.raises(RuntimeError):
        _ = pcv.transform.warp(img, img,
                               pts=[(0, 0), (mcol - 1, 0), (mcol - 1, mrow - 1), (0, mrow - 1)],
                               refpts=[(0, 0), (vcol - 1, 0), (vcol - 1, vrow - 1), (0, vrow - 1)])
def test_plantcv_transform_warp_4ptserror():
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
    mrow, mcol, _ = img.shape
    vrow, vcol, vdepth = img.shape
    with pytest.raises(RuntimeError):
        _ = pcv.transform.warp(img[:, :, 0], img,
                               pts=[(0, 0), (mcol - 1, 0), (0, mrow - 1)],
                               refpts=[(0, 0), (vcol - 1, 0), (0, vrow - 1)])
    with pytest.raises(RuntimeError):
        _ = pcv.transform.warp(img[:, :, 1], img,
                               pts=[(0, 0), (mcol - 1, 0), (0, mrow - 1)],
                               refpts=[(0, 0), (vcol - 1, 0), (vcol - 1, vrow - 1), (0, vrow - 1)])
    with pytest.raises(RuntimeError):
        _ = pcv.transform.warp(img[:, :, 2], img,
                               pts=[(0, 0), (mcol - 1, 0), (mcol - 1, mrow - 1), (0, mrow - 1)],
                               refpts=[(0, 0), (vcol - 1, 0), (vcol - 1, vrow - 1), (0, vrow - 1), (0, vrow - 1)])
def test_plantcv_acute():
# Read in test data
mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
obj_contour = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.acute(obj=obj_contour, win=5, thresh=15, mask=mask)
_ = pcv.acute(obj=obj_contour, win=0, thresh=15, mask=mask)
_ = pcv.acute(obj=np.array(([[213, 190]], [[83, 61]], [[149, 246]])), win=84, thresh=192, mask=mask)
_ = pcv.acute(obj=np.array(([[3, 29]], [[31, 102]], [[161, 63]])), win=148, thresh=56, mask=mask)
_ = pcv.acute(obj=np.array(([[103, 154]], [[27, 227]], [[152, 83]])), win=35, thresh=0, mask=mask)
# Test with debug = None
pcv.params.debug = None
_ = pcv.acute(obj=np.array(([[103, 154]], [[27, 227]], [[152, 83]])), win=35, thresh=0, mask=mask)
_ = pcv.acute(obj=obj_contour, win=0, thresh=15, mask=mask)
homology_pts = pcv.acute(obj=obj_contour, win=5, thresh=15, mask=mask)
    assert all(i == j for i, j in zip(np.shape(homology_pts), (29, 1, 2)))
def test_plantcv_acute_vertex():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_acute_vertex")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL))
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
obj_contour = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.acute_vertex(obj=obj_contour, win=5, thresh=15, sep=5, img=img, label="prefix")
_ = pcv.acute_vertex(obj=[], win=5, thresh=15, sep=5, img=img)
_ = pcv.acute_vertex(obj=[], win=.01, thresh=.01, sep=1, img=img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.acute_vertex(obj=obj_contour, win=5, thresh=15, sep=5, img=img)
# Test with debug = None
pcv.params.debug = None
acute = pcv.acute_vertex(obj=obj_contour, win=5, thresh=15, sep=5, img=img)
    assert all(i == j for i, j in zip(np.shape(acute), np.shape(TEST_ACUTE_RESULT)))
pcv.outputs.clear()
def test_plantcv_acute_vertex_bad_obj():
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL))
obj_contour = np.array([])
pcv.params.debug = None
result = pcv.acute_vertex(obj=obj_contour, win=5, thresh=15, sep=5, img=img)
    assert all(i == j for i, j in zip(result, [0, ("NA", "NA")]))
pcv.outputs.clear()
def test_plantcv_analyze_bound_horizontal():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_horizontal")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
img_above_bound_only = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL_PLANT))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=300, label="prefix")
pcv.outputs.clear()
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=100)
_ = pcv.analyze_bound_horizontal(img=img_above_bound_only, obj=object_contours, mask=mask, line_position=1756)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=1756)
# Test with debug = None
pcv.params.debug = None
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=1756)
assert len(pcv.outputs.observations["default"]) == 7
def test_plantcv_analyze_bound_horizontal_grayscale_image():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with a grayscale reference image and debug="plot"
pcv.params.debug = "plot"
boundary_img1 = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=1756)
assert len(np.shape(boundary_img1)) == 3
def test_plantcv_analyze_bound_horizontal_neg_y():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_horizontal")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
    # Test with debug = "plot", line positions that will trigger -y
pcv.params.debug = "plot"
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=-1000)
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=0)
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=2056)
assert pcv.outputs.observations['default']['height_above_reference']['value'] == 713
def test_plantcv_analyze_bound_vertical():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_vertical")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=1000, label="prefix")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=1000)
# Test with debug = None
pcv.params.debug = None
_ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=1000)
assert pcv.outputs.observations['default']['width_left_reference']['value'] == 94
def test_plantcv_analyze_bound_vertical_grayscale_image():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_vertical")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with a grayscale reference image and debug="plot"
pcv.params.debug = "plot"
_ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=1000)
assert pcv.outputs.observations['default']['width_left_reference']['value'] == 94
pcv.outputs.clear()
def test_plantcv_analyze_bound_vertical_neg_x():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_vertical")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with debug="plot", line position that will trigger -x
pcv.params.debug = "plot"
_ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=2454)
assert pcv.outputs.observations['default']['width_left_reference']['value'] == 441
def test_plantcv_analyze_bound_vertical_small_x():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_vertical")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with debug='plot', line position that will trigger -x, and two channel object
pcv.params.debug = "plot"
_ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=1)
assert pcv.outputs.observations['default']['width_right_reference']['value'] == 441
def test_plantcv_analyze_color():
# Clear previous outputs
pcv.outputs.clear()
# Test with debug = None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type="all")
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type=None, label="prefix")
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type=None)
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='lab')
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='hsv')
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type=None)
    # Repeat each hist_plot_type with debug still disabled (pcv.params.debug remains None)
    _ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type="all")
    _ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type=None, label="prefix")
    _ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='lab')
    _ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='hsv')
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='rgb')
assert pcv.outputs.observations['default']['hue_median']['value'] == 84.0
def test_plantcv_analyze_color_incorrect_image():
img_binary = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
with pytest.raises(RuntimeError):
_ = pcv.analyze_color(rgb_img=img_binary, mask=mask, hist_plot_type=None)
def test_plantcv_analyze_color_bad_hist_type():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
pcv.params.debug = "plot"
with pytest.raises(RuntimeError):
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='bgr')
def test_plantcv_analyze_color_incorrect_hist_plot_type():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
with pytest.raises(RuntimeError):
pcv.params.debug = "plot"
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type="bgr")
def test_plantcv_analyze_nir():
# Clear previous outputs
pcv.outputs.clear()
# Test with debug=None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
_ = pcv.analyze_nir_intensity(gray_img=img, mask=mask, bins=256, histplot=True)
result = len(pcv.outputs.observations['default']['nir_frequencies']['value'])
assert result == 256
def test_plantcv_analyze_nir_16bit():
# Clear previous outputs
pcv.outputs.clear()
# Test with debug=None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
_ = pcv.analyze_nir_intensity(gray_img=np.uint16(img), mask=mask, bins=256, histplot=True)
result = len(pcv.outputs.observations['default']['nir_frequencies']['value'])
assert result == 256
def test_plantcv_analyze_object():
# Test with debug = None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
obj_contour = contours_npz['arr_0']
obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
pcv.outputs.clear()
assert len(obj_images) != 0
def test_plantcv_analyze_object_grayscale_input():
# Test with debug = None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
obj_contour = contours_npz['arr_0']
obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
assert len(obj_images) != 1
def test_plantcv_analyze_object_zero_slope():
# Test with debug = None
pcv.params.debug = None
# Create a test image
img = np.zeros((50, 50, 3), dtype=np.uint8)
img[10:11, 10:40, 0] = 255
mask = img[:, :, 0]
obj_contour = np.array([[[10, 10]], [[11, 10]], [[12, 10]], [[13, 10]], [[14, 10]], [[15, 10]], [[16, 10]],
[[17, 10]], [[18, 10]], [[19, 10]], [[20, 10]], [[21, 10]], [[22, 10]], [[23, 10]],
[[24, 10]], [[25, 10]], [[26, 10]], [[27, 10]], [[28, 10]], [[29, 10]], [[30, 10]],
[[31, 10]], [[32, 10]], [[33, 10]], [[34, 10]], [[35, 10]], [[36, 10]], [[37, 10]],
[[38, 10]], [[39, 10]], [[38, 10]], [[37, 10]], [[36, 10]], [[35, 10]], [[34, 10]],
[[33, 10]], [[32, 10]], [[31, 10]], [[30, 10]], [[29, 10]], [[28, 10]], [[27, 10]],
[[26, 10]], [[25, 10]], [[24, 10]], [[23, 10]], [[22, 10]], [[21, 10]], [[20, 10]],
[[19, 10]], [[18, 10]], [[17, 10]], [[16, 10]], [[15, 10]], [[14, 10]], [[13, 10]],
[[12, 10]], [[11, 10]]], dtype=np.int32)
obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
assert len(obj_images) != 0
def test_plantcv_analyze_object_longest_axis_2d():
# Test with debug = None
pcv.params.debug = None
# Create a test image
img = np.zeros((50, 50, 3), dtype=np.uint8)
img[0:5, 45:49, 0] = 255
img[0:5, 0:5, 0] = 255
mask = img[:, :, 0]
obj_contour = np.array([[[45, 1]], [[45, 2]], [[45, 3]], [[45, 4]], [[46, 4]], [[47, 4]], [[48, 4]],
[[48, 3]], [[48, 2]], [[48, 1]], [[47, 1]], [[46, 1]], [[1, 1]], [[1, 2]],
[[1, 3]], [[1, 4]], [[2, 4]], [[3, 4]], [[4, 4]], [[4, 3]], [[4, 2]],
[[4, 1]], [[3, 1]], [[2, 1]]], dtype=np.int32)
obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
assert len(obj_images) != 0
def test_plantcv_analyze_object_longest_axis_2e():
# Test with debug = None
pcv.params.debug = None
# Create a test image
img = np.zeros((50, 50, 3), dtype=np.uint8)
img[10:15, 10:40, 0] = 255
mask = img[:, :, 0]
obj_contour = np.array([[[10, 10]], [[10, 11]], [[10, 12]], [[10, 13]], [[10, 14]], [[11, 14]], [[12, 14]],
[[13, 14]], [[14, 14]], [[15, 14]], [[16, 14]], [[17, 14]], [[18, 14]], [[19, 14]],
[[20, 14]], [[21, 14]], [[22, 14]], [[23, 14]], [[24, 14]], [[25, 14]], [[26, 14]],
[[27, 14]], [[28, 14]], [[29, 14]], [[30, 14]], [[31, 14]], [[32, 14]], [[33, 14]],
[[34, 14]], [[35, 14]], [[36, 14]], [[37, 14]], [[38, 14]], [[39, 14]], [[39, 13]],
[[39, 12]], [[39, 11]], [[39, 10]], [[38, 10]], [[37, 10]], [[36, 10]], [[35, 10]],
[[34, 10]], [[33, 10]], [[32, 10]], [[31, 10]], [[30, 10]], [[29, 10]], [[28, 10]],
[[27, 10]], [[26, 10]], [[25, 10]], [[24, 10]], [[23, 10]], [[22, 10]], [[21, 10]],
[[20, 10]], [[19, 10]], [[18, 10]], [[17, 10]], [[16, 10]], [[15, 10]], [[14, 10]],
[[13, 10]], [[12, 10]], [[11, 10]]], dtype=np.int32)
obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
assert len(obj_images) != 0
def test_plantcv_analyze_object_small_contour():
# Test with debug = None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
obj_contour = [np.array([[[0, 0]], [[0, 50]], [[50, 50]], [[50, 0]]], dtype=np.int32)]
obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
assert obj_images is None
def test_plantcv_analyze_thermal_values():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_thermal_values")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
# img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_THERMAL_IMG_MASK), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_THERMAL_ARRAY), encoding="latin1")
img = contours_npz['arr_0']
pcv.params.debug = None
thermal_hist = pcv.analyze_thermal_values(thermal_array=img, mask=mask, histplot=True)
assert thermal_hist is not None and pcv.outputs.observations['default']['median_temp']['value'] == 33.20922
def test_plantcv_apply_mask_white():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_apply_mask_white")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.apply_mask(img=img, mask=mask, mask_color="white")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.apply_mask(img=img, mask=mask, mask_color="white")
# Test with debug = None
pcv.params.debug = None
masked_img = pcv.apply_mask(img=img, mask=mask, mask_color="white")
    assert all(i == j for i, j in zip(np.shape(masked_img), TEST_COLOR_DIM))
def test_plantcv_apply_mask_black():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_apply_mask_black")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.apply_mask(img=img, mask=mask, mask_color="black")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.apply_mask(img=img, mask=mask, mask_color="black")
# Test with debug = None
pcv.params.debug = None
masked_img = pcv.apply_mask(img=img, mask=mask, mask_color="black")
    assert all(i == j for i, j in zip(np.shape(masked_img), TEST_COLOR_DIM))
def test_plantcv_apply_mask_hyperspectral():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_apply_mask_hyperspectral")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
hyper_array = pcv.hyperspectral.read_data(filename=spectral_filename)
img = np.ones((2056, 2454))
img_stacked = cv2.merge((img, img, img, img))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.apply_mask(img=img_stacked, mask=img, mask_color="black")
# Test with debug = "plot"
pcv.params.debug = "plot"
masked_array = pcv.apply_mask(img=hyper_array.array_data, mask=img, mask_color="black")
assert np.mean(masked_array) == 13.97111260224949
def test_plantcv_apply_mask_bad_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
with pytest.raises(RuntimeError):
pcv.params.debug = "plot"
_ = pcv.apply_mask(img=img, mask=mask, mask_color="wite")
def test_plantcv_auto_crop():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_auto_crop")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
contours = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
roi_contours = [contours[arr_n] for arr_n in contours]
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.auto_crop(img=img1, obj=roi_contours[1], padding_x=(20, 10), padding_y=(20, 10), color='black')
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.auto_crop(img=img1, obj=roi_contours[1], color='image')
_ = pcv.auto_crop(img=img1, obj=roi_contours[1], padding_x=2000, padding_y=2000, color='image')
# Test with debug = None
pcv.params.debug = None
cropped = pcv.auto_crop(img=img1, obj=roi_contours[1], padding_x=20, padding_y=20, color='black')
x, y, z = np.shape(img1)
x1, y1, z1 = np.shape(cropped)
assert x > x1
def test_plantcv_auto_crop_grayscale_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_auto_crop_grayscale_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
contours = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
roi_contours = [contours[arr_n] for arr_n in contours]
# Test with debug = "plot"
pcv.params.debug = "plot"
cropped = pcv.auto_crop(img=gray_img, obj=roi_contours[1], padding_x=20, padding_y=20, color='white')
x, y = np.shape(gray_img)
x1, y1 = np.shape(cropped)
assert x > x1
def test_plantcv_auto_crop_bad_color_input():
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
contours = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
roi_contours = [contours[arr_n] for arr_n in contours]
with pytest.raises(RuntimeError):
_ = pcv.auto_crop(img=gray_img, obj=roi_contours[1], padding_x=20, padding_y=20, color='wite')
def test_plantcv_auto_crop_bad_padding_input():
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
contours = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
roi_contours = [contours[arr_n] for arr_n in contours]
with pytest.raises(RuntimeError):
_ = pcv.auto_crop(img=gray_img, obj=roi_contours[1], padding_x="one", padding_y=20, color='white')
def test_plantcv_canny_edge_detect():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_canny_edge_detect")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.canny_edge_detect(img=rgb_img, mask=mask, mask_color='white')
_ = pcv.canny_edge_detect(img=img, mask=mask, mask_color='black')
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.canny_edge_detect(img=img, thickness=2)
_ = pcv.canny_edge_detect(img=img)
# Test with debug = None
pcv.params.debug = None
edge_img = pcv.canny_edge_detect(img=img)
    # Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(edge_img), TEST_BINARY_DIM))
    # Assert that the image is binary
    assert all(i == j for i, j in zip(np.unique(edge_img), [0, 255]))
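# A minimal standalone sketch (not invoked by the test suite) of the binary-image check used
# above, demonstrated on a small synthetic array with NumPy only.
def _example_binary_image_check():
    edge_img = np.array([[0, 255], [255, 0]], dtype=np.uint8)
    assert all(i == j for i, j in zip(np.unique(edge_img), [0, 255]))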
def test_plantcv_canny_edge_detect_bad_input():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_canny_edge_detect")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
with pytest.raises(RuntimeError):
_ = pcv.canny_edge_detect(img=img, mask=mask, mask_color="gray")
def test_plantcv_closing():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_closing")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
bin_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug=None
pcv.params.debug = None
_ = pcv.closing(gray_img)
# Test with debug='plot'
pcv.params.debug = 'plot'
_ = pcv.closing(bin_img, np.ones((4, 4), np.uint8))
# Test with debug='print'
pcv.params.debug = 'print'
filtered_img = pcv.closing(bin_img)
assert np.sum(filtered_img) == 16261860
def test_plantcv_closing_bad_input():
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
with pytest.raises(RuntimeError):
_ = pcv.closing(rgb_img)
def test_plantcv_cluster_contours():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_cluster_contours")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
roi_objects = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
hierarchy = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_HIERARCHY), encoding="latin1")
objs = [roi_objects[arr_n] for arr_n in roi_objects]
obj_hierarchy = hierarchy['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy, nrow=4, ncol=6)
_ = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy, show_grid=True)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy, nrow=4, ncol=6)
# Test with debug = None
pcv.params.debug = None
clusters_i, contours, hierarchy = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy,
nrow=4, ncol=6)
lenori = len(objs)
lenclust = len(clusters_i)
assert lenori > lenclust
def test_plantcv_cluster_contours_grayscale_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_cluster_contours_grayscale_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), 0)
roi_objects = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
    hierarchy = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_HIERARCHY), encoding="latin1")
objs = [roi_objects[arr_n] for arr_n in roi_objects]
    obj_hierarchy = hierarchy['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy, nrow=4, ncol=6)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy, nrow=4, ncol=6)
# Test with debug = None
pcv.params.debug = None
    clusters_i, contours, hierarchy = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy,
nrow=4, ncol=6)
lenori = len(objs)
lenclust = len(clusters_i)
assert lenori > lenclust
def test_plantcv_cluster_contours_splitimg():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_cluster_contours_splitimg")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
contours = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_CONTOUR), encoding="latin1")
clusters = np.load(os.path.join(TEST_DATA, TEST_INPUT_ClUSTER_CONTOUR), encoding="latin1")
    hierarchy = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_HIERARCHY), encoding="latin1")
cluster_names = os.path.join(TEST_DATA, TEST_INPUT_GENOTXT)
cluster_names_too_many = os.path.join(TEST_DATA, TEST_INPUT_GENOTXT_TOO_MANY)
roi_contours = [contours[arr_n] for arr_n in contours]
cluster_contours = [clusters[arr_n] for arr_n in clusters]
    obj_hierarchy = hierarchy['arr_0']
# Test with debug = None
pcv.params.debug = None
_, _, _ = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=cluster_contours,
contours=roi_contours,
hierarchy=obj_hierarchy, outdir=cache_dir, file=None, filenames=None)
_, _, _ = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=[[0]], contours=[],
hierarchy=np.array([[[1, -1, -1, -1]]]))
_, _, _ = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=cluster_contours,
contours=roi_contours,
hierarchy=obj_hierarchy, outdir=cache_dir, file='multi', filenames=None)
_, _, _ = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=cluster_contours,
contours=roi_contours,
hierarchy=obj_hierarchy, outdir=None, file=None, filenames=cluster_names)
_, _, _ = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=cluster_contours,
contours=roi_contours,
hierarchy=obj_hierarchy, outdir=None, file=None,
filenames=cluster_names_too_many)
output_path, imgs, masks = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=cluster_contours,
contours=roi_contours, hierarchy=obj_hierarchy, outdir=None,
file=None,
filenames=None)
assert len(output_path) != 0
def test_plantcv_cluster_contours_splitimg_grayscale():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_cluster_contours_splitimg_grayscale")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), 0)
contours = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_CONTOUR), encoding="latin1")
clusters = np.load(os.path.join(TEST_DATA, TEST_INPUT_ClUSTER_CONTOUR), encoding="latin1")
    hierarchy = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_HIERARCHY), encoding="latin1")
cluster_names = os.path.join(TEST_DATA, TEST_INPUT_GENOTXT)
cluster_names_too_many = os.path.join(TEST_DATA, TEST_INPUT_GENOTXT_TOO_MANY)
roi_contours = [contours[arr_n] for arr_n in contours]
cluster_contours = [clusters[arr_n] for arr_n in clusters]
    obj_hierarchy = hierarchy['arr_0']
pcv.params.debug = None
output_path, imgs, masks = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=cluster_contours,
contours=roi_contours, hierarchy=obj_hierarchy, outdir=None,
file=None,
filenames=None)
assert len(output_path) != 0
def test_plantcv_color_palette():
# Return a color palette
colors = pcv.color_palette(num=10, saved=False)
assert np.shape(colors) == (10, 3)
def test_plantcv_color_palette_random():
# Return a color palette in random order
pcv.params.color_sequence = "random"
colors = pcv.color_palette(num=10, saved=False)
assert np.shape(colors) == (10, 3)
def test_plantcv_color_palette_saved():
# Return a color palette that was saved
pcv.params.saved_color_scale = [[0, 0, 0], [255, 255, 255]]
colors = pcv.color_palette(num=2, saved=True)
assert colors == [[0, 0, 0], [255, 255, 255]]
def test_plantcv_crop():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img, _, _ = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK), 'gray')
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.crop(img=img, x=10, y=10, h=50, w=50)
# Test with debug = "plot"
pcv.params.debug = "plot"
cropped = pcv.crop(img=img, x=10, y=10, h=50, w=50)
assert np.shape(cropped) == (50, 50)
def test_plantcv_crop_hyperspectral():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_hyperspectral")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = np.ones((2056, 2454))
img_stacked = cv2.merge((img, img, img, img))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.crop(img=img_stacked, x=10, y=10, h=50, w=50)
# Test with debug = "plot"
pcv.params.debug = "plot"
cropped = pcv.crop(img=img_stacked, x=10, y=10, h=50, w=50)
assert np.shape(cropped) == (50, 50, 4)
def test_plantcv_crop_position_mask():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_position_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
nir, path1, filename1 = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK), 'gray')
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
    mask_three_channel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK))
mask_resize = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK_RESIZE), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
_ = pcv.crop_position_mask(nir, mask_resize, x=40, y=3, v_pos="top", h_pos="right")
_ = pcv.crop_position_mask(nir, mask_three_channel, x=40, y=3, v_pos="top", h_pos="right")
# Test with debug = "print" with bottom
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="bottom", h_pos="left")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
# Test with debug = "plot" with bottom
_ = pcv.crop_position_mask(nir, mask, x=45, y=2, v_pos="bottom", h_pos="left")
# Test with debug = None
pcv.params.debug = None
newmask = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
assert np.sum(newmask) == 707115
def test_plantcv_crop_position_mask_color():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_position_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
nir, path1, filename1 = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_COLOR), mode='native')
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
mask_resize = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK_RESIZE))
mask_non_binary = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
# Test with debug = "print" with bottom
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="bottom", h_pos="left")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
# Test with debug = "plot" with bottom
_ = pcv.crop_position_mask(nir, mask, x=45, y=2, v_pos="bottom", h_pos="left")
_ = pcv.crop_position_mask(nir, mask_non_binary, x=45, y=2, v_pos="bottom", h_pos="left")
_ = pcv.crop_position_mask(nir, mask_non_binary, x=45, y=2, v_pos="top", h_pos="left")
_ = pcv.crop_position_mask(nir, mask_non_binary, x=45, y=2, v_pos="bottom", h_pos="right")
_ = pcv.crop_position_mask(nir, mask_resize, x=45, y=2, v_pos="top", h_pos="left")
# Test with debug = None
pcv.params.debug = None
newmask = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
assert np.sum(newmask) == 707115
def test_plantcv_crop_position_mask_bad_input_x():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_position_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
# Read in test data
nir, path1, filename1 = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.crop_position_mask(nir, mask, x=-1, y=-1, v_pos="top", h_pos="right")
def test_plantcv_crop_position_mask_bad_input_vpos():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_position_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
# Read in test data
nir, path1, filename1 = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="below", h_pos="right")
def test_plantcv_crop_position_mask_bad_input_hpos():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_position_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
# Read in test data
nir, path1, filename1 = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="starboard")
def test_plantcv_dilate():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_dilate")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.dilate(gray_img=img, ksize=5, i=1)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.dilate(gray_img=img, ksize=5, i=1)
# Test with debug = None
pcv.params.debug = None
dilate_img = pcv.dilate(gray_img=img, ksize=5, i=1)
# Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(dilate_img), TEST_BINARY_DIM))
    # Assert that the image is binary
    assert all(i == j for i, j in zip(np.unique(dilate_img), [0, 255]))
def test_plantcv_dilate_small_k():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = None
pcv.params.debug = None
with pytest.raises(ValueError):
_ = pcv.dilate(img, 1, 1)
def test_plantcv_erode():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_erode")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.erode(gray_img=img, ksize=5, i=1)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.erode(gray_img=img, ksize=5, i=1)
# Test with debug = None
pcv.params.debug = None
erode_img = pcv.erode(gray_img=img, ksize=5, i=1)
# Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(erode_img), TEST_BINARY_DIM))
    # Assert that the image is binary
    assert all(i == j for i, j in zip(np.unique(erode_img), [0, 255]))
def test_plantcv_erode_small_k():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = None
pcv.params.debug = None
with pytest.raises(ValueError):
_ = pcv.erode(img, 1, 1)
def test_plantcv_distance_transform():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_distance_transform")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_CROPPED_MASK), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.distance_transform(bin_img=mask, distance_type=1, mask_size=3)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.distance_transform(bin_img=mask, distance_type=1, mask_size=3)
# Test with debug = None
pcv.params.debug = None
distance_transform_img = pcv.distance_transform(bin_img=mask, distance_type=1, mask_size=3)
# Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(distance_transform_img), np.shape(mask)))
def test_plantcv_fatal_error():
# Verify that the fatal_error function raises a RuntimeError
with pytest.raises(RuntimeError):
pcv.fatal_error("Test error")
def test_plantcv_fill():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_fill")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.fill(bin_img=img, size=63632)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.fill(bin_img=img, size=63632)
# Test with debug = None
pcv.params.debug = None
fill_img = pcv.fill(bin_img=img, size=63632)
    # Assert that all objects smaller than the given size were removed, leaving an empty image
    assert np.sum(fill_img) == 0
def test_plantcv_fill_bad_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_fill_bad_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
_ = pcv.fill(bin_img=img, size=1)
def test_plantcv_fill_holes():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_fill_holes")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.fill_holes(bin_img=img)
pcv.params.debug = "plot"
_ = pcv.fill_holes(bin_img=img)
# Test with debug = None
pcv.params.debug = None
fill_img = pcv.fill_holes(bin_img=img)
assert np.sum(fill_img) > np.sum(img)
def test_plantcv_fill_holes_bad_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_fill_holes_bad_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
_ = pcv.fill_holes(bin_img=img)
def test_plantcv_find_objects():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_find_objects")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.find_objects(img=img, mask=mask)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.find_objects(img=img, mask=mask)
# Test with debug = None
pcv.params.debug = None
contours, hierarchy = pcv.find_objects(img=img, mask=mask)
# Assert the correct number of contours are found
assert len(contours) == 2
def test_plantcv_find_objects_grayscale_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_find_objects_grayscale_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "plot"
pcv.params.debug = "plot"
contours, hierarchy = pcv.find_objects(img=img, mask=mask)
# Assert the correct number of contours are found
assert len(contours) == 2
def test_plantcv_flip():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_flip")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
img_binary = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.flip(img=img, direction="horizontal")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.flip(img=img, direction="vertical")
_ = pcv.flip(img=img_binary, direction="vertical")
# Test with debug = None
pcv.params.debug = None
flipped_img = pcv.flip(img=img, direction="horizontal")
    assert all(i == j for i, j in zip(np.shape(flipped_img), TEST_COLOR_DIM))
def test_plantcv_flip_bad_input():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.flip(img=img, direction="vert")
def test_plantcv_gaussian_blur():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_gaussian_blur")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img_color = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.gaussian_blur(img=img, ksize=(51, 51), sigma_x=0, sigma_y=None)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.gaussian_blur(img=img, ksize=(51, 51), sigma_x=0, sigma_y=None)
_ = pcv.gaussian_blur(img=img_color, ksize=(51, 51), sigma_x=0, sigma_y=None)
# Test with debug = None
pcv.params.debug = None
gaussian_img = pcv.gaussian_blur(img=img, ksize=(51, 51), sigma_x=0, sigma_y=None)
imgavg = np.average(img)
gavg = np.average(gaussian_img)
assert gavg != imgavg
def test_plantcv_get_kernel_cross():
kernel = pcv.get_kernel(size=(3, 3), shape="cross")
assert (kernel == np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])).all()
def test_plantcv_get_kernel_rectangle():
kernel = pcv.get_kernel(size=(3, 3), shape="rectangle")
assert (kernel == np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])).all()
def test_plantcv_get_kernel_ellipse():
kernel = pcv.get_kernel(size=(3, 3), shape="ellipse")
assert (kernel == np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])).all()
def test_plantcv_get_kernel_bad_input_size():
with pytest.raises(ValueError):
_ = pcv.get_kernel(size=(1, 1), shape="ellipse")
def test_plantcv_get_kernel_bad_input_shape():
with pytest.raises(RuntimeError):
_ = pcv.get_kernel(size=(3, 1), shape="square")
def test_plantcv_get_nir_sv():
nirpath = pcv.get_nir(TEST_DATA, TEST_VIS)
nirpath1 = os.path.join(TEST_DATA, TEST_NIR)
assert nirpath == nirpath1
def test_plantcv_get_nir_tv():
nirpath = pcv.get_nir(TEST_DATA, TEST_VIS_TV)
nirpath1 = os.path.join(TEST_DATA, TEST_NIR_TV)
assert nirpath == nirpath1
def test_plantcv_hist_equalization():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hist_equalization")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.hist_equalization(gray_img=img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.hist_equalization(gray_img=img)
# Test with debug = None
pcv.params.debug = None
hist = pcv.hist_equalization(gray_img=img)
histavg = np.average(hist)
imgavg = np.average(img)
assert histavg != imgavg
def test_plantcv_hist_equalization_bad_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hist_equalization_bad_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), 1)
# Test with debug = None
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.hist_equalization(gray_img=img)
def test_plantcv_image_add():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_image_add")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img2 = np.copy(img1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.image_add(gray_img1=img1, gray_img2=img2)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.image_add(gray_img1=img1, gray_img2=img2)
# Test with debug = None
pcv.params.debug = None
added_img = pcv.image_add(gray_img1=img1, gray_img2=img2)
    assert all(i == j for i, j in zip(np.shape(added_img), TEST_BINARY_DIM))
def test_plantcv_image_subtract():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_image_sub")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# read in images
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img2 = np.copy(img1)
# Test with debug = "print"
pcv.params.debug = 'print'
_ = pcv.image_subtract(img1, img2)
# Test with debug = "plot"
pcv.params.debug = 'plot'
_ = pcv.image_subtract(img1, img2)
# Test with debug = None
pcv.params.debug = None
new_img = pcv.image_subtract(img1, img2)
assert np.array_equal(new_img, np.zeros(np.shape(new_img), np.uint8))
def test_plantcv_image_subtract_fail():
# read in images
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img2 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY))
# test
with pytest.raises(RuntimeError):
_ = pcv.image_subtract(img1, img2)
def test_plantcv_invert():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_invert")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.invert(gray_img=img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.invert(gray_img=img)
# Test with debug = None
pcv.params.debug = None
inverted_img = pcv.invert(gray_img=img)
# Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(inverted_img), TEST_BINARY_DIM))
    # Assert that the image is binary
    assert all(i == j for i, j in zip(np.unique(inverted_img), [0, 255]))
def test_plantcv_landmark_reference_pt_dist():
# Clear previous outputs
pcv.outputs.clear()
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_landmark_reference")
os.mkdir(cache_dir)
points_rescaled = [(0.0139, 0.2569), (0.2361, 0.2917), (0.3542, 0.3819), (0.3542, 0.4167), (0.375, 0.4236),
(0.7431, 0.3681), (0.8958, 0.3542), (0.9931, 0.3125), (0.1667, 0.5139), (0.4583, 0.8889),
(0.4931, 0.5903), (0.3889, 0.5694), (0.4792, 0.4306), (0.2083, 0.5417), (0.3194, 0.5278),
(0.3889, 0.375), (0.3681, 0.3472), (0.2361, 0.0139), (0.5417, 0.2292), (0.7708, 0.3472),
(0.6458, 0.3472), (0.6389, 0.5208), (0.6458, 0.625)]
centroid_rescaled = (0.4685, 0.4945)
bottomline_rescaled = (0.4685, 0.2569)
_ = pcv.landmark_reference_pt_dist(points_r=[], centroid_r=('a', 'b'), bline_r=(0, 0))
_ = pcv.landmark_reference_pt_dist(points_r=[(10, 1000)], centroid_r=(10, 10), bline_r=(10, 10))
_ = pcv.landmark_reference_pt_dist(points_r=[], centroid_r=(0, 0), bline_r=(0, 0))
_ = pcv.landmark_reference_pt_dist(points_r=points_rescaled, centroid_r=centroid_rescaled,
bline_r=bottomline_rescaled, label="prefix")
assert len(pcv.outputs.observations['prefix'].keys()) == 8
def test_plantcv_laplace_filter():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_laplace_filter")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.laplace_filter(gray_img=img, ksize=1, scale=1)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.laplace_filter(gray_img=img, ksize=1, scale=1)
# Test with debug = None
pcv.params.debug = None
lp_img = pcv.laplace_filter(gray_img=img, ksize=1, scale=1)
# Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(lp_img), TEST_GRAY_DIM))
def test_plantcv_logical_and():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_logical_and")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img2 = np.copy(img1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.logical_and(bin_img1=img1, bin_img2=img2)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.logical_and(bin_img1=img1, bin_img2=img2)
# Test with debug = None
pcv.params.debug = None
and_img = pcv.logical_and(bin_img1=img1, bin_img2=img2)
    assert all(i == j for i, j in zip(np.shape(and_img), TEST_BINARY_DIM))
def test_plantcv_logical_or():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_logical_or")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img2 = np.copy(img1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.logical_or(bin_img1=img1, bin_img2=img2)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.logical_or(bin_img1=img1, bin_img2=img2)
# Test with debug = None
pcv.params.debug = None
or_img = pcv.logical_or(bin_img1=img1, bin_img2=img2)
    assert all(i == j for i, j in zip(np.shape(or_img), TEST_BINARY_DIM))
def test_plantcv_logical_xor():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_logical_xor")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img2 = np.copy(img1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.logical_xor(bin_img1=img1, bin_img2=img2)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.logical_xor(bin_img1=img1, bin_img2=img2)
# Test with debug = None
pcv.params.debug = None
xor_img = pcv.logical_xor(bin_img1=img1, bin_img2=img2)
    assert all(i == j for i, j in zip(np.shape(xor_img), TEST_BINARY_DIM))
def test_plantcv_median_blur():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_median_blur")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.median_blur(gray_img=img, ksize=5)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.median_blur(gray_img=img, ksize=5)
# Test with debug = None
pcv.params.debug = None
blur_img = pcv.median_blur(gray_img=img, ksize=5)
# Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(blur_img), TEST_BINARY_DIM))
    # Assert that the image is binary
    assert all(i == j for i, j in zip(np.unique(blur_img), [0, 255]))
def test_plantcv_median_blur_bad_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_median_blur_bad_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
_ = pcv.median_blur(img, 5.)
def test_plantcv_naive_bayes_classifier():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_naive_bayes_classifier")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.naive_bayes_classifier(rgb_img=img, pdf_file=os.path.join(TEST_DATA, TEST_PDFS))
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.naive_bayes_classifier(rgb_img=img, pdf_file=os.path.join(TEST_DATA, TEST_PDFS))
# Test with debug = None
pcv.params.debug = None
mask = pcv.naive_bayes_classifier(rgb_img=img, pdf_file=os.path.join(TEST_DATA, TEST_PDFS))
# Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(mask), TEST_GRAY_DIM))
    # Assert that the image is binary
    assert all(i == j for i, j in zip(np.unique(mask), [0, 255]))
def test_plantcv_naive_bayes_classifier_bad_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.naive_bayes_classifier(rgb_img=img, pdf_file=os.path.join(TEST_DATA, TEST_PDFS_BAD))
def test_plantcv_object_composition():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_object_composition")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
object_contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_CONTOURS), encoding="latin1")
object_contours = [object_contours_npz[arr_n] for arr_n in object_contours_npz]
object_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_HIERARCHY), encoding="latin1")
object_hierarchy = object_hierarchy_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.object_composition(img=img, contours=object_contours, hierarchy=object_hierarchy)
_ = pcv.object_composition(img=img, contours=[], hierarchy=object_hierarchy)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.object_composition(img=img, contours=object_contours, hierarchy=object_hierarchy)
# Test with debug = None
pcv.params.debug = None
contours, mask = pcv.object_composition(img=img, contours=object_contours, hierarchy=object_hierarchy)
# Assert that the objects have been combined
contour_shape = np.shape(contours) # type: tuple
assert contour_shape[1] == 1
def test_plantcv_object_composition_grayscale_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_object_composition_grayscale_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
object_contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_CONTOURS), encoding="latin1")
object_contours = [object_contours_npz[arr_n] for arr_n in object_contours_npz]
object_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_HIERARCHY), encoding="latin1")
object_hierarchy = object_hierarchy_npz['arr_0']
# Test with debug = "plot"
pcv.params.debug = "plot"
contours, mask = pcv.object_composition(img=img, contours=object_contours, hierarchy=object_hierarchy)
# Assert that the objects have been combined
contour_shape = np.shape(contours) # type: tuple
assert contour_shape[1] == 1
def test_plantcv_within_frame():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_within_frame")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
mask_ib = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
mask_oob = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK_OOB), -1)
in_bounds_ib = pcv.within_frame(mask=mask_ib, border_width=1, label="prefix")
in_bounds_oob = pcv.within_frame(mask=mask_oob, border_width=1)
assert (in_bounds_ib is True and in_bounds_oob is False)
def test_plantcv_within_frame_bad_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_within_frame")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
grayscale_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
with pytest.raises(RuntimeError):
_ = pcv.within_frame(grayscale_img)
def test_plantcv_opening():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_closing")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
bin_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug=None
pcv.params.debug = None
_ = pcv.opening(gray_img)
# Test with debug='plot'
pcv.params.debug = 'plot'
_ = pcv.opening(bin_img, np.ones((4, 4), np.uint8))
# Test with debug='print'
pcv.params.debug = 'print'
filtered_img = pcv.opening(bin_img)
assert np.sum(filtered_img) == 16184595
def test_plantcv_opening_bad_input():
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
with pytest.raises(RuntimeError):
_ = pcv.opening(rgb_img)
def test_plantcv_output_mask():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_output_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
img_color = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.output_mask(img=img, mask=mask, filename='test.png', outdir=None, mask_only=False)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.output_mask(img=img, mask=mask, filename='test.png', outdir=cache_dir, mask_only=False)
_ = pcv.output_mask(img=img_color, mask=mask, filename='test.png', outdir=None, mask_only=False)
    # Remove tmp files in working directory
shutil.rmtree("ori-images")
shutil.rmtree("mask-images")
# Test with debug = None
pcv.params.debug = None
imgpath, maskpath, analysis_images = pcv.output_mask(img=img, mask=mask, filename='test.png',
outdir=cache_dir, mask_only=False)
assert all([os.path.exists(imgpath) is True, os.path.exists(maskpath) is True])
def test_plantcv_output_mask_true():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_output_mask")
pcv.params.debug_outdir = cache_dir
os.mkdir(cache_dir)
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
img_color = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.output_mask(img=img, mask=mask, filename='test.png', outdir=cache_dir, mask_only=True)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.output_mask(img=img_color, mask=mask, filename='test.png', outdir=cache_dir, mask_only=True)
pcv.params.debug = None
imgpath, maskpath, analysis_images = pcv.output_mask(img=img, mask=mask, filename='test.png', outdir=cache_dir,
mask_only=False)
assert all([os.path.exists(imgpath) is True, os.path.exists(maskpath) is True])
def test_plantcv_plot_image_matplotlib_input():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_pseudocolor")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
pimg = pcv.visualize.pseudocolor(gray_img=img, mask=mask, min_value=10, max_value=200)
with pytest.raises(RuntimeError):
pcv.plot_image(pimg)
def test_plantcv_plot_image_plotnine():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_plot_image_plotnine")
os.mkdir(cache_dir)
dataset = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 2, 3, 4]})
img = ggplot(data=dataset)
try:
pcv.plot_image(img=img)
except RuntimeError:
assert False
# Assert that the image was plotted without error
assert True
def test_plantcv_print_image():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_print_image")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img, path, img_name = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_COLOR))
filename = os.path.join(cache_dir, 'plantcv_print_image.png')
pcv.print_image(img=img, filename=filename)
# Assert that the file was created
assert os.path.exists(filename) is True
def test_plantcv_print_image_bad_type():
with pytest.raises(RuntimeError):
pcv.print_image(img=[], filename="/dev/null")
def test_plantcv_print_image_plotnine():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_print_image_plotnine")
os.mkdir(cache_dir)
dataset = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 2, 3, 4]})
img = ggplot(data=dataset)
filename = os.path.join(cache_dir, 'plantcv_print_image.png')
pcv.print_image(img=img, filename=filename)
# Assert that the file was created
assert os.path.exists(filename) is True
def test_plantcv_print_image_matplotlib():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_print_image_plotnine")
os.mkdir(cache_dir)
# Input data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
plt.figure()
plt.imshow(img)
plot = plt.gcf()
filename = os.path.join(cache_dir, 'plantcv_print_image.png')
pcv.print_image(img=plot, filename=filename)
# Assert that the file was created
assert os.path.exists(filename) is True
def test_plantcv_print_results(tmpdir):
# Create a tmp directory
cache_dir = tmpdir.mkdir("sub")
outfile = os.path.join(cache_dir, "results.json")
pcv.print_results(filename=outfile)
assert os.path.exists(outfile)
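# test_plantcv_print_results above relies on pytest's built-in tmpdir fixture rather than the
# manual TEST_TMPDIR + os.mkdir pattern used by most tests in this module. A module-level fixture
# along the lines of this hypothetical debug_outdir sketch could give every test a per-test
# output directory and reset the debug state afterwards; it is not wired into the existing tests.
@pytest.fixture
def debug_outdir(tmp_path):
    """Point PlantCV debug output at a per-test temporary directory (sketch, unused)."""
    pcv.params.debug_outdir = str(tmp_path)
    yield str(tmp_path)
    # Reset debug mode so settings do not leak between tests
    pcv.params.debug = None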
def test_plantcv_readimage_native():
# Test with debug = None
pcv.params.debug = None
_ = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_COLOR), mode='rgba')
_ = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_COLOR))
img, path, img_name = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_COLOR), mode='native')
# Assert that the image name returned equals the name of the input image
# Assert that the path of the image returned equals the path of the input image
# Assert that the dimensions of the returned image equals the expected dimensions
    assert img_name == TEST_INPUT_COLOR
    assert path == TEST_DATA
    assert all(i == j for i, j in zip(np.shape(img), TEST_COLOR_DIM))
def test_plantcv_readimage_grayscale():
# Test with debug = None
pcv.params.debug = None
_, _, _ = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_GRAY), mode="grey")
img, path, img_name = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_GRAY), mode="gray")
assert len(np.shape(img)) == 2
def test_plantcv_readimage_rgb():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_GRAY), mode="rgb")
assert len(np.shape(img)) == 3
def test_plantcv_readimage_rgba_as_rgb():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_RGBA), mode="native")
assert np.shape(img)[2] == 3
def test_plantcv_readimage_csv():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_THERMAL_CSV), mode="csv")
assert len(np.shape(img)) == 2
def test_plantcv_readimage_envi():
# Test with debug = None
pcv.params.debug = None
array_data = pcv.readimage(filename=os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA), mode="envi")
if sys.version_info[0] < 3:
assert len(array_data.array_type) == 8
def test_plantcv_readimage_bad_file():
with pytest.raises(RuntimeError):
_ = pcv.readimage(filename=TEST_INPUT_COLOR)
def test_plantcv_readbayer_default_bg():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_readbayer_default_bg")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Test with debug = "print"
pcv.params.debug = "print"
_, _, _ = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="BG", alg="default")
# Test with debug = "plot"
pcv.params.debug = "plot"
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="BG", alg="default")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_default_gb():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="GB", alg="default")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_default_rg():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="RG", alg="default")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_default_gr():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="GR", alg="default")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_edgeaware_bg():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="BG", alg="edgeaware")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_edgeaware_gb():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="GB", alg="edgeaware")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_edgeaware_rg():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="RG", alg="edgeaware")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_edgeaware_gr():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="GR", alg="edgeaware")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_variablenumbergradients_bg():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="BG", alg="variablenumbergradients")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_variablenumbergradients_gb():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="GB", alg="variablenumbergradients")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_variablenumbergradients_rg():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="RG", alg="variablenumbergradients")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_variablenumbergradients_gr():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="GR", alg="variablenumbergradients")
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_default_bad_input():
# Test with debug = None
pcv.params.debug = None
with pytest.raises(RuntimeError):
_, _, _ = pcv.readbayer(filename=os.path.join(TEST_DATA, "no-image.png"), bayerpattern="GR", alg="default")
def test_plantcv_rectangle_mask():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_rectangle_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
img_color = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.rectangle_mask(img=img, p1=(0, 0), p2=(2454, 2056), color="white")
_ = pcv.rectangle_mask(img=img, p1=(0, 0), p2=(2454, 2056), color="white")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.rectangle_mask(img=img_color, p1=(0, 0), p2=(2454, 2056), color="gray")
# Test with debug = None
pcv.params.debug = None
masked, hist, contour, heir = pcv.rectangle_mask(img=img, p1=(0, 0), p2=(2454, 2056), color="black")
maskedsum = np.sum(masked)
imgsum = np.sum(img)
assert maskedsum < imgsum
def test_plantcv_rectangle_mask_bad_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_rectangle_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = None
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.rectangle_mask(img=img, p1=(0, 0), p2=(2454, 2056), color="whit")
def test_plantcv_report_size_marker_detect():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_report_size_marker_detect")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MARKER), -1)
# ROI contour
roi_contour = [np.array([[[3550, 850]], [[3550, 1349]], [[4049, 1349]], [[4049, 850]]], dtype=np.int32)]
roi_hierarchy = np.array([[[-1, -1, -1, -1]]], dtype=np.int32)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='detect',
objcolor='light', thresh_channel='s', thresh=120, label="prefix")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='detect',
objcolor='light', thresh_channel='s', thresh=120)
# Test with debug = None
pcv.params.debug = None
images = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='detect',
objcolor='light', thresh_channel='s', thresh=120)
pcv.outputs.clear()
assert len(images) != 0
def test_plantcv_report_size_marker_define():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MARKER), -1)
# ROI contour
roi_contour = [np.array([[[3550, 850]], [[3550, 1349]], [[4049, 1349]], [[4049, 850]]], dtype=np.int32)]
roi_hierarchy = np.array([[[-1, -1, -1, -1]]], dtype=np.int32)
# Test with debug = None
pcv.params.debug = None
images = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='define',
objcolor='light', thresh_channel='s', thresh=120)
assert len(images) != 0
def test_plantcv_report_size_marker_grayscale_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# ROI contour
roi_contour = [np.array([[[0, 0]], [[0, 49]], [[49, 49]], [[49, 0]]], dtype=np.int32)]
roi_hierarchy = np.array([[[-1, -1, -1, -1]]], dtype=np.int32)
# Test with debug = None
pcv.params.debug = None
images = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='define',
objcolor='light', thresh_channel='s', thresh=120)
assert len(images) != 0
def test_plantcv_report_size_marker_bad_marker_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MARKER), -1)
# ROI contour
roi_contour = [np.array([[[3550, 850]], [[3550, 1349]], [[4049, 1349]], [[4049, 850]]], dtype=np.int32)]
roi_hierarchy = np.array([[[-1, -1, -1, -1]]], dtype=np.int32)
with pytest.raises(RuntimeError):
_ = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='none',
objcolor='light', thresh_channel='s', thresh=120)
def test_plantcv_report_size_marker_bad_threshold_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MARKER), -1)
# ROI contour
roi_contour = [np.array([[[3550, 850]], [[3550, 1349]], [[4049, 1349]], [[4049, 850]]], dtype=np.int32)]
roi_hierarchy = np.array([[[-1, -1, -1, -1]]], dtype=np.int32)
with pytest.raises(RuntimeError):
_ = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='detect',
objcolor='light', thresh_channel=None, thresh=120)
def test_plantcv_rgb2gray_cmyk():
# Test with debug = None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
c = pcv.rgb2gray_cmyk(rgb_img=img, channel="c")
# Assert that the output image has the dimensions of the input image but is only a single channel
    assert all(i == j for i, j in zip(np.shape(c), TEST_GRAY_DIM))
def test_plantcv_rgb2gray_cmyk_bad_channel():
# Test with debug = None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
# Channel S is not in CMYK
_ = pcv.rgb2gray_cmyk(rgb_img=img, channel="s")
def test_plantcv_rgb2gray_hsv():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_rgb2gray_hsv")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.rgb2gray_hsv(rgb_img=img, channel="s")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.rgb2gray_hsv(rgb_img=img, channel="s")
# Test with debug = None
pcv.params.debug = None
s = pcv.rgb2gray_hsv(rgb_img=img, channel="s")
# Assert that the output image has the dimensions of the input image but is only a single channel
    assert all(i == j for i, j in zip(np.shape(s), TEST_GRAY_DIM))
def test_plantcv_rgb2gray_hsv_bad_input():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.rgb2gray_hsv(rgb_img=img, channel="l")
def test_plantcv_rgb2gray_lab():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_rgb2gray_lab")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.rgb2gray_lab(rgb_img=img, channel='b')
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.rgb2gray_lab(rgb_img=img, channel='b')
# Test with debug = None
pcv.params.debug = None
b = pcv.rgb2gray_lab(rgb_img=img, channel='b')
# Assert that the output image has the dimensions of the input image but is only a single channel
    assert all(i == j for i, j in zip(np.shape(b), TEST_GRAY_DIM))
def test_plantcv_rgb2gray_lab_bad_input():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.rgb2gray_lab(rgb_img=img, channel="v")
def test_plantcv_rgb2gray():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_rgb2gray")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.rgb2gray(rgb_img=img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.rgb2gray(rgb_img=img)
# Test with debug = None
pcv.params.debug = None
gray = pcv.rgb2gray(rgb_img=img)
# Assert that the output image has the dimensions of the input image but is only a single channel
    assert all(i == j for i, j in zip(np.shape(gray), TEST_GRAY_DIM))
def test_plantcv_roi2mask():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_acute_vertex")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL))
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
obj_contour = contours_npz['arr_0']
pcv.params.debug = "plot"
_ = pcv.roi.roi2mask(img=img, contour=obj_contour)
pcv.params.debug = "print"
mask = pcv.roi.roi2mask(img=img, contour=obj_contour)
assert np.shape(mask)[0:2] == np.shape(img)[0:2] and np.sum(mask) == 255
def test_plantcv_roi_objects():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_roi_objects")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
roi_contour_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_ROI_CONTOUR), encoding="latin1")
roi_contour = [roi_contour_npz[arr_n] for arr_n in roi_contour_npz]
roi_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_ROI_HIERARCHY), encoding="latin1")
roi_hierarchy = roi_hierarchy_npz['arr_0']
object_contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_CONTOURS), encoding="latin1")
object_contours = [object_contours_npz[arr_n] for arr_n in object_contours_npz]
object_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_HIERARCHY), encoding="latin1")
object_hierarchy = object_hierarchy_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.roi_objects(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy,
object_contour=object_contours, obj_hierarchy=object_hierarchy, roi_type="largest")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.roi_objects(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy,
object_contour=object_contours, obj_hierarchy=object_hierarchy, roi_type="partial")
# Test with debug = None and roi_type = cutto
pcv.params.debug = None
_ = pcv.roi_objects(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy,
object_contour=object_contours, obj_hierarchy=object_hierarchy, roi_type="cutto")
# Test with debug = None
kept_contours, kept_hierarchy, mask, area = pcv.roi_objects(img=img, roi_contour=roi_contour,
roi_hierarchy=roi_hierarchy,
object_contour=object_contours,
obj_hierarchy=object_hierarchy, roi_type="partial")
# Assert that the contours were filtered as expected
assert len(kept_contours) == 1891
def test_plantcv_roi_objects_bad_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
roi_contour_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_ROI_CONTOUR), encoding="latin1")
roi_contour = [roi_contour_npz[arr_n] for arr_n in roi_contour_npz]
roi_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_ROI_HIERARCHY), encoding="latin1")
roi_hierarchy = roi_hierarchy_npz['arr_0']
object_contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_CONTOURS), encoding="latin1")
object_contours = [object_contours_npz[arr_n] for arr_n in object_contours_npz]
object_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_HIERARCHY), encoding="latin1")
object_hierarchy = object_hierarchy_npz['arr_0']
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.roi_objects(img=img, roi_type="cut", roi_contour=roi_contour, roi_hierarchy=roi_hierarchy,
object_contour=object_contours, obj_hierarchy=object_hierarchy)
def test_plantcv_roi_objects_grayscale_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_roi_objects_grayscale_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
roi_contour_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_ROI_CONTOUR), encoding="latin1")
roi_contour = [roi_contour_npz[arr_n] for arr_n in roi_contour_npz]
roi_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_ROI_HIERARCHY), encoding="latin1")
roi_hierarchy = roi_hierarchy_npz['arr_0']
object_contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_CONTOURS), encoding="latin1")
object_contours = [object_contours_npz[arr_n] for arr_n in object_contours_npz]
object_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_HIERARCHY), encoding="latin1")
object_hierarchy = object_hierarchy_npz['arr_0']
# Test with debug = "plot"
pcv.params.debug = "plot"
kept_contours, kept_hierarchy, mask, area = pcv.roi_objects(img=img, roi_type="partial", roi_contour=roi_contour,
roi_hierarchy=roi_hierarchy,
object_contour=object_contours,
obj_hierarchy=object_hierarchy)
# Assert that the contours were filtered as expected
assert len(kept_contours) == 1891
def test_plantcv_rotate():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
rotated = pcv.rotate(img=img, rotation_deg=45, crop=True)
imgavg = np.average(img)
rotateavg = np.average(rotated)
assert rotateavg != imgavg
def test_plantcv_transform_rotate():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_rotate_img")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.transform.rotate(img=img, rotation_deg=45, crop=True)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.transform.rotate(img=img, rotation_deg=45, crop=True)
# Test with debug = None
pcv.params.debug = None
rotated = pcv.transform.rotate(img=img, rotation_deg=45, crop=True)
imgavg = np.average(img)
rotateavg = np.average(rotated)
assert rotateavg != imgavg
def test_plantcv_transform_rotate_gray():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.transform.rotate(img=img, rotation_deg=45, crop=False)
# Test with debug = None
pcv.params.debug = None
rotated = pcv.transform.rotate(img=img, rotation_deg=45, crop=False)
imgavg = np.average(img)
rotateavg = np.average(rotated)
assert rotateavg != imgavg
def test_plantcv_scale_features():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_scale_features")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
obj_contour = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.scale_features(obj=obj_contour, mask=mask, points=TEST_ACUTE_RESULT, line_position=50)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.scale_features(obj=obj_contour, mask=mask, points=TEST_ACUTE_RESULT, line_position='NA')
# Test with debug = None
pcv.params.debug = None
points_rescaled, centroid_rescaled, bottomline_rescaled = pcv.scale_features(obj=obj_contour, mask=mask,
points=TEST_ACUTE_RESULT,
line_position=50)
assert len(points_rescaled) == 23
def test_plantcv_scale_features_bad_input():
mask = np.array([])
obj_contour = np.array([])
pcv.params.debug = None
result = pcv.scale_features(obj=obj_contour, mask=mask, points=TEST_ACUTE_RESULT, line_position=50)
    assert all(i == j for i, j in zip(result, [("NA", "NA"), ("NA", "NA"), ("NA", "NA")]))
def test_plantcv_scharr_filter():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_scharr_filter")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
pcv.params.debug = "print"
# Test with debug = "print"
_ = pcv.scharr_filter(img=img, dx=1, dy=0, scale=1)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.scharr_filter(img=img, dx=1, dy=0, scale=1)
# Test with debug = None
pcv.params.debug = None
scharr_img = pcv.scharr_filter(img=img, dx=1, dy=0, scale=1)
# Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(scharr_img), TEST_GRAY_DIM))
def test_plantcv_shift_img():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_shift_img")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.shift_img(img=img, number=300, side="top")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.shift_img(img=img, number=300, side="top")
# Test with debug = "plot"
_ = pcv.shift_img(img=img, number=300, side="bottom")
# Test with debug = "plot"
_ = pcv.shift_img(img=img, number=300, side="right")
# Test with debug = "plot"
_ = pcv.shift_img(img=mask, number=300, side="left")
# Test with debug = None
pcv.params.debug = None
    shifted = pcv.shift_img(img=img, number=300, side="top")
    imgavg = np.average(img)
    shiftavg = np.average(shifted)
assert shiftavg != imgavg
def test_plantcv_shift_img_bad_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
pcv.params.debug = None
_ = pcv.shift_img(img=img, number=-300, side="top")
def test_plantcv_shift_img_bad_side_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
pcv.params.debug = None
_ = pcv.shift_img(img=img, number=300, side="starboard")
def test_plantcv_sobel_filter():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_sobel_filter")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.sobel_filter(gray_img=img, dx=1, dy=0, ksize=1)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.sobel_filter(gray_img=img, dx=1, dy=0, ksize=1)
# Test with debug = None
pcv.params.debug = None
sobel_img = pcv.sobel_filter(gray_img=img, dx=1, dy=0, ksize=1)
# Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(sobel_img), TEST_GRAY_DIM))
def test_plantcv_stdev_filter():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_sobel_filter")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY_SMALL), -1)
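    # stdev_filter computes a local standard deviation image over a ksize x ksize neighborhood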
pcv.params.debug = "plot"
_ = pcv.stdev_filter(img=img, ksize=11)
pcv.params.debug = "print"
filter_img = pcv.stdev_filter(img=img, ksize=11)
assert (np.shape(filter_img) == np.shape(img))
def test_plantcv_watershed_segmentation():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_watershed_segmentation")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_CROPPED))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_CROPPED_MASK), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.watershed_segmentation(rgb_img=img, mask=mask, distance=10, label="prefix")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.watershed_segmentation(rgb_img=img, mask=mask, distance=10)
# Test with debug = None
pcv.params.debug = None
_ = pcv.watershed_segmentation(rgb_img=img, mask=mask, distance=10)
assert pcv.outputs.observations['default']['estimated_object_count']['value'] > 9
def test_plantcv_white_balance_gray_16bit():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_white_balance_gray_16bit")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.white_balance(img=img, mode='hist', roi=(5, 5, 80, 80))
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.white_balance(img=img, mode='max', roi=(5, 5, 80, 80))
# Test without an ROI
pcv.params.debug = None
_ = pcv.white_balance(img=img, mode='hist', roi=None)
# Test with debug = None
white_balanced = pcv.white_balance(img=img, roi=(5, 5, 80, 80))
imgavg = np.average(img)
balancedavg = np.average(white_balanced)
assert balancedavg != imgavg
def test_plantcv_white_balance_gray_8bit():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_white_balance_gray_8bit")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK))
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.white_balance(img=img, mode='hist', roi=(5, 5, 80, 80))
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.white_balance(img=img, mode='max', roi=(5, 5, 80, 80))
# Test without an ROI
pcv.params.debug = None
_ = pcv.white_balance(img=img, mode='hist', roi=None)
# Test with debug = None
white_balanced = pcv.white_balance(img=img, roi=(5, 5, 80, 80))
imgavg = np.average(img)
balancedavg = np.average(white_balanced)
assert balancedavg != imgavg
def test_plantcv_white_balance_rgb():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_white_balance_rgb")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MARKER))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.white_balance(img=img, mode='hist', roi=(5, 5, 80, 80))
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.white_balance(img=img, mode='max', roi=(5, 5, 80, 80))
# Test without an ROI
pcv.params.debug = None
_ = pcv.white_balance(img=img, mode='hist', roi=None)
# Test with debug = None
white_balanced = pcv.white_balance(img=img, roi=(5, 5, 80, 80))
imgavg = np.average(img)
balancedavg = np.average(white_balanced)
assert balancedavg != imgavg
def test_plantcv_white_balance_bad_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK), -1)
# Test with debug = None
with pytest.raises(RuntimeError):
pcv.params.debug = "plot"
_ = pcv.white_balance(img=img, mode='hist', roi=(5, 5, 5, 5, 5))
def test_plantcv_white_balance_bad_mode_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MARKER))
# Test with debug = None
with pytest.raises(RuntimeError):
pcv.params.debug = "plot"
_ = pcv.white_balance(img=img, mode='histogram', roi=(5, 5, 80, 80))
def test_plantcv_white_balance_bad_input_int():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK), -1)
# Test with debug = None
with pytest.raises(RuntimeError):
pcv.params.debug = "plot"
_ = pcv.white_balance(img=img, mode='hist', roi=(5., 5, 5, 5))
def test_plantcv_x_axis_pseudolandmarks():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_x_axis_pseudolandmarks_debug")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
obj_contour = contours_npz['arr_0']
pcv.params.debug = "print"
_ = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img, label="prefix")
_ = pcv.x_axis_pseudolandmarks(obj=np.array([[0, 0], [0, 0]]), mask=np.array([[0, 0], [0, 0]]), img=img)
_ = pcv.x_axis_pseudolandmarks(obj=np.array(([[89, 222]], [[252, 39]], [[89, 207]])),
mask=np.array(([[42, 161]], [[2, 47]], [[211, 222]])), img=img)
_ = pcv.x_axis_pseudolandmarks(obj=(), mask=mask, img=img)
# Test with debug = None
pcv.params.debug = None
top, bottom, center_v = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
pcv.outputs.clear()
    assert all([all(i == j for i, j in zip(np.shape(top), (20, 1, 2))),
                all(i == j for i, j in zip(np.shape(bottom), (20, 1, 2))),
                all(i == j for i, j in zip(np.shape(center_v), (20, 1, 2)))])
def test_plantcv_x_axis_pseudolandmarks_small_obj():
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL_PLANT))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL_PLANT), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR_SMALL_PLANT), encoding="latin1")
obj_contour = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_, _, _ = pcv.x_axis_pseudolandmarks(obj=[], mask=mask, img=img)
_, _, _ = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_, _, _ = pcv.x_axis_pseudolandmarks(obj=[], mask=mask, img=img)
top, bottom, center_v = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
    assert all([all(i == j for i, j in zip(np.shape(top), (20, 1, 2))),
                all(i == j for i, j in zip(np.shape(bottom), (20, 1, 2))),
                all(i == j for i, j in zip(np.shape(center_v), (20, 1, 2)))])
def test_plantcv_x_axis_pseudolandmarks_bad_input():
img = np.array([])
mask = np.array([])
obj_contour = np.array([])
pcv.params.debug = None
result = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
    assert all(i == j for i, j in zip(result, [("NA", "NA"), ("NA", "NA"), ("NA", "NA")]))
def test_plantcv_x_axis_pseudolandmarks_bad_obj_input():
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL_PLANT))
with pytest.raises(RuntimeError):
_ = pcv.x_axis_pseudolandmarks(obj=np.array([[-2, -2], [-2, -2]]), mask=np.array([[-2, -2], [-2, -2]]), img=img)
def test_plantcv_y_axis_pseudolandmarks():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_y_axis_pseudolandmarks_debug")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
obj_contour = contours_npz['arr_0']
pcv.params.debug = "print"
_ = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img, label="prefix")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
pcv.outputs.clear()
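    # Exercise edge cases: empty objects and minimal synthetic contours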
_ = pcv.y_axis_pseudolandmarks(obj=[], mask=mask, img=img)
_ = pcv.y_axis_pseudolandmarks(obj=(), mask=mask, img=img)
_ = pcv.y_axis_pseudolandmarks(obj=np.array(([[89, 222]], [[252, 39]], [[89, 207]])),
mask=np.array(([[42, 161]], [[2, 47]], [[211, 222]])), img=img)
_ = pcv.y_axis_pseudolandmarks(obj=np.array(([[21, 11]], [[159, 155]], [[237, 11]])),
mask=np.array(([[38, 54]], [[144, 169]], [[81, 137]])), img=img)
# Test with debug = None
pcv.params.debug = None
left, right, center_h = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
pcv.outputs.clear()
    assert all([all(i == j for i, j in zip(np.shape(left), (20, 1, 2))),
                all(i == j for i, j in zip(np.shape(right), (20, 1, 2))),
                all(i == j for i, j in zip(np.shape(center_h), (20, 1, 2)))])
def test_plantcv_y_axis_pseudolandmarks_small_obj():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_y_axis_pseudolandmarks_debug")
os.mkdir(cache_dir)
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL_PLANT))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL_PLANT), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR_SMALL_PLANT), encoding="latin1")
obj_contour = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_, _, _ = pcv.y_axis_pseudolandmarks(obj=[], mask=mask, img=img)
_, _, _ = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
# Test with debug = "plot"
pcv.params.debug = "plot"
pcv.outputs.clear()
left, right, center_h = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
pcv.outputs.clear()
    assert all([all(i == j for i, j in zip(np.shape(left), (20, 1, 2))),
                all(i == j for i, j in zip(np.shape(right), (20, 1, 2))),
                all(i == j for i, j in zip(np.shape(center_h), (20, 1, 2)))])
def test_plantcv_y_axis_pseudolandmarks_bad_input():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_y_axis_pseudolandmarks_debug")
os.mkdir(cache_dir)
img = np.array([])
mask = np.array([])
obj_contour = np.array([])
pcv.params.debug = None
result = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
pcv.outputs.clear()
    assert all(i == j for i, j in zip(result, [("NA", "NA"), ("NA", "NA"), ("NA", "NA")]))
def test_plantcv_y_axis_pseudolandmarks_bad_obj_input():
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL_PLANT))
with pytest.raises(RuntimeError):
_ = pcv.y_axis_pseudolandmarks(obj=np.array([[-2, -2], [-2, -2]]), mask=np.array([[-2, -2], [-2, -2]]), img=img)
def test_plantcv_background_subtraction():
# List to hold result of all tests.
truths = []
fg_img = cv2.imread(os.path.join(TEST_DATA, TEST_FOREGROUND))
bg_img = cv2.imread(os.path.join(TEST_DATA, TEST_BACKGROUND))
big_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Testing if background subtraction is actually still working.
    # This should return an array whose sum is greater than zero
pcv.params.debug = None
fgmask = pcv.background_subtraction(background_image=bg_img, foreground_image=fg_img)
truths.append(np.sum(fgmask) > 0)
fgmask = pcv.background_subtraction(background_image=big_img, foreground_image=bg_img)
truths.append(np.sum(fgmask) > 0)
# The same foreground subtracted from itself should be 0
fgmask = pcv.background_subtraction(background_image=fg_img, foreground_image=fg_img)
truths.append(np.sum(fgmask) == 0)
# The same background subtracted from itself should be 0
fgmask = pcv.background_subtraction(background_image=bg_img, foreground_image=bg_img)
truths.append(np.sum(fgmask) == 0)
# All of these should be true for the function to pass testing.
assert (all(truths))
def test_plantcv_background_subtraction_debug():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_background_subtraction_debug")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# List to hold result of all tests.
truths = []
fg_img = cv2.imread(os.path.join(TEST_DATA, TEST_FOREGROUND))
bg_img = cv2.imread(os.path.join(TEST_DATA, TEST_BACKGROUND))
# Test with debug = "print"
pcv.params.debug = "print"
fgmask = pcv.background_subtraction(background_image=bg_img, foreground_image=fg_img)
truths.append(np.sum(fgmask) > 0)
# Test with debug = "plot"
pcv.params.debug = "plot"
fgmask = pcv.background_subtraction(background_image=bg_img, foreground_image=fg_img)
truths.append(np.sum(fgmask) > 0)
# All of these should be true for the function to pass testing.
assert (all(truths))
def test_plantcv_background_subtraction_bad_img_type():
fg_color = cv2.imread(os.path.join(TEST_DATA, TEST_FOREGROUND))
bg_gray = cv2.imread(os.path.join(TEST_DATA, TEST_BACKGROUND), 0)
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.background_subtraction(background_image=bg_gray, foreground_image=fg_color)
def test_plantcv_background_subtraction_different_sizes():
fg_img = cv2.imread(os.path.join(TEST_DATA, TEST_FOREGROUND))
bg_img = cv2.imread(os.path.join(TEST_DATA, TEST_BACKGROUND))
bg_shp = np.shape(bg_img) # type: tuple
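    # Note: cv2.resize takes dsize as (width, height); exact proportions are irrelevant here, only that the sizes differ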
bg_img_resized = cv2.resize(bg_img, (int(bg_shp[0] / 2), int(bg_shp[1] / 2)), interpolation=cv2.INTER_AREA)
pcv.params.debug = None
fgmask = pcv.background_subtraction(background_image=bg_img_resized, foreground_image=fg_img)
assert np.sum(fgmask) > 0
def test_plantcv_spatial_clustering_dbscan():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_spatial_clustering_dbscan")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI_MASK), -1)
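    # spatial_clustering should return an annotated image plus one mask per detected cluster (two for this test image)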
pcv.params.debug = "print"
_ = pcv.spatial_clustering(img, algorithm="DBSCAN", min_cluster_size=10, max_distance=None)
pcv.params.debug = "plot"
spmask = pcv.spatial_clustering(img, algorithm="DBSCAN", min_cluster_size=10, max_distance=None)
assert len(spmask[1]) == 2
def test_plantcv_spatial_clustering_optics():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_spatial_clustering_optics")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI_MASK), -1)
pcv.params.debug = None
spmask = pcv.spatial_clustering(img, algorithm="OPTICS", min_cluster_size=100, max_distance=5000)
assert len(spmask[1]) == 2
def test_plantcv_spatial_clustering_badinput():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI_MASK), -1)
pcv.params.debug = None
with pytest.raises(NameError):
_ = pcv.spatial_clustering(img, algorithm="Hydra", min_cluster_size=5, max_distance=100)
# ##############################
# Tests for the learn subpackage
# ##############################
def test_plantcv_learn_naive_bayes():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_learn_naive_bayes")
os.mkdir(cache_dir)
# Make image and mask directories in the cache directory
imgdir = os.path.join(cache_dir, "images")
maskdir = os.path.join(cache_dir, "masks")
if not os.path.exists(imgdir):
os.mkdir(imgdir)
if not os.path.exists(maskdir):
os.mkdir(maskdir)
    # Copy an image and mask to the image/mask directories
shutil.copyfile(os.path.join(TEST_DATA, TEST_VIS_SMALL), os.path.join(imgdir, "image.png"))
shutil.copyfile(os.path.join(TEST_DATA, TEST_MASK_SMALL), os.path.join(maskdir, "image.png"))
# Run the naive Bayes training module
outfile = os.path.join(cache_dir, "naive_bayes_pdfs.txt")
plantcv.learn.naive_bayes(imgdir=imgdir, maskdir=maskdir, outfile=outfile, mkplots=True)
assert os.path.exists(outfile)
def test_plantcv_learn_naive_bayes_multiclass():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_learn_naive_bayes_multiclass")
os.mkdir(cache_dir)
# Run the naive Bayes multiclass training module
outfile = os.path.join(cache_dir, "naive_bayes_multiclass_pdfs.txt")
plantcv.learn.naive_bayes_multiclass(samples_file=os.path.join(TEST_DATA, TEST_SAMPLED_RGB_POINTS), outfile=outfile,
mkplots=True)
assert os.path.exists(outfile)
# ####################################
# Tests for the morphology subpackage
# ####################################
def test_plantcv_morphology_segment_curvature():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_curvature")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
pcv.params.debug = "print"
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=skeleton)
pcv.outputs.clear()
_ = pcv.morphology.segment_curvature(segmented_img, seg_objects, label="prefix")
pcv.params.debug = "plot"
pcv.outputs.clear()
_ = pcv.morphology.segment_curvature(segmented_img, seg_objects)
assert len(pcv.outputs.observations['default']['segment_curvature']['value']) == 22
def test_plantcv_morphology_check_cycles():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_branches")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
pcv.params.debug = "print"
_ = pcv.morphology.check_cycles(mask, label="prefix")
pcv.params.debug = "plot"
_ = pcv.morphology.check_cycles(mask)
pcv.params.debug = None
_ = pcv.morphology.check_cycles(mask)
assert pcv.outputs.observations['default']['num_cycles']['value'] == 1
def test_plantcv_morphology_find_branch_pts():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_branches")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pcv.params.debug = "print"
_ = pcv.morphology.find_branch_pts(skel_img=skeleton, mask=mask, label="prefix")
pcv.params.debug = "plot"
_ = pcv.morphology.find_branch_pts(skel_img=skeleton)
pcv.params.debug = None
branches = pcv.morphology.find_branch_pts(skel_img=skeleton)
assert np.sum(branches) == 9435
def test_plantcv_morphology_find_tips():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_tips")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pcv.params.debug = "print"
_ = pcv.morphology.find_tips(skel_img=skeleton, mask=mask, label="prefix")
pcv.params.debug = "plot"
_ = pcv.morphology.find_tips(skel_img=skeleton)
pcv.params.debug = None
tips = pcv.morphology.find_tips(skel_img=skeleton)
assert np.sum(tips) == 9435
def test_plantcv_morphology_prune():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_pruned")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pcv.params.debug = "print"
_ = pcv.morphology.prune(skel_img=skeleton, size=1)
pcv.params.debug = "plot"
_ = pcv.morphology.prune(skel_img=skeleton, size=1, mask=skeleton)
pcv.params.debug = None
pruned_img, _, _ = pcv.morphology.prune(skel_img=skeleton, size=3)
assert np.sum(pruned_img) < np.sum(skeleton)
def test_plantcv_morphology_prune_size0():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_pruned")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pruned_img, _, _ = pcv.morphology.prune(skel_img=skeleton, size=0)
assert np.sum(pruned_img) == np.sum(skeleton)
def test_plantcv_morphology_iterative_prune():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_pruned")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pruned_img = pcv.morphology._iterative_prune(skel_img=skeleton, size=3)
assert np.sum(pruned_img) < np.sum(skeleton)
def test_plantcv_morphology_segment_skeleton():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_skeleton")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pcv.params.debug = "print"
_ = pcv.morphology.segment_skeleton(skel_img=skeleton, mask=mask)
pcv.params.debug = "plot"
segmented_img, segment_objects = pcv.morphology.segment_skeleton(skel_img=skeleton)
assert len(segment_objects) == 73
def test_plantcv_morphology_fill_segments():
# Clear previous outputs
pcv.outputs.clear()
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
obj_dic = np.load(os.path.join(TEST_DATA, TEST_SKELETON_OBJECTS))
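    # Gather every stored contour array from the npz archive into a list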
obj = []
for key, val in obj_dic.items():
obj.append(val)
pcv.params.debug = None
_ = pcv.morphology.fill_segments(mask, obj)
tests = [pcv.outputs.observations['default']['segment_area']['value'][42] == 5529,
pcv.outputs.observations['default']['segment_area']['value'][20] == 5057,
pcv.outputs.observations['default']['segment_area']['value'][49] == 3323]
assert all(tests)
def test_plantcv_morphology_fill_segments_with_stem():
# Clear previous outputs
pcv.outputs.clear()
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
obj_dic = np.load(os.path.join(TEST_DATA, TEST_SKELETON_OBJECTS))
obj = []
for key, val in obj_dic.items():
obj.append(val)
stem_obj = obj[0:4]
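    # Treat the first few segments as the stem so that the remaining segments are reported as leaves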
pcv.params.debug = None
_ = pcv.morphology.fill_segments(mask, obj, stem_obj)
num_objects = len(pcv.outputs.observations['default']['leaf_area']['value'])
assert num_objects == 70
def test_plantcv_morphology_segment_angle():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_angles")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
pcv.params.debug = "print"
segmented_img, segment_objects = pcv.morphology.segment_skeleton(skel_img=skeleton)
_ = pcv.morphology.segment_angle(segmented_img=segmented_img, objects=segment_objects, label="prefix")
pcv.params.debug = "plot"
_ = pcv.morphology.segment_angle(segmented_img, segment_objects)
assert len(pcv.outputs.observations['default']['segment_angle']['value']) == 22
def test_plantcv_morphology_segment_angle_overflow():
# Clear previous outputs
pcv.outputs.clear()
    # Don't prune; without the extra check in segment_angle this skeleton would normally trigger an overflow error
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_angles")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
segmented_img, segment_objects = pcv.morphology.segment_skeleton(skel_img=skeleton)
_ = pcv.morphology.segment_angle(segmented_img, segment_objects)
assert len(pcv.outputs.observations['default']['segment_angle']['value']) == 73
def test_plantcv_morphology_segment_euclidean_length():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_eu_length")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
pcv.params.debug = "print"
segmented_img, segment_objects = pcv.morphology.segment_skeleton(skel_img=skeleton)
_ = pcv.morphology.segment_euclidean_length(segmented_img, segment_objects, label="prefix")
pcv.params.debug = "plot"
_ = pcv.morphology.segment_euclidean_length(segmented_img, segment_objects)
assert len(pcv.outputs.observations['default']['segment_eu_length']['value']) == 22
def test_plantcv_morphology_segment_euclidean_length_bad_input():
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
skel = pcv.morphology.skeletonize(mask=mask)
pcv.params.debug = None
segmented_img, segment_objects = pcv.morphology.segment_skeleton(skel_img=skel)
with pytest.raises(RuntimeError):
_ = pcv.morphology.segment_euclidean_length(segmented_img, segment_objects)
def test_plantcv_morphology_segment_path_length():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_path_length")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
pcv.params.debug = "print"
segmented_img, segment_objects = pcv.morphology.segment_skeleton(skel_img=skeleton)
_ = pcv.morphology.segment_path_length(segmented_img, segment_objects, label="prefix")
pcv.params.debug = "plot"
_ = pcv.morphology.segment_path_length(segmented_img, segment_objects)
assert len(pcv.outputs.observations['default']['segment_path_length']['value']) == 22
def test_plantcv_morphology_skeletonize():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_skeletonize")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
input_skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pcv.params.debug = "print"
_ = pcv.morphology.skeletonize(mask=mask)
pcv.params.debug = "plot"
_ = pcv.morphology.skeletonize(mask=mask)
pcv.params.debug = None
skeleton = pcv.morphology.skeletonize(mask=mask)
arr = np.array(skeleton == input_skeleton)
assert arr.all()
def test_plantcv_morphology_segment_sort():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_sort")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=skeleton)
pcv.params.debug = "print"
_ = pcv.morphology.segment_sort(skeleton, seg_objects, mask=skeleton)
pcv.params.debug = "plot"
leaf_obj, stem_obj = pcv.morphology.segment_sort(skeleton, seg_objects)
assert len(leaf_obj) == 36
def test_plantcv_morphology_segment_tangent_angle():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_tangent_angle")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
objects = np.load(os.path.join(TEST_DATA, TEST_SKELETON_OBJECTS), encoding="latin1")
objs = [objects[arr_n] for arr_n in objects]
pcv.params.debug = "print"
_ = pcv.morphology.segment_tangent_angle(skel, objs, 2, label="prefix")
pcv.params.debug = "plot"
_ = pcv.morphology.segment_tangent_angle(skel, objs, 2)
assert len(pcv.outputs.observations['default']['segment_tangent_angle']['value']) == 73
def test_plantcv_morphology_segment_id():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_tangent_angle")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
objects = np.load(os.path.join(TEST_DATA, TEST_SKELETON_OBJECTS), encoding="latin1")
objs = [objects[arr_n] for arr_n in objects]
pcv.params.debug = "print"
_ = pcv.morphology.segment_id(skel, objs)
pcv.params.debug = "plot"
_, labeled_img = pcv.morphology.segment_id(skel, objs, mask=skel)
assert np.sum(labeled_img) > np.sum(skel)
def test_plantcv_morphology_segment_insertion_angle():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_insertion_angle")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
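    # Prune short spur segments off the skeleton before sorting leaves from the stem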
pruned, _, _ = pcv.morphology.prune(skel_img=skeleton, size=6)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=pruned)
leaf_obj, stem_obj = pcv.morphology.segment_sort(pruned, seg_objects)
pcv.params.debug = "plot"
_ = pcv.morphology.segment_insertion_angle(pruned, segmented_img, leaf_obj, stem_obj, 3, label="prefix")
pcv.params.debug = "print"
_ = pcv.morphology.segment_insertion_angle(pruned, segmented_img, leaf_obj, stem_obj, 10)
assert pcv.outputs.observations['default']['segment_insertion_angle']['value'][:6] == ['NA', 'NA', 'NA',
24.956918822001636,
50.7313343343401,
56.427712102130734]
def test_plantcv_morphology_segment_insertion_angle_bad_stem():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_insertion_angle")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pruned, _, _ = pcv.morphology.prune(skel_img=skeleton, size=5)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=pruned)
leaf_obj, stem_obj = pcv.morphology.segment_sort(pruned, seg_objects)
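    # Deliberately pass leaf segments as the stem to trigger the RuntimeError path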
stem_obj = [leaf_obj[0], leaf_obj[10]]
with pytest.raises(RuntimeError):
_ = pcv.morphology.segment_insertion_angle(pruned, segmented_img, leaf_obj, stem_obj, 10)
def test_plantcv_morphology_segment_combine():
skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=skel)
pcv.params.debug = "plot"
# Test with list of IDs input
_, new_objects = pcv.morphology.segment_combine([0, 1], seg_objects, skel)
assert len(new_objects) + 1 == len(seg_objects)
def test_plantcv_morphology_segment_combine_lists():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_insertion_angle")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=skel)
pcv.params.debug = "print"
# Test with list of lists input
_, new_objects = pcv.morphology.segment_combine([[0, 1, 2], [3, 4]], seg_objects, skel)
assert len(new_objects) + 3 == len(seg_objects)
def test_plantcv_morphology_segment_combine_bad_input():
skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=skel)
pcv.params.debug = "plot"
with pytest.raises(RuntimeError):
_, new_objects = pcv.morphology.segment_combine([0.5, 1.5], seg_objects, skel)
def test_plantcv_morphology_analyze_stem():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_analyze_stem")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pruned, segmented_img, _ = pcv.morphology.prune(skel_img=skeleton, size=6)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=pruned)
leaf_obj, stem_obj = pcv.morphology.segment_sort(pruned, seg_objects)
pcv.params.debug = "plot"
_ = pcv.morphology.analyze_stem(rgb_img=segmented_img, stem_objects=stem_obj, label="prefix")
pcv.params.debug = "print"
_ = pcv.morphology.analyze_stem(rgb_img=segmented_img, stem_objects=stem_obj)
assert pcv.outputs.observations['default']['stem_angle']['value'] == -12.531776428222656
def test_plantcv_morphology_analyze_stem_bad_angle():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_insertion_angle")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pruned, _, _ = pcv.morphology.prune(skel_img=skeleton, size=5)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=pruned)
_, _ = pcv.morphology.segment_sort(pruned, seg_objects)
    # Use a hardcoded vertical stem contour to force an extreme fitted angle
    stem_obj = [[[[1116, 1728]], [[1116, 1]]]]
_ = pcv.morphology.analyze_stem(rgb_img=segmented_img, stem_objects=stem_obj)
assert pcv.outputs.observations['default']['stem_angle']['value'] == 22877334.0
# ########################################
# Tests for the hyperspectral subpackage
# ########################################
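# Each spectral index test below follows the same pattern: read the test datacube,
# compute the index (distance=20 band-matching tolerance), and check that the index
# array is (1, 1600) with a pseudo-RGB rendering whose maximum is 255.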
def test_plantcv_hyperspectral_read_data_default():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_read_data_default")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = "plot"
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
_ = pcv.hyperspectral.read_data(filename=spectral_filename)
pcv.params.debug = "print"
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
assert np.shape(array_data.array_data) == (1, 1600, 978)
def test_plantcv_hyperspectral_read_data_no_default_bands():
pcv.params.debug = "plot"
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA_NO_DEFAULT)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
assert np.shape(array_data.array_data) == (1, 1600, 978)
def test_plantcv_hyperspectral_read_data_approx_pseudorgb():
pcv.params.debug = "plot"
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA_APPROX_PSEUDO)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
assert np.shape(array_data.array_data) == (1, 1600, 978)
def test_plantcv_spectral_index_ndvi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_ndvi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.ndvi(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_ndvi_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.ndvi(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.ndvi(hsi=index_array, distance=20)
def test_plantcv_spectral_index_gdvi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_gdvi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.gdvi(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_gdvi_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.gdvi(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.gdvi(hsi=index_array, distance=20)
def test_plantcv_spectral_index_savi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_savi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.savi(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_savi_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.savi(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.savi(hsi=index_array, distance=20)
def test_plantcv_spectral_index_pri():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_pri")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.pri(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_pri_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.pri(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.pri(hsi=index_array, distance=20)
def test_plantcv_spectral_index_ari():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_ari")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.ari(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_ari_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.ari(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.ari(hsi=index_array, distance=20)
def test_plantcv_spectral_index_ci_rededge():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_ci_rededge")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.ci_rededge(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_ci_rededge_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.ci_rededge(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.ci_rededge(hsi=index_array, distance=20)
def test_plantcv_spectral_index_cri550():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_cri550")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.cri550(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_cri550_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.cri550(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.cri550(hsi=index_array, distance=20)
def test_plantcv_spectral_index_cri700():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_cri700")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.cri700(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_cri700_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.cri700(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.cri700(hsi=index_array, distance=20)
def test_plantcv_spectral_index_egi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_egi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
index_array = pcv.spectral_index.egi(rgb_img=rgb_img)
assert np.shape(index_array.array_data) == (2056, 2454) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_evi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_evi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.evi(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_evi_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.evi(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.evi(hsi=index_array, distance=20)
def test_plantcv_spectral_index_mari():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_mari")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.mari(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_mari_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.mari(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.mari(hsi=index_array, distance=20)
def test_plantcv_spectral_index_mcari():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_mcari")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.mcari(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_mcari_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.mcari(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.mcari(hsi=index_array, distance=20)
def test_plantcv_spectral_index_mtci():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_mtci")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.mtci(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_mtci_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.mtci(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.mtci(hsi=index_array, distance=20)
def test_plantcv_spectral_index_ndre():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_ndre")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.ndre(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_ndre_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.ndre(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.ndre(hsi=index_array, distance=20)
def test_plantcv_spectral_index_psnd_chla():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_psnd_chla")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.psnd_chla(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_psnd_chla_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.psnd_chla(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.psnd_chla(hsi=index_array, distance=20)
def test_plantcv_spectral_index_psnd_chlb():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_psnd_chlb")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.psnd_chlb(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_psnd_chlb_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.psnd_chlb(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.psnd_chlb(hsi=index_array, distance=20)
def test_plantcv_spectral_index_psnd_car():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_psnd_car")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.psnd_car(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_psnd_car_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.psnd_car(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.psnd_car(hsi=index_array, distance=20)
def test_plantcv_spectral_index_psri():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_psri")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.psri(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_psri_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.psri(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.psri(hsi=index_array, distance=20)
def test_plantcv_spectral_index_pssr_chla():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_pssr_chla")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.pssr_chla(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_pssr_chla_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.pssr_chla(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.pssr_chla(hsi=index_array, distance=20)
def test_plantcv_spectral_index_pssr_chlb():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_pssr_chlb")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.pssr_chlb(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_pssr_chlb_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.pssr_chlb(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.pssr_chlb(hsi=index_array, distance=20)
def test_plantcv_spectral_index_pssr_car():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_pssr_car")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.pssr_car(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_pssr_car_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.pssr_car(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.pssr_car(hsi=index_array, distance=20)
def test_plantcv_spectral_index_rgri():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_rgri")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.rgri(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_rgri_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.rgri(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.rgri(hsi=index_array, distance=20)
def test_plantcv_spectral_index_rvsi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_rvsi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.rvsi(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_rvsi_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.rvsi(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.rvsi(hsi=index_array, distance=20)
def test_plantcv_spectral_index_sipi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_sipi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.sipi(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_sipi_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.sipi(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.sipi(hsi=index_array, distance=20)
def test_plantcv_spectral_index_sr():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_sr")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.sr(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_sr_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.sr(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.sr(hsi=index_array, distance=20)
def test_plantcv_spectral_index_vari():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_vari")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.vari(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_vari_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.vari(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.vari(hsi=index_array, distance=20)
def test_plantcv_spectral_index_vi_green():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_vi_green")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.vi_green(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_vi_green_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.vi_green(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.vi_green(hsi=index_array, distance=20)
def test_plantcv_spectral_index_wi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_wi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.wi(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_wi_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.wi(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.wi(hsi=index_array, distance=20)
def test_plantcv_hyperspectral_analyze_spectral():
# Clear previous outputs
pcv.outputs.clear()
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_analyze_spectral")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
mask = cv2.imread(os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_MASK), -1)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
# pcv.params.debug = "plot"
# _ = pcv.hyperspectral.analyze_spectral(array=array_data, mask=mask, histplot=True)
# pcv.params.debug = "print"
# _ = pcv.hyperspectral.analyze_spectral(array=array_data, mask=mask, histplot=True, label="prefix")
pcv.params.debug = None
_ = pcv.hyperspectral.analyze_spectral(array=array_data, mask=mask, histplot=True, label="prefix")
assert len(pcv.outputs.observations['prefix']['spectral_frequencies']['value']) == 978
def test_plantcv_hyperspectral_analyze_index():
# Clear previous outputs
pcv.outputs.clear()
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_analyze_index")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.savi(hsi=array_data, distance=801)
mask_img = np.ones(np.shape(index_array.array_data), dtype=np.uint8) * 255
# pcv.params.debug = "print"
# pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img, histplot=True)
# pcv.params.debug = "plot"
# pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img, histplot=True)
pcv.params.debug = None
pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img, histplot=True)
assert pcv.outputs.observations['default']['mean_index_savi']['value'] > 0
def test_plantcv_hyperspectral_analyze_index_set_range():
# Clear previous outputs
pcv.outputs.clear()
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_analyze_index_set_range")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.savi(hsi=array_data, distance=801)
mask_img = np.ones(np.shape(index_array.array_data), dtype=np.uint8) * 255
pcv.params.debug = None
pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img, histplot=True, min_bin=0, max_bin=1)
assert pcv.outputs.observations['default']['mean_index_savi']['value'] > 0
def test_plantcv_hyperspectral_analyze_index_auto_range():
# Clear previous outputs
pcv.outputs.clear()
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_analyze_index_auto_range")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.savi(hsi=array_data, distance=801)
mask_img = np.ones(np.shape(index_array.array_data), dtype=np.uint8) * 255
pcv.params.debug = None
pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img, min_bin="auto", max_bin="auto")
assert pcv.outputs.observations['default']['mean_index_savi']['value'] > 0
def test_plantcv_hyperspectral_analyze_index_outside_range_warning():
import io
from contextlib import redirect_stdout
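    # Capture stdout to verify that a warning is printed when the user-supplied
    # bin range does not cover the observed index values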
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_analyze_index_auto_range")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.savi(hsi=array_data, distance=801)
mask_img = np.ones(np.shape(index_array.array_data), dtype=np.uint8) * 255
f = io.StringIO()
with redirect_stdout(f):
pcv.params.debug = None
pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img, min_bin=.5, max_bin=.55, label="i")
out = f.getvalue()
# assert os.listdir(cache_dir) is 0
assert out[0:10] == 'WARNING!!!'
def test_plantcv_hyperspectral_analyze_index_bad_input_mask():
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.savi(hsi=array_data, distance=801)
mask_img = cv2.imread(os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_MASK))
with pytest.raises(RuntimeError):
pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img)
def test_plantcv_hyperspectral_analyze_index_bad_input_index():
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.savi(hsi=array_data, distance=801)
mask_img = cv2.imread(os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_MASK), -1)
index_array.array_data = cv2.imread(os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_MASK))
with pytest.raises(RuntimeError):
pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img)
def test_plantcv_hyperspectral_analyze_index_bad_input_datatype():
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
mask_img = cv2.imread(os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_MASK), -1)
with pytest.raises(RuntimeError):
pcv.hyperspectral.analyze_index(index_array=array_data, mask=mask_img)
def test_plantcv_hyperspectral_calibrate():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_calibrate")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
raw = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
white = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_WHITE)
dark = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DARK)
raw = pcv.hyperspectral.read_data(filename=raw)
white = pcv.hyperspectral.read_data(filename=white)
dark = pcv.hyperspectral.read_data(filename=dark)
pcv.params.debug = "plot"
_ = pcv.hyperspectral.calibrate(raw_data=raw, white_reference=white, dark_reference=dark)
pcv.params.debug = "print"
calibrated = pcv.hyperspectral.calibrate(raw_data=raw, white_reference=white, dark_reference=dark)
assert np.shape(calibrated.array_data) == (1, 1600, 978)
def test_plantcv_hyperspectral_extract_wavelength():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_extract_wavelength")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
spectral = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
spectral = pcv.hyperspectral.read_data(filename=spectral)
pcv.params.debug = "plot"
_ = pcv.hyperspectral.extract_wavelength(spectral_data=spectral, wavelength=500)
pcv.params.debug = "print"
new = pcv.hyperspectral.extract_wavelength(spectral_data=spectral, wavelength=500)
assert np.shape(new.array_data) == (1, 1600)
def test_plantcv_hyperspectral_avg_reflectance():
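    # The helper should return one mean reflectance value per wavelength band (978 bands in the test image)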
spectral = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
mask_img = cv2.imread(os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_MASK), -1)
spectral = pcv.hyperspectral.read_data(filename=spectral)
avg_reflect = pcv.hyperspectral._avg_reflectance(spectral, mask=mask_img)
assert len(avg_reflect) == 978
def test_plantcv_hyperspectral_inverse_covariance():
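    # The inverse covariance matrix should be square, with one row and column per wavelength band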
spectral = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
spectral = pcv.hyperspectral.read_data(filename=spectral)
inv_cov = pcv.hyperspectral._inverse_covariance(spectral)
assert np.shape(inv_cov) == (978, 978)
# ########################################
# Tests for the photosynthesis subpackage
# ########################################
def test_plantcv_photosynthesis_read_dat():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_photosynthesis_read_dat")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = "plot"
fluor_filename = os.path.join(FLUOR_TEST_DATA, FLUOR_IMG)
_, _, _ = pcv.photosynthesis.read_cropreporter(filename=fluor_filename)
pcv.params.debug = "print"
fdark, fmin, fmax = pcv.photosynthesis.read_cropreporter(filename=fluor_filename)
assert np.sum(fmin) < np.sum(fmax)
def test_plantcv_photosynthesis_analyze_fvfm():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_fvfm")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# filename = os.path.join(cache_dir, 'plantcv_fvfm_hist.png')
# Read in test data
fdark = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FDARK), -1)
fmin = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMIN), -1)
fmax = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMAX), -1)
fmask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMASK), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.photosynthesis.analyze_fvfm(fdark=fdark, fmin=fmin, fmax=fmax, mask=fmask, bins=1000, label="prefix")
# Test with debug = "plot"
pcv.params.debug = "plot"
fvfm_images = pcv.photosynthesis.analyze_fvfm(fdark=fdark, fmin=fmin, fmax=fmax, mask=fmask, bins=1000)
assert len(fvfm_images) != 0
def test_plantcv_photosynthesis_analyze_fvfm_print_analysis_results():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_fvfm")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
fdark = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FDARK), -1)
fmin = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMIN), -1)
fmax = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMAX), -1)
fmask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMASK), -1)
_ = pcv.photosynthesis.analyze_fvfm(fdark=fdark, fmin=fmin, fmax=fmax, mask=fmask, bins=1000)
result_file = os.path.join(cache_dir, "results.txt")
pcv.print_results(result_file)
pcv.outputs.clear()
assert os.path.exists(result_file)
def test_plantcv_photosynthesis_analyze_fvfm_bad_fdark():
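    # Inflating the Fdark values should cause the Fdark quality-control check to fail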
# Clear previous outputs
pcv.outputs.clear()
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_fvfm")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
fdark = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FDARK), -1)
fmin = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMIN), -1)
fmax = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMAX), -1)
fmask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMASK), -1)
_ = pcv.photosynthesis.analyze_fvfm(fdark=fdark + 3000, fmin=fmin, fmax=fmax, mask=fmask, bins=1000)
check = pcv.outputs.observations['default']['fdark_passed_qc']['value'] is False
assert check
def test_plantcv_photosynthesis_analyze_fvfm_bad_input():
fdark = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
fmin = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMIN), -1)
fmax = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMAX), -1)
fmask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMASK), -1)
with pytest.raises(RuntimeError):
_ = pcv.photosynthesis.analyze_fvfm(fdark=fdark, fmin=fmin, fmax=fmax, mask=fmask, bins=1000)
# ##############################
# Tests for the roi subpackage
# ##############################
def test_plantcv_roi_from_binary_image():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_roi_from_binary_image")
os.mkdir(cache_dir)
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Create a binary image
bin_img = np.zeros(np.shape(rgb_img)[0:2], dtype=np.uint8)
cv2.rectangle(bin_img, (100, 100), (1000, 1000), 255, -1)
# Test with debug = "print"
pcv.params.debug = "print"
pcv.params.debug_outdir = cache_dir
_, _ = pcv.roi.from_binary_image(bin_img=bin_img, img=rgb_img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_, _ = pcv.roi.from_binary_image(bin_img=bin_img, img=rgb_img)
# Test with debug = None
pcv.params.debug = None
roi_contour, roi_hierarchy = pcv.roi.from_binary_image(bin_img=bin_img, img=rgb_img)
# Assert the contours and hierarchy lists contain only the ROI
assert np.shape(roi_contour) == (1, 3600, 1, 2)
def test_plantcv_roi_from_binary_image_grayscale_input():
# Read in a test grayscale image
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Create a binary image
bin_img = np.zeros(np.shape(gray_img)[0:2], dtype=np.uint8)
cv2.rectangle(bin_img, (100, 100), (1000, 1000), 255, -1)
# Test with debug = "plot"
pcv.params.debug = "plot"
roi_contour, roi_hierarchy = pcv.roi.from_binary_image(bin_img=bin_img, img=gray_img)
# Assert the contours and hierarchy lists contain only the ROI
assert np.shape(roi_contour) == (1, 3600, 1, 2)
def test_plantcv_roi_from_binary_image_bad_binary_input():
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Binary input is required but an RGB input is provided
with pytest.raises(RuntimeError):
_, _ = pcv.roi.from_binary_image(bin_img=rgb_img, img=rgb_img)
def test_plantcv_roi_rectangle():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_roi_rectangle")
os.mkdir(cache_dir)
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
pcv.params.debug_outdir = cache_dir
_, _ = pcv.roi.rectangle(x=100, y=100, h=500, w=500, img=rgb_img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_, _ = pcv.roi.rectangle(x=100, y=100, h=500, w=500, img=rgb_img)
# Test with debug = None
pcv.params.debug = None
roi_contour, roi_hierarchy = pcv.roi.rectangle(x=100, y=100, h=500, w=500, img=rgb_img)
# Assert the contours and hierarchy lists contain only the ROI
assert np.shape(roi_contour) == (1, 4, 1, 2)
def test_plantcv_roi_rectangle_grayscale_input():
# Read in a test grayscale image
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = "plot"
pcv.params.debug = "plot"
roi_contour, roi_hierarchy = pcv.roi.rectangle(x=100, y=100, h=500, w=500, img=gray_img)
# Assert the contours and hierarchy lists contain only the ROI
assert np.shape(roi_contour) == (1, 4, 1, 2)
def test_plantcv_roi_rectangle_out_of_frame():
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# The resulting rectangle needs to be within the dimensions of the image
with pytest.raises(RuntimeError):
_, _ = pcv.roi.rectangle(x=100, y=100, h=500, w=3000, img=rgb_img)
def test_plantcv_roi_circle():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_roi_circle")
os.mkdir(cache_dir)
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
pcv.params.debug_outdir = cache_dir
_, _ = pcv.roi.circle(x=100, y=100, r=50, img=rgb_img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_, _ = pcv.roi.circle(x=100, y=100, r=50, img=rgb_img)
# Test with debug = None
pcv.params.debug = None
roi_contour, roi_hierarchy = pcv.roi.circle(x=200, y=225, r=75, img=rgb_img)
# Assert the contours and hierarchy lists contain only the ROI
assert np.shape(roi_contour) == (1, 424, 1, 2)
def test_plantcv_roi_circle_grayscale_input():
# Read in a test grayscale image
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = "plot"
pcv.params.debug = "plot"
roi_contour, roi_hierarchy = pcv.roi.circle(x=200, y=225, r=75, img=gray_img)
# Assert the contours and hierarchy lists contain only the ROI
assert np.shape(roi_contour) == (1, 424, 1, 2)
def test_plantcv_roi_circle_out_of_frame():
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    # The resulting circle needs to be within the dimensions of the image
with pytest.raises(RuntimeError):
_, _ = pcv.roi.circle(x=50, y=225, r=75, img=rgb_img)
def test_plantcv_roi_ellipse():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_roi_ellipse")
os.mkdir(cache_dir)
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
pcv.params.debug_outdir = cache_dir
_, _ = pcv.roi.ellipse(x=200, y=200, r1=75, r2=50, angle=0, img=rgb_img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_, _ = pcv.roi.ellipse(x=200, y=200, r1=75, r2=50, angle=0, img=rgb_img)
# Test with debug = None
pcv.params.debug = None
roi_contour, roi_hierarchy = pcv.roi.ellipse(x=200, y=200, r1=75, r2=50, angle=0, img=rgb_img)
# Assert the contours and hierarchy lists contain only the ROI
assert np.shape(roi_contour) == (1, 360, 1, 2)
def test_plantcv_roi_ellipse_grayscale_input():
# Read in a test grayscale image
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = "plot"
pcv.params.debug = "plot"
roi_contour, roi_hierarchy = pcv.roi.ellipse(x=200, y=200, r1=75, r2=50, angle=0, img=gray_img)
# Assert the contours and hierarchy lists contain only the ROI
assert np.shape(roi_contour) == (1, 360, 1, 2)
def test_plantcv_roi_ellipse_out_of_frame():
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    # The resulting ellipse needs to be within the dimensions of the image
with pytest.raises(RuntimeError):
_, _ = pcv.roi.ellipse(x=50, y=225, r1=75, r2=50, angle=0, img=rgb_img)
def test_plantcv_roi_multi():
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.roi.multi(rgb_img, coord=[(25, 120), (100, 100)], radius=20)
# Test with debug = None
pcv.params.debug = None
rois1, roi_hierarchy1 = pcv.roi.multi(rgb_img, coord=(25, 120), radius=20, spacing=(10, 10), nrows=3, ncols=6)
    # Assert that the list of contours contains 18 ROIs
assert len(rois1) == 18
def test_plantcv_roi_multi_bad_input():
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# The user must input a list of custom coordinates OR inputs to make a grid. Not both
with pytest.raises(RuntimeError):
_, _ = pcv.roi.multi(rgb_img, coord=[(25, 120), (100, 100)], radius=20, spacing=(10, 10), nrows=3, ncols=6)
def test_plantcv_roi_multi_bad_input_oob():
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    # Inputs to make a grid create ROIs that go off the edge of the image
with pytest.raises(RuntimeError):
_, _ = pcv.roi.multi(rgb_img, coord=(25000, 12000), radius=2, spacing=(1, 1), nrows=3, ncols=6)
def test_plantcv_roi_multi_bad_input_oob_list():
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    # All centers in the list must produce ROIs that are inside the image
with pytest.raises(RuntimeError):
_, _ = pcv.roi.multi(rgb_img, coord=[(25000, 25000), (25000, 12000), (12000, 12000)], radius=20)
def test_plantcv_roi_custom():
# Read in test RGB image
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
pcv.params.debug = "plot"
cnt, hier = pcv.roi.custom(img=img, vertices=[[226, 1], [313, 184], [240, 202], [220, 229], [161, 171]])
assert np.shape(cnt) == (1, 5, 2)
def test_plantcv_roi_custom_bad_input():
# Read in test RGB image
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# ROI goes out of bounds
with pytest.raises(RuntimeError):
_ = pcv.roi.custom(img=img, vertices=[[226, -1], [3130, 1848], [2404, 2029], [2205, 2298], [1617, 1761]])
# ##############################
# Tests for the transform subpackage
# ##############################
def test_plantcv_transform_get_color_matrix():
# load in target_matrix
matrix_file = np.load(os.path.join(TEST_DATA, TEST_TARGET_MATRIX), encoding="latin1")
matrix_compare = matrix_file['arr_0']
# Read in rgb_img and gray-scale mask
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_MASK), -1)
# The result should be a len(np.unique(mask))-1 x 4 matrix
headers, matrix = pcv.transform.get_color_matrix(rgb_img, mask)
assert np.array_equal(matrix, matrix_compare)
def test_plantcv_transform_get_color_matrix_img():
# Read in two gray-scale images
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_MASK), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_MASK), -1)
# The input for rgb_img needs to be an RGB image
with pytest.raises(RuntimeError):
_, _ = pcv.transform.get_color_matrix(rgb_img, mask)
def test_plantcv_transform_get_color_matrix_mask():
# Read in two gray-scale images
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_MASK))
    # The input for mask needs to be a gray-scale image
with pytest.raises(RuntimeError):
_, _ = pcv.transform.get_color_matrix(rgb_img, mask)
def test_plantcv_transform_get_matrix_m():
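    # The computed M and B matrices should match the saved reference matrices (compared after rounding)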
# load in comparison matrices
matrix_m_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_M1), encoding="latin1")
matrix_compare_m = matrix_m_file['arr_0']
matrix_b_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_B1), encoding="latin1")
matrix_compare_b = matrix_b_file['arr_0']
# read in matrices
t_matrix_file = np.load(os.path.join(TEST_DATA, TEST_TARGET_MATRIX), encoding="latin1")
t_matrix = t_matrix_file['arr_0']
s_matrix_file = np.load(os.path.join(TEST_DATA, TEST_SOURCE1_MATRIX), encoding="latin1")
s_matrix = s_matrix_file['arr_0']
# apply matrices to function
matrix_a, matrix_m, matrix_b = pcv.transform.get_matrix_m(t_matrix, s_matrix)
matrix_compare_m = np.rint(matrix_compare_m)
matrix_compare_b = np.rint(matrix_compare_b)
matrix_m = np.rint(matrix_m)
matrix_b = np.rint(matrix_b)
assert np.array_equal(matrix_m, matrix_compare_m) and np.array_equal(matrix_b, matrix_compare_b)
def test_plantcv_transform_get_matrix_m_unequal_data():
# load in comparison matrices
matrix_m_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_M2), encoding="latin1")
matrix_compare_m = matrix_m_file['arr_0']
matrix_b_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_B2), encoding="latin1")
matrix_compare_b = matrix_b_file['arr_0']
# read in matrices
t_matrix_file = np.load(os.path.join(TEST_DATA, TEST_TARGET_MATRIX), encoding="latin1")
t_matrix = t_matrix_file['arr_0']
s_matrix_file = np.load(os.path.join(TEST_DATA, TEST_SOURCE2_MATRIX), encoding="latin1")
s_matrix = s_matrix_file['arr_0']
# apply matrices to function
matrix_a, matrix_m, matrix_b = pcv.transform.get_matrix_m(t_matrix, s_matrix)
matrix_compare_m = np.rint(matrix_compare_m)
matrix_compare_b = np.rint(matrix_compare_b)
matrix_m = np.rint(matrix_m)
matrix_b = np.rint(matrix_b)
assert np.array_equal(matrix_m, matrix_compare_m) and np.array_equal(matrix_b, matrix_compare_b)
def test_plantcv_transform_calc_transformation_matrix():
# load in comparison matrices
matrix_file = np.load(os.path.join(TEST_DATA, TEST_TRANSFORM1), encoding="latin1")
matrix_compare = matrix_file['arr_0']
# read in matrices
matrix_m_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_M1), encoding="latin1")
matrix_m = matrix_m_file['arr_0']
matrix_b_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_B1), encoding="latin1")
matrix_b = matrix_b_file['arr_0']
# apply to function
_, matrix_t = pcv.transform.calc_transformation_matrix(matrix_m, matrix_b)
matrix_t = np.rint(matrix_t)
matrix_compare = np.rint(matrix_compare)
assert np.array_equal(matrix_t, matrix_compare)
def test_plantcv_transform_calc_transformation_matrix_b_incorrect():
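    # Transposing matrix_b gives it a shape incompatible with matrix_m, which should raise a RuntimeError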
# read in matrices
matrix_m_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_M1), encoding="latin1")
matrix_m = matrix_m_file['arr_0']
matrix_b_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_B1), encoding="latin1")
matrix_b = matrix_b_file['arr_0']
matrix_b = np.asmatrix(matrix_b, float)
with pytest.raises(RuntimeError):
_, _ = pcv.transform.calc_transformation_matrix(matrix_m, matrix_b.T)
def test_plantcv_transform_calc_transformation_matrix_not_mult():
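    # Truncating matrix_b so it can no longer be multiplied with matrix_m should raise a RuntimeError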
# read in matrices
matrix_m_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_M1), encoding="latin1")
matrix_m = matrix_m_file['arr_0']
matrix_b_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_B1), encoding="latin1")
matrix_b = matrix_b_file['arr_0']
with pytest.raises(RuntimeError):
_, _ = pcv.transform.calc_transformation_matrix(matrix_m, matrix_b[:3])
def test_plantcv_transform_calc_transformation_matrix_not_mat():
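    # Passing 1-D slices instead of 2-D matrices should raise a RuntimeError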
# read in matrices
matrix_m_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_M1), encoding="latin1")
matrix_m = matrix_m_file['arr_0']
matrix_b_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_B1), encoding="latin1")
matrix_b = matrix_b_file['arr_0']
with pytest.raises(RuntimeError):
_, _ = pcv.transform.calc_transformation_matrix(matrix_m[:, 1], matrix_b[:, 1])
def test_plantcv_transform_apply_transformation():
# load corrected image to compare
corrected_compare = cv2.imread(os.path.join(TEST_DATA, TEST_S1_CORRECTED))
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform")
os.mkdir(cache_dir)
# Make image and mask directories in the cache directory
imgdir = os.path.join(cache_dir, "images")
# read in matrices
matrix_t_file = np.load(os.path.join(TEST_DATA, TEST_TRANSFORM1), encoding="latin1")
matrix_t = matrix_t_file['arr_0']
# read in images
target_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
source_img = cv2.imread(os.path.join(TEST_DATA, TEST_SOURCE1_IMG))
# Test with debug = "print"
pcv.params.debug = "print"
pcv.params.debug_outdir = imgdir
_ = pcv.transform.apply_transformation_matrix(source_img, target_img, matrix_t)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.transform.apply_transformation_matrix(source_img, target_img, matrix_t)
# Test with debug = None
pcv.params.debug = None
corrected_img = pcv.transform.apply_transformation_matrix(source_img, target_img, matrix_t)
    # Assert that the corrected image matches the expected corrected image
assert np.array_equal(corrected_img, corrected_compare)
def test_plantcv_transform_apply_transformation_incorrect_t():
# read in matrices
matrix_t_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_B1), encoding="latin1")
matrix_t = matrix_t_file['arr_0']
# read in images
target_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
source_img = cv2.imread(os.path.join(TEST_DATA, TEST_SOURCE1_IMG))
with pytest.raises(RuntimeError):
_ = pcv.transform.apply_transformation_matrix(source_img, target_img, matrix_t)
def test_plantcv_transform_apply_transformation_incorrect_img():
# read in matrices
matrix_t_file = np.load(os.path.join(TEST_DATA, TEST_TRANSFORM1), encoding="latin1")
matrix_t = matrix_t_file['arr_0']
# read in images
target_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
source_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_MASK), -1)
with pytest.raises(RuntimeError):
_ = pcv.transform.apply_transformation_matrix(source_img, target_img, matrix_t)
def test_plantcv_transform_save_matrix():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform")
os.mkdir(cache_dir)
# read in matrix
matrix_t_file = np.load(os.path.join(TEST_DATA, TEST_TRANSFORM1), encoding="latin1")
matrix_t = matrix_t_file['arr_0']
# .npz filename
filename = os.path.join(cache_dir, 'test.npz')
pcv.transform.save_matrix(matrix_t, filename)
    assert os.path.exists(filename)
def test_plantcv_transform_save_matrix_incorrect_filename():
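    # A filename without a .npz extension should raise a RuntimeError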
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform")
os.mkdir(cache_dir)
# read in matrix
matrix_t_file = np.load(os.path.join(TEST_DATA, TEST_TRANSFORM1), encoding="latin1")
matrix_t = matrix_t_file['arr_0']
# .npz filename
filename = "test"
with pytest.raises(RuntimeError):
pcv.transform.save_matrix(matrix_t, filename)
def test_plantcv_transform_load_matrix():
# read in matrix_t
matrix_t_file = np.load(os.path.join(TEST_DATA, TEST_TRANSFORM1), encoding="latin1")
matrix_t = matrix_t_file['arr_0']
# test load function with matrix_t
matrix_t_loaded = pcv.transform.load_matrix(os.path.join(TEST_DATA, TEST_TRANSFORM1))
assert np.array_equal(matrix_t, matrix_t_loaded)
def test_plantcv_transform_correct_color():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform")
os.mkdir(cache_dir)
# load corrected image to compare
corrected_compare = cv2.imread(os.path.join(TEST_DATA, TEST_S1_CORRECTED))
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_correct_color")
os.mkdir(cache_dir)
# Make image and mask directories in the cache directory
imgdir = os.path.join(cache_dir, "images")
matdir = os.path.join(cache_dir, "saved_matrices")
# Read in target, source, and gray-scale mask
target_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
source_img = cv2.imread(os.path.join(TEST_DATA, TEST_SOURCE1_IMG))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_MASK), -1)
output_path = os.path.join(matdir)
# Test with debug = "print"
pcv.params.debug = "print"
pcv.params.debug_outdir = imgdir
_, _, _, _ = pcv.transform.correct_color(target_img, mask, source_img, mask, cache_dir)
# Test with debug = "plot"
pcv.params.debug = "plot"
_, _, _, _ = pcv.transform.correct_color(target_img, mask, source_img, mask, output_path)
# Test with debug = None
pcv.params.debug = None
_, _, matrix_t, corrected_img = pcv.transform.correct_color(target_img, mask, source_img, mask, output_path)
    # Assert that the corrected image matches the expected output and that all matrices were saved
assert all([np.array_equal(corrected_img, corrected_compare),
os.path.exists(os.path.join(output_path, "target_matrix.npz")) is True,
os.path.exists(os.path.join(output_path, "source_matrix.npz")) is True,
os.path.exists(os.path.join(output_path, "transformation_matrix.npz")) is True])
def test_plantcv_transform_correct_color_output_dne():
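    # correct_color should create the output directory when it does not already exist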
# load corrected image to compare
corrected_compare = cv2.imread(os.path.join(TEST_DATA, TEST_S1_CORRECTED))
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_correct_color_output_dne")
os.mkdir(cache_dir)
# Make image and mask directories in the cache directory
imgdir = os.path.join(cache_dir, "images")
# Read in target, source, and gray-scale mask
target_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
source_img = cv2.imread(os.path.join(TEST_DATA, TEST_SOURCE1_IMG))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_MASK), -1)
output_path = os.path.join(cache_dir, "saved_matrices_1") # output_directory that does not currently exist
# Test with debug = "print"
pcv.params.debug = "print"
pcv.params.debug_outdir = imgdir
_, _, _, _ = pcv.transform.correct_color(target_img, mask, source_img, mask, output_path)
# Test with debug = "plot"
pcv.params.debug = "plot"
_, _, _, _ = pcv.transform.correct_color(target_img, mask, source_img, mask, output_path)
# Test with debug = None
pcv.params.debug = None
_, _, matrix_t, corrected_img = pcv.transform.correct_color(target_img, mask, source_img, mask, output_path)
    # Assert that the corrected image matches the expected output and that all matrices were saved
assert all([np.array_equal(corrected_img, corrected_compare),
os.path.exists(os.path.join(output_path, "target_matrix.npz")) is True,
os.path.exists(os.path.join(output_path, "source_matrix.npz")) is True,
os.path.exists(os.path.join(output_path, "transformation_matrix.npz")) is True])
def test_plantcv_transform_create_color_card_mask():
# Load target image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_create_color_card_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=(166, 166),
spacing=(21, 21), nrows=6, ncols=4, exclude=[20, 0])
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=(166, 166),
spacing=(21, 21), nrows=6, ncols=4, exclude=[20, 0])
# Test with debug = None
pcv.params.debug = None
mask = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=(166, 166),
spacing=(21, 21), nrows=6, ncols=4, exclude=[20, 0])
assert all([i == j] for i, j in zip(np.unique(mask), np.array([0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110,
120, 130, 140, 150, 160, 170, 180, 190, 200, 210,
220], dtype=np.uint8)))
def test_plantcv_transform_quick_color_check():
# Load target image
t_matrix = np.load(os.path.join(TEST_DATA, TEST_TARGET_MATRIX), encoding="latin1")
target_matrix = t_matrix['arr_0']
s_matrix = np.load(os.path.join(TEST_DATA, TEST_SOURCE1_MATRIX), encoding="latin1")
source_matrix = s_matrix['arr_0']
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_quick_color_check")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Test with debug = "print"
pcv.params.debug = "print"
pcv.transform.quick_color_check(target_matrix, source_matrix, num_chips=22)
# Test with debug = "plot"
pcv.params.debug = "plot"
pcv.transform.quick_color_check(target_matrix, source_matrix, num_chips=22)
# Test with debug = None
pcv.params.debug = None
pcv.transform.quick_color_check(target_matrix, source_matrix, num_chips=22)
assert os.path.exists(os.path.join(cache_dir, "color_quick_check.png"))
def test_plantcv_transform_find_color_card():
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_find_color_card")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
df, start, space = pcv.transform.find_color_card(rgb_img=rgb_img, threshold_type='adaptgauss', blurry=False,
threshvalue=90)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=start,
spacing=space, nrows=6, ncols=4, exclude=[20, 0])
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=start,
spacing=space, nrows=6, ncols=4, exclude=[20, 0])
# Test with debug = None
pcv.params.debug = None
mask = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=start,
spacing=space, nrows=6, ncols=4, exclude=[20, 0])
assert all([i == j] for i, j in zip(np.unique(mask), np.array([0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110,
120, 130, 140, 150, 160, 170, 180, 190, 200, 210,
220], dtype=np.uint8)))
def test_plantcv_transform_find_color_card_optional_parameters():
# Clear previous outputs
pcv.outputs.clear()
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG_COLOR_CARD))
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_find_color_card")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
    # Test with threshold_type='normal'
df1, start1, space1 = pcv.transform.find_color_card(rgb_img=rgb_img, threshold_type='normal', blurry=True,
background='light', threshvalue=90, label="prefix")
assert pcv.outputs.observations["prefix"]["color_chip_size"]["value"] > 15000
def test_plantcv_transform_find_color_card_otsu():
# Clear previous outputs
pcv.outputs.clear()
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG_COLOR_CARD))
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_find_color_card_otsu")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
    # Test with threshold_type='otsu'
df1, start1, space1 = pcv.transform.find_color_card(rgb_img=rgb_img, threshold_type='otsu', blurry=True,
background='light', threshvalue=90, label="prefix")
assert pcv.outputs.observations["prefix"]["color_chip_size"]["value"] > 15000
def test_plantcv_transform_find_color_card_optional_size_parameters():
# Clear previous outputs
pcv.outputs.clear()
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG_COLOR_CARD))
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_find_color_card")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
_, _, _ = pcv.transform.find_color_card(rgb_img=rgb_img, record_chip_size="mean")
assert pcv.outputs.observations["default"]["color_chip_size"]["value"] > 15000
def test_plantcv_transform_find_color_card_optional_size_parameters_none():
# Clear previous outputs
pcv.outputs.clear()
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG_COLOR_CARD))
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_find_color_card")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
_, _, _ = pcv.transform.find_color_card(rgb_img=rgb_img, record_chip_size=None)
assert pcv.outputs.observations.get("default") is None
def test_plantcv_transform_find_color_card_bad_record_chip_size():
# Clear previous outputs
pcv.outputs.clear()
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
pcv.params.debug = None
_, _, _ = pcv.transform.find_color_card(rgb_img=rgb_img, record_chip_size='averageeeed')
assert pcv.outputs.observations["default"]["color_chip_size"]["value"] is None
def test_plantcv_transform_find_color_card_bad_thresh_input():
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
with pytest.raises(RuntimeError):
pcv.params.debug = None
_, _, _ = pcv.transform.find_color_card(rgb_img=rgb_img, threshold_type='gaussian')
def test_plantcv_transform_find_color_card_bad_background_input():
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
with pytest.raises(RuntimeError):
pcv.params.debug = None
_, _, _ = pcv.transform.find_color_card(rgb_img=rgb_img, background='lite')
def test_plantcv_transform_find_color_card_bad_colorcard():
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG_WITH_HEXAGON))
with pytest.raises(RuntimeError):
pcv.params.debug = None
_, _, _ = pcv.transform.find_color_card(rgb_img=rgb_img)
def test_plantcv_transform_rescale():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_rescale")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.transform.rescale(gray_img=gray_img, min_value=0, max_value=100)
pcv.params.debug = "plot"
rescaled_img = pcv.transform.rescale(gray_img=gray_img, min_value=0, max_value=100)
assert max(np.unique(rescaled_img)) == 100
def test_plantcv_transform_rescale_bad_input():
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
with pytest.raises(RuntimeError):
_ = pcv.transform.rescale(gray_img=rgb_img)
def test_plantcv_transform_resize():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_trancform_resize")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY_SMALL), -1)
size = (100, 100)
# Test with debug "print"
pcv.params.debug = "print"
_ = pcv.transform.resize(img=gray_img, size=size, interpolation="auto")
# Test with debug "plot"
pcv.params.debug = "plot"
resized_img = pcv.transform.resize(img=gray_img, size=size, interpolation="auto")
assert resized_img.shape == size
def test_plantcv_transform_resize_unsupported_method():
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY_SMALL), -1)
with pytest.raises(RuntimeError):
_ = pcv.transform.resize(img=gray_img, size=(100, 100), interpolation="mymethod")
def test_plantcv_transform_resize_crop():
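    # With interpolation=None, a target size smaller than the input crops the image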
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY_SMALL), -1)
size = (20, 20)
resized_im = pcv.transform.resize(img=gray_img, size=size, interpolation=None)
assert resized_im.shape == size
def test_plantcv_transform_resize_pad():
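    # With interpolation=None, a target size larger than the input pads the image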
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY_SMALL), -1)
size = (100, 100)
resized_im = pcv.transform.resize(img=gray_img, size=size, interpolation=None)
assert resized_im.shape == size
def test_plantcv_transform_resize_pad_crop_color():
color_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY_SMALL))
size = (100, 100)
resized_im = pcv.transform.resize(img=color_img, size=size, interpolation=None)
assert resized_im.shape == (size[1], size[0], 3)
def test_plantcv_transform_resize_factor():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_trancform_resize_factor")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY_SMALL), -1)
# Resizing factors
factor_x = 0.5
factor_y = 0.2
# Test with debug "print"
pcv.params.debug = "print"
_ = pcv.transform.resize_factor(img=gray_img, factors=(factor_x, factor_y), interpolation="auto")
# Test with debug "plot"
pcv.params.debug = "plot"
resized_img = pcv.transform.resize_factor(img=gray_img, factors=(factor_x, factor_y), interpolation="auto")
output_size = resized_img.shape
expected_size = (int(gray_img.shape[0] * factor_y), int(gray_img.shape[1] * factor_x))
assert output_size == expected_size
def test_plantcv_transform_resize_factor_bad_input():
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY_SMALL), -1)
with pytest.raises(RuntimeError):
_ = pcv.transform.resize_factor(img=gray_img, factors=(0, 2), interpolation="auto")
def test_plantcv_transform_nonuniform_illumination_rgb():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_nonuniform_illumination")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
pcv.params.debug = "plot"
_ = pcv.transform.nonuniform_illumination(img=rgb_img, ksize=11)
pcv.params.debug = "print"
corrected = pcv.transform.nonuniform_illumination(img=rgb_img, ksize=11)
assert np.mean(corrected) < np.mean(rgb_img)
def test_plantcv_transform_nonuniform_illumination_gray():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_nonuniform_illumination")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
    # Load gray image
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
pcv.params.debug = "plot"
_ = pcv.transform.nonuniform_illumination(img=gray_img, ksize=11)
pcv.params.debug = "print"
corrected = pcv.transform.nonuniform_illumination(img=gray_img, ksize=11)
assert np.shape(corrected) == np.shape(gray_img)
# ##############################
# Tests for the threshold subpackage
# ##############################
@pytest.mark.parametrize("objtype", ["dark", "light"])
def test_plantcv_threshold_binary(objtype):
# Read in test data
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    # Test with the parametrized object type
pcv.params.debug = None
binary_img = pcv.threshold.binary(gray_img=gray_img, threshold=25, max_value=255, object_type=objtype)
    # Assert that the output image has the dimensions of the input image
    assert np.shape(binary_img) == np.shape(gray_img)
    # Assert that the image is binary
    assert set(np.unique(binary_img)).issubset({0, 255})
def test_plantcv_threshold_binary_incorrect_object_type():
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
pcv.params.debug = None
_ = pcv.threshold.binary(gray_img=gray_img, threshold=25, max_value=255, object_type="lite")
@pytest.mark.parametrize("objtype", ["dark", "light"])
def test_plantcv_threshold_gaussian(objtype):
# Read in test data
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    # Test with the parametrized object type
pcv.params.debug = None
binary_img = pcv.threshold.gaussian(gray_img=gray_img, max_value=255, object_type=objtype)
    # Assert that the output image has the dimensions of the input image
    assert np.shape(binary_img) == np.shape(gray_img)
    # Assert that the image is binary
    assert set(np.unique(binary_img)).issubset({0, 255})
def test_plantcv_threshold_gaussian_incorrect_object_type():
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
pcv.params.debug = None
_ = pcv.threshold.gaussian(gray_img=gray_img, max_value=255, object_type="lite")
@pytest.mark.parametrize("objtype", ["dark", "light"])
def test_plantcv_threshold_mean(objtype):
# Read in test data
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    # Test with the parametrized object type
pcv.params.debug = None
binary_img = pcv.threshold.mean(gray_img=gray_img, max_value=255, object_type=objtype)
    # Assert that the output image has the dimensions of the input image
    assert np.shape(binary_img) == np.shape(gray_img)
    # Assert that the image is binary
    assert set(np.unique(binary_img)).issubset({0, 255})
def test_plantcv_threshold_mean_incorrect_object_type():
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
pcv.params.debug = None
_ = pcv.threshold.mean(gray_img=gray_img, max_value=255, object_type="lite")
@pytest.mark.parametrize("objtype", ["dark", "light"])
def test_plantcv_threshold_otsu(objtype):
# Read in test data
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GREENMAG), -1)
    # Test with the parametrized object type
pcv.params.debug = None
binary_img = pcv.threshold.otsu(gray_img=gray_img, max_value=255, object_type=objtype)
    # Assert that the output image has the dimensions of the input image
    assert np.shape(binary_img) == np.shape(gray_img)
    # Assert that the image is binary
    assert set(np.unique(binary_img)).issubset({0, 255})
def test_plantcv_threshold_otsu_incorrect_object_type():
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
pcv.params.debug = None
_ = pcv.threshold.otsu(gray_img=gray_img, max_value=255, object_type="lite")
@pytest.mark.parametrize("channel,lower_thresh,upper_thresh", [["HSV", [0, 0, 0], [255, 255, 255]],
["LAB", [0, 0, 0], [255, 255, 255]],
["RGB", [0, 0, 0], [255, 255, 255]],
["GRAY", [0], [255]]])
def test_plantcv_threshold_custom_range_rgb(channel, lower_thresh, upper_thresh):
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = None
pcv.params.debug = None
mask, binary_img = pcv.threshold.custom_range(img, lower_thresh=lower_thresh, upper_thresh=upper_thresh,
channel=channel)
# Assert that the output image has the dimensions of the input image
if all([i == j] for i, j in zip(np.shape(binary_img), TEST_GRAY_DIM)):
# Assert that the image is binary
if all([i == j] for i, j in zip(np.unique(binary_img), [0, 255])):
assert 1
else:
assert 0
else:
assert 0
def test_plantcv_threshold_custom_range_grayscale():
# Read in test data
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = None
pcv.params.debug = None
    # Test channel='gray'
mask, binary_img = pcv.threshold.custom_range(gray_img, lower_thresh=[0], upper_thresh=[255], channel='gray')
# Assert that the output image has the dimensions of the input image
if all([i == j] for i, j in zip(np.shape(binary_img), TEST_GRAY_DIM)):
# Assert that the image is binary
if all([i == j] for i, j in zip(np.unique(binary_img), [0, 255])):
assert 1
else:
assert 0
else:
assert 0
def test_plantcv_threshold_custom_range_bad_input_hsv():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
_, _ = pcv.threshold.custom_range(img, lower_thresh=[0, 0], upper_thresh=[2, 2, 2, 2], channel='HSV')
def test_plantcv_threshold_custom_range_bad_input_rgb():
# Read in test data
pcv.params.debug = None
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
_, _ = pcv.threshold.custom_range(img, lower_thresh=[0, 0], upper_thresh=[2, 2, 2, 2], channel='RGB')
def test_plantcv_threshold_custom_range_bad_input_lab():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
_, _ = pcv.threshold.custom_range(img, lower_thresh=[0, 0], upper_thresh=[2, 2, 2], channel='LAB')
def test_plantcv_threshold_custom_range_bad_input_gray():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
_, _ = pcv.threshold.custom_range(img, lower_thresh=[0, 0], upper_thresh=[2], channel='gray')
def test_plantcv_threshold_custom_range_bad_input_channel():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
_, _ = pcv.threshold.custom_range(img, lower_thresh=[0], upper_thresh=[2], channel='CMYK')
@pytest.mark.parametrize("channel", ["all", "any"])
def test_plantcv_threshold_saturation(channel):
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = None
pcv.params.debug = None
thresh = pcv.threshold.saturation(rgb_img=rgb_img, threshold=254, channel=channel)
assert len(np.unique(thresh)) == 2
def test_plantcv_threshold_saturation_bad_input():
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
_ = pcv.threshold.saturation(rgb_img=rgb_img, threshold=254, channel="red")
def test_plantcv_threshold_triangle():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_threshold_triangle")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
pcv.params.debug = None
_ = pcv.threshold.triangle(gray_img=gray_img, max_value=255, object_type="dark", xstep=10)
pcv.params.debug = "plot"
_ = pcv.threshold.triangle(gray_img=gray_img, max_value=255, object_type="light", xstep=10)
pcv.params.debug = "print"
binary_img = pcv.threshold.triangle(gray_img=gray_img, max_value=255, object_type="light", xstep=10)
# Assert that the output image has the dimensions of the input image
    if all(i == j for i, j in zip(np.shape(binary_img), TEST_GRAY_DIM)):
        # Assert that the image is binary
        if all(i == j for i, j in zip(np.unique(binary_img), [0, 255])):
assert 1
else:
assert 0
else:
assert 0
def test_plantcv_threshold_triangle_incorrect_object_type():
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
pcv.params.debug = None
_ = pcv.threshold.triangle(gray_img=gray_img, max_value=255, object_type="lite", xstep=10)
def test_plantcv_threshold_texture():
# Test with debug = None
pcv.params.debug = None
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY_SMALL), -1)
binary_img = pcv.threshold.texture(gray_img, ksize=6, threshold=7, offset=3, texture_method='dissimilarity',
borders='nearest', max_value=255)
# Assert that the output image has the dimensions of the input image
    if all(i == j for i, j in zip(np.shape(binary_img), TEST_GRAY_DIM)):
        # Assert that the image is binary
        if all(i == j for i, j in zip(np.unique(binary_img), [0, 255])):
assert 1
else:
assert 0
else:
assert 0
@pytest.mark.parametrize("bad_type", ["native", "nan", "inf"])
def test_plantcv_threshold_mask_bad(bad_type):
# Create a synthetic bad image
bad_img = np.reshape(np.random.rand(25), (5, 5))
bad_img[2, 2] = np.inf
bad_img[2, 3] = np.nan
sz = np.shape(bad_img)
pcv.params.debug = None
mask = pcv.threshold.mask_bad(bad_img, bad_type=bad_type)
assert((np.shape(mask) == sz) and (len(np.unique(mask)) == 2))
def test_plantcv_threshold_mask_bad_native_bad_input():
# Create a synthetic bad image
bad_img = np.reshape(np.random.rand(25), (5, 5))
sz = np.shape(bad_img)
mask10 = pcv.threshold.mask_bad(bad_img, bad_type='native')
assert mask10.all() == np.zeros(sz, dtype='uint8').all()
def test_plantcv_threshold_mask_bad_nan_bad_input():
# Create a synthetic bad image
bad_img = np.reshape(np.random.rand(25), (5, 5))
bad_img[2, 2] = np.inf
sz = np.shape(bad_img)
mask11 = pcv.threshold.mask_bad(bad_img, bad_type='nan')
assert mask11.all() == np.zeros(sz, dtype='uint8').all()
def test_plantcv_threshold_mask_bad_input_color_img():
# Read in test data
bad_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
pcv.threshold.mask_bad(bad_img, bad_type='nan')
# ###################################
# Tests for the visualize subpackage
# ###################################
def test_plantcv_visualize_auto_threshold_methods_bad_input():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_auto_threshold_methods")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
_ = pcv.visualize.auto_threshold_methods(gray_img=img)
def test_plantcv_visualize_auto_threshold_methods():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_auto_threshold_methods")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
pcv.params.debug = "print"
_ = pcv.visualize.auto_threshold_methods(gray_img=img)
pcv.params.debug = "plot"
labeled_imgs = pcv.visualize.auto_threshold_methods(gray_img=img)
assert len(labeled_imgs) == 5 and np.shape(labeled_imgs[0])[0] == np.shape(img)[0]
@pytest.mark.parametrize("debug,axes", [["print", True], ["plot", False]])
def test_plantcv_visualize_pseudocolor(debug, axes, tmpdir):
# Create a tmp directory
cache_dir = tmpdir.mkdir("sub")
pcv.params.debug_outdir = cache_dir
# Input image
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
r, c = img.shape
    # generate 100 "bad" pixels
mask_bad = np.zeros((r, c), dtype=np.uint8)
mask_bad = np.reshape(mask_bad, (-1, 1))
mask_bad[0:100] = 255
mask_bad = np.reshape(mask_bad, (r, c))
# Debug mode
pcv.params.debug = debug
pseudo_img = pcv.visualize.pseudocolor(gray_img=img, mask=None, title="Pseudocolored image", axes=axes,
bad_mask=mask_bad)
# Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(pseudo_img), TEST_BINARY_DIM))
@pytest.mark.parametrize("bkgrd,axes,pad", [["image", True, "auto"], ["white", False, 1], ["black", True, "auto"]])
def test_plantcv_visualize_pseudocolor_mask(bkgrd, axes, pad):
# Test with debug = None
pcv.params.debug = None
# Input image
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Input mask
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Input contours
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
obj_contour = contours_npz['arr_0']
r, c = img.shape
    # generate 100 "bad" pixels
mask_bad = np.zeros((r, c), dtype=np.uint8)
mask_bad = np.reshape(mask_bad, (-1, 1))
mask_bad[0:100] = 255
mask_bad = np.reshape(mask_bad, (r, c))
pseudo_img = pcv.visualize.pseudocolor(gray_img=img, obj=obj_contour, mask=mask, background=bkgrd,
bad_mask=mask_bad, title="Pseudocolored image", axes=axes, obj_padding=pad)
# Assert that the output image has the dimensions of the input image
    if all(i == j for i, j in zip(np.shape(pseudo_img), TEST_BINARY_DIM)):
assert 1
else:
assert 0
def test_plantcv_visualize_pseudocolor_bad_input():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_pseudocolor")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
_ = pcv.visualize.pseudocolor(gray_img=img)
def test_plantcv_visualize_pseudocolor_bad_background():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_pseudocolor_bad_background")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
with pytest.raises(RuntimeError):
_ = pcv.visualize.pseudocolor(gray_img=img, mask=mask, background="pink")
def test_plantcv_visualize_pseudocolor_bad_padding():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_pseudocolor_bad_background")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
obj_contour = contours_npz['arr_0']
with pytest.raises(RuntimeError):
_ = pcv.visualize.pseudocolor(gray_img=img, mask=mask, obj=obj_contour, obj_padding="pink")
def test_plantcv_visualize_pseudocolor_bad_mask():
# Test with debug = None
pcv.params.debug = None
def test_plantcv_visualize_colorize_masks():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_naive_bayes_classifier")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
mask = pcv.naive_bayes_classifier(rgb_img=img, pdf_file=os.path.join(TEST_DATA, TEST_PDFS))
_ = pcv.visualize.colorize_masks(masks=[mask['plant'], mask['background']],
colors=[(0, 0, 0), (1, 1, 1)])
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.visualize.colorize_masks(masks=[mask['plant'], mask['background']],
colors=[(0, 0, 0), (1, 1, 1)])
# Test with debug = None
pcv.params.debug = None
colored_img = pcv.visualize.colorize_masks(masks=[mask['plant'], mask['background']],
colors=['red', 'blue'])
    # Assert that the output image is not all black
assert not np.average(colored_img) == 0
def test_plantcv_visualize_colorize_masks_bad_input_empty():
with pytest.raises(RuntimeError):
_ = pcv.visualize.colorize_masks(masks=[], colors=[])
def test_plantcv_visualize_colorize_masks_bad_input_mismatch_number():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
mask = pcv.naive_bayes_classifier(rgb_img=img, pdf_file=os.path.join(TEST_DATA, TEST_PDFS))
with pytest.raises(RuntimeError):
_ = pcv.visualize.colorize_masks(masks=[mask['plant'], mask['background']], colors=['red', 'green', 'blue'])
def test_plantcv_visualize_colorize_masks_bad_color_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
mask = pcv.naive_bayes_classifier(rgb_img=img, pdf_file=os.path.join(TEST_DATA, TEST_PDFS))
with pytest.raises(RuntimeError):
_ = pcv.visualize.colorize_masks(masks=[mask['plant'], mask['background']], colors=['red', 1.123])
@pytest.mark.parametrize("bins,lb,ub,title", [[200, 0, 255, "Include Title"], [100, None, None, None]])
def test_plantcv_visualize_histogram(bins, lb, ub, title):
# Test with debug = None
pcv.params.debug = None
# Read test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
fig_hist, hist_df = pcv.visualize.histogram(img=img, mask=mask, bins=bins, lower_bound=lb, upper_bound=ub,
title=title, hist_data=True)
assert all([isinstance(fig_hist, ggplot), isinstance(hist_df, pd.core.frame.DataFrame)])
def test_plantcv_visualize_histogram_no_mask():
# Test with debug = None
pcv.params.debug = None
# Read test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
fig_hist = pcv.visualize.histogram(img=img, mask=None)
assert isinstance(fig_hist, ggplot)
def test_plantcv_visualize_histogram_rgb_img():
# Test with debug = None
pcv.params.debug = None
# Test RGB input image
img_rgb = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
fig_hist = pcv.visualize.histogram(img=img_rgb)
assert isinstance(fig_hist, ggplot)
def test_plantcv_visualize_histogram_multispectral_img():
# Test with debug = None
pcv.params.debug = None
# Test multi-spectral image
img_rgb = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
img_multi = np.concatenate((img_rgb, img_rgb), axis=2)
fig_hist = pcv.visualize.histogram(img=img_multi)
assert isinstance(fig_hist, ggplot)
def test_plantcv_visualize_histogram_no_img():
with pytest.raises(RuntimeError):
_ = pcv.visualize.histogram(img=None)
def test_plantcv_visualize_histogram_array():
# Read test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
_ = pcv.visualize.histogram(img=img[0, :])
def test_plantcv_visualize_clustered_contours():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_plot_hist")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_VISUALIZE_BACKGROUND), -1)
roi_objects = np.load(os.path.join(TEST_DATA, TEST_INPUT_VISUALIZE_CONTOUR), encoding="latin1")
hierarchy = np.load(os.path.join(TEST_DATA, TEST_INPUT_VISUALIZE_HIERARCHY), encoding="latin1")
cluster_i = np.load(os.path.join(TEST_DATA, TEST_INPUT_VISUALIZE_CLUSTERS), encoding="latin1")
objs = [roi_objects[arr_n] for arr_n in roi_objects]
obj_hierarchy = hierarchy['arr_0']
cluster = [cluster_i[arr_n] for arr_n in cluster_i]
# Test in plot mode
pcv.params.debug = "plot"
# Reset the saved color scale (can be saved between tests)
pcv.params.saved_color_scale = None
_ = pcv.visualize.clustered_contours(img=img1, grouped_contour_indices=cluster, roi_objects=objs,
roi_obj_hierarchy=obj_hierarchy, bounding=False)
# Test in print mode
pcv.params.debug = "print"
# Reset the saved color scale (can be saved between tests)
pcv.params.saved_color_scale = None
cluster_img = pcv.visualize.clustered_contours(img=img, grouped_contour_indices=cluster, roi_objects=objs,
roi_obj_hierarchy=obj_hierarchy, nrow=2, ncol=2, bounding=True)
assert np.sum(cluster_img) > np.sum(img)
def test_plantcv_visualize_colorspaces():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_plot_hist")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
pcv.params.debug = "plot"
vis_img_small = pcv.visualize.colorspaces(rgb_img=img, original_img=False)
pcv.params.debug = "print"
vis_img = pcv.visualize.colorspaces(rgb_img=img)
assert np.shape(vis_img)[1] > (np.shape(img)[1]) and np.shape(vis_img_small)[1] > (np.shape(img)[1])
def test_plantcv_visualize_colorspaces_bad_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_plot_hist")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
_ = pcv.visualize.colorspaces(rgb_img=img)
def test_plantcv_visualize_overlay_two_imgs():
pcv.params.debug = None
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_visualize_overlay_two_imgs")
os.mkdir(cache_dir)
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
img2 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY))
pcv.params.debug = None
out_img = pcv.visualize.overlay_two_imgs(img1=img1, img2=img2)
sample_pt1 = img1[1445, 1154]
sample_pt2 = img2[1445, 1154]
sample_pt3 = out_img[1445, 1154]
pred_rgb = (sample_pt1 * 0.5) + (sample_pt2 * 0.5)
pred_rgb = pred_rgb.astype(np.uint8)
assert np.array_equal(sample_pt3, pred_rgb)
def test_plantcv_visualize_overlay_two_imgs_grayscale():
pcv.params.debug = None
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_visualize_overlay_two_imgs_grayscale")
os.mkdir(cache_dir)
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img2 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
out_img = pcv.visualize.overlay_two_imgs(img1=img1, img2=img2)
sample_pt1 = np.array([255, 255, 255], dtype=np.uint8)
sample_pt2 = np.array([255, 255, 255], dtype=np.uint8)
sample_pt3 = out_img[1445, 1154]
pred_rgb = (sample_pt1 * 0.5) + (sample_pt2 * 0.5)
pred_rgb = pred_rgb.astype(np.uint8)
assert np.array_equal(sample_pt3, pred_rgb)
def test_plantcv_visualize_overlay_two_imgs_bad_alpha():
pcv.params.debug = None
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_visualize_overlay_two_imgs_bad_alpha")
os.mkdir(cache_dir)
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
img2 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY))
alpha = -1
with pytest.raises(RuntimeError):
_ = pcv.visualize.overlay_two_imgs(img1=img1, img2=img2, alpha=alpha)
def test_plantcv_visualize_overlay_two_imgs_size_mismatch():
pcv.params.debug = None
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_visualize_overlay_two_imgs_size_mismatch")
os.mkdir(cache_dir)
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
img2 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_CROPPED))
with pytest.raises(RuntimeError):
_ = pcv.visualize.overlay_two_imgs(img1=img1, img2=img2)
# ##############################
# Tests for the utils subpackage
# ##############################
def test_plantcv_utils_json2csv():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_utils_json2csv")
os.mkdir(cache_dir)
plantcv.utils.json2csv(json_file=os.path.join(TEST_DATA, "merged_output.json"),
csv_file=os.path.join(cache_dir, "exports"))
assert all([os.path.exists(os.path.join(cache_dir, "exports-single-value-traits.csv")),
os.path.exists(os.path.join(cache_dir, "exports-multi-value-traits.csv"))])
def test_plantcv_utils_json2csv_no_json():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_utils_json2csv_no_json")
os.mkdir(cache_dir)
with pytest.raises(IOError):
plantcv.utils.json2csv(json_file=os.path.join(TEST_DATA, "not_a_file.json"),
csv_file=os.path.join(cache_dir, "exports"))
def test_plantcv_utils_json2csv_bad_json():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_utils_json2csv_bad_json")
os.mkdir(cache_dir)
with pytest.raises(ValueError):
plantcv.utils.json2csv(json_file=os.path.join(TEST_DATA, "incorrect_json_data.txt"),
csv_file=os.path.join(cache_dir, "exports"))
def test_plantcv_utils_sample_images_snapshot():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_utils_sample_images")
os.mkdir(cache_dir)
snapshot_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
img_outdir = os.path.join(cache_dir, "snapshot")
plantcv.utils.sample_images(source_path=snapshot_dir, dest_path=img_outdir, num=3)
assert os.path.exists(os.path.join(cache_dir, "snapshot"))
def test_plantcv_utils_sample_images_flatdir():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_utils_sample_images")
os.mkdir(cache_dir)
flat_dir = os.path.join(TEST_DATA)
img_outdir = os.path.join(cache_dir, "images")
plantcv.utils.sample_images(source_path=flat_dir, dest_path=img_outdir, num=30)
random_images = os.listdir(img_outdir)
assert all([len(random_images) == 30, len(np.unique(random_images)) == 30])
def test_plantcv_utils_sample_images_bad_source():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_utils_sample_images")
os.mkdir(cache_dir)
fake_dir = os.path.join(TEST_DATA, "snapshot")
img_outdir = os.path.join(cache_dir, "images")
with pytest.raises(IOError):
plantcv.utils.sample_images(source_path=fake_dir, dest_path=img_outdir, num=3)
def test_plantcv_utils_sample_images_bad_flat_num():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_utils_sample_images")
os.mkdir(cache_dir)
flat_dir = os.path.join(TEST_DATA)
img_outdir = os.path.join(cache_dir, "images")
with pytest.raises(RuntimeError):
plantcv.utils.sample_images(source_path=flat_dir, dest_path=img_outdir, num=300)
def test_plantcv_utils_sample_images_bad_phenofront_num():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_utils_sample_images")
os.mkdir(cache_dir)
snapshot_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
img_outdir = os.path.join(cache_dir, "images")
with pytest.raises(RuntimeError):
plantcv.utils.sample_images(source_path=snapshot_dir, dest_path=img_outdir, num=300)
def test_plantcv_utils_tabulate_bayes_classes():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_utils_tabulate_bayes_classes")
os.mkdir(cache_dir)
outfile = os.path.join(cache_dir, "rgb_table.txt")
plantcv.utils.tabulate_bayes_classes(input_file=os.path.join(TEST_DATA, PIXEL_VALUES), output_file=outfile)
table = pd.read_csv(outfile, sep="\t")
assert table.shape == (228, 2)
def test_plantcv_utils_tabulate_bayes_classes_missing_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_utils_tabulate_bayes_classes_missing_input")
os.mkdir(cache_dir)
outfile = os.path.join(cache_dir, "rgb_table.txt")
with pytest.raises(IOError):
plantcv.utils.tabulate_bayes_classes(input_file=os.path.join(PIXEL_VALUES), output_file=outfile)
# ##############################
# Clean up test files
# ##############################
def teardown_function():
shutil.rmtree(TEST_TMPDIR)
|
mit
|
marsyas/marsyas
|
src/marsyas_python/aim.py
|
7
|
7182
|
#!/usr/bin/env python
# Plotting auditory image model stuff
import pickle
from pylab import *
import sys
from matplotlib import pyplot
from marsyas import MarSystemManager,MarControlPtr
# create a global MarSystemManager
msm = MarSystemManager()
# helper function to create a list of MarSystems from a list of names
def create(msys_list_names):
return map(msm.create,msys_list_names)
# create a Series MarSystem composed of a list of MarSystems
def series(name, msys_list):
net = msm.create("Series/"+name)
map(net.addMarSystem, msys_list)
return net
# create a Fanout MarSystem composed of a list of MarSystems
def fanout(name, msys_list):
net = msm.create("Fanout/"+name)
map(net.addMarSystem, msys_list)
return net
# create the network and make nice interface with top-level controls
def create_network():
net = series("net", create(["SoundFileSource/src",
"AimPZFC2/aimpzfc",
"AimHCL2/aimhcl2",
"AimSAI/aimsai",
"Sum/sum",
# "AutoCorrelation/acr",
# "BeatHistogram/histo",
# "Peaker/pkr",
# "MaxArgMax/mxr"
]))
return net
def realvec2array(inrealvec):
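    # Copy a Marsyas realvec into a numpy array of shape (cols, rows),
    # reading the realvec sequentially.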
outarray = zeros((inrealvec.getCols(), inrealvec.getRows()))
k = 0;
for i in range(0,inrealvec.getCols()):
for j in range(0, inrealvec.getRows()):
outarray[i,j] = inrealvec[k]
k = k + 1
return outarray
def pca(data):
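    # Eigendecomposition of the covariance matrix of `data`; returns the
    # eigenvalues and eigenvectors sorted by decreasing eigenvalue.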
values, vecs = linalg.eigh(cov(data))
perm = argsort(-values)
return values[perm], vecs[:, perm]
def control2array(net,cname,so=0,eo=0,st=0,et=0):
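    # Fetch the realvec control `cname` from the network, convert it to a
    # numpy array, optionally crop it, and flip it vertically for display.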
net_control = net.getControl(cname)
net_realvec = net_control.to_realvec()
net_array = realvec2array(net_realvec)
if et==0:
et = net_array.shape[0]
eo = net_array.shape[1]
res_array = net_array.transpose()
res_array = res_array[so:eo][st:et]
res_array = flipud(res_array)
return res_array
def imageplot(imgdata, cmap = 'jet', aspect='None',img_xlabel='Samples', img_ylabel='Observations',sy=0,ey=0,sx=0,ex=0):
if ex==0:
ex = imgdata.shape[0]
ey = imgdata.shape[1]
imshow(imgdata, cmap=cmap, aspect=aspect, extent=[sy,ey,sx,ex])
xlabel(img_xlabel)
ylabel(img_ylabel)
# set the controls and plot the data
def plot_figure(fname, duration):
net = create_network()
filename = net.getControl("SoundFileSource/src/mrs_string/filename")
inSamples = net.getControl("mrs_natural/inSamples")
# factor = net.getControl("DownSampler/downsampler/mrs_natural/factor")
# mode = net.getControl("Sum/sum/mrs_string/mode");
# acr_compress = net.getControl("AutoCorrelation/acr/mrs_real/magcompress");
filename.setValue_string(fname)
# winSize = int(float(duration) * 44100.0);
winSize = int(1400);
inSamples.setValue_natural(winSize)
# mode.setValue_string("sum_samples");
# factor.setValue_natural(32)
# acr_compress.setValue_real(0.75);
srate = 44100.0
filterbank_output = net.getControl("AimHCL2/aimhcl2/mrs_realvec/processedData")
# net.updControl("BeatHistogram/histo/mrs_natural/startBin", 0);
# net.updControl("BeatHistogram/histo/mrs_natural/endBin", 300);
# net.updControl("Peaker/pkr/mrs_natural/peakStart", 50);
# net.updControl("Peaker/pkr/mrs_natural/peakEnd", 150);
topChannel = 50
for i in range(1,24):
net.tick()
data = filterbank_output.to_realvec()
imgdata = realvec2array(data)
# ossdata = net.getControl("Sum/sum/mrs_realvec/processedData").to_realvec();
# acrdata = net.getControl("AutoCorrelation/acr/mrs_realvec/processedData").to_realvec();
# bhistodata = net.getControl("BeatHistogram/histo/mrs_realvec/processedData").to_realvec();
# peaks = net.getControl("Peaker/pkr/mrs_realvec/processedData");
#figure()
#plot(peaks.to_realvec())
# max_peak = net.getControl("mrs_realvec/processedData");
# print max_peak.to_realvec()
#figure()
#plot(ossdata)
#figure()
#plot(acrdata)
# figure()
# plot(bhistodata)
print imgdata.shape
#print ossdata.getSize()
# (values, vecs) = pca(imgdata.transpose())
# figure()
# plot(vecs[1])
# figure()
# plot(vecs[2])
# figure()
# plot(vecs[3])
# figure()
# plot(vecs[4])
# figure()
# plot(vecs[5])
# print vecs.shape
cname = "AimHCL2/aimhcl2/mrs_realvec/processedData"
hold(False)
figure(1)
imshow(imgdata.transpose(), cmap = 'jet', aspect='auto', extent=[0.0, winSize / srate, 1, 78])
figure(2)
array = control2array(net, cname,0,40,0,500);
imageplot(array,'bone_r','auto')
title(cname);
# a = (realvec2array(data), 'bone_r', 'auto', 0, 50, 0, 40)
# a = {imgdata:array, cmap:'bone_r', aspect:'auto'}
# imageplot(**a)
# print pickle.dump(a,")
# imageplot(*a)
figure(3)
imageplot(control2array(net, cname), 'jet', 'auto', 'Frequency(Hz)', 'Time(msec)', 0, winSize/srate, 0, 6000)
figure(4);
imageplot(control2array(net, "AimSAI/aimsai/mrs_realvec/processedData"), 'jet', 'auto')
figure(5);
for i in range(1,topChannel):
params={'axes.linewidth' : 0}
rcParams.update(params)
subplots_adjust(hspace=0.001)
ax = subplot(topChannel,1,i)
ax.plot(imgdata[0:winSize,topChannel-i]);
yticklabels = ax.get_yticklabels()
xticklabels = ax.get_xticklabels()
setp(yticklabels, visible=False)
setp(xticklabels, visible=False)
for tick in ax.get_xticklines():
tick.set_visible(False)
for tick in ax.get_yticklines():
tick.set_visible(False)
figure(6)
subplot(321);
plot(imgdata[0:512,58]);
subplot(322)
plot(imgdata[0:512,66]);
subplot(323)
c1 = correlate(imgdata[0:512,58], imgdata[0:512,66], mode='full');
# plot(c1[winSize/2:winSize/2+512]);
s1 = argmax(c1);
plot(c1[s1:s1+512]);
# delay1 = (argmax(correlate(imgdata[1000:2000,50], imgdata[1000:2000,40], mode='full')) % 1000);
subplot(324)
c2 = correlate(imgdata[0:512,58], imgdata[0:512,59], mode='full');
s2 = argmax(c2);
plot(c2[s2:s2+512]);
#plot(c2[winSize/2:winSize/2+512]);
# delay2 = (argmax(correlate(imgdata[1000:2000,40], imgdata[1000:2000,30], mode='full')) % 1000);
subplot(325)
plot(c1)
hold(True)
plot(c2)
hold(False)
subplot(326)
plot(c2)
# print delay1
# print delay2
# subplot(325)
# hold(False)
# plot(imgdata[1000:2000:,50]);
# hold(True)
# plot(imgdata[1000-delay1:2000-delay1:,40]);
# hold(False)
# # figure();
# subplot(326)
# hold(False)
# plot(imgdata[1000:2000:,40]);
# hold(True)
# plot(imgdata[1000+delay2:2000+delay2:,30]);
# hold(False)
# # show();
corr_image = zeros((78,78))
mean_period = 0;
for i in range(0,78):
for j in range(0,78):
a = correlate(imgdata[0:512,i],imgdata[0:512,j], mode='full');
offset = argmax(a);
b = a[offset:offset+512];
period = 0
if (size(b) > 4):
for k in range(2,size(b)-1):
if ((b[k] >= b[k-1]) and (b[k] >= b[k+1])):
period = k
break
# figure(6)
# plot(b)
# raw_input("Press any key to continue")
mean_period = mean_period + period
corr_image[i,j] = b[period]
print (mean_period / (78 * 78))
figure(7);
imshow(corr_image, cmap = 'jet', aspect='auto');
raw_input("Press any key to continue")
# call the plot function
plot_figure(sys.argv[1], sys.argv[2])
|
gpl-2.0
|
yunfeilu/scikit-learn
|
examples/decomposition/plot_pca_vs_lda.py
|
68
|
1807
|
"""
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the first 2 principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LinearDiscriminantAnalysis(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained by each component
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('PCA of IRIS dataset')
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('LDA of IRIS dataset')
plt.show()
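# Illustrative aside (an addition, not part of the original example): since
# LDA is supervised, the fitted estimator can also be used as a classifier.
print('LDA mean accuracy on the training data: %.2f' % lda.score(X, y))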
|
bsd-3-clause
|
blue-yonder/postgraas_server
|
docs/conf.py
|
1
|
8759
|
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- Hack for ReadTheDocs ------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/rtfd/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
import inspect
from sphinx import apidoc
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
output_dir = os.path.join(__location__, "../docs/api")
module_dir = os.path.join(__location__, "../postgraas_server")
cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
apidoc.main(cmd_line.split(" "))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.imgmath',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'postgraas_server'
copyright = u'2016, Sebastian Neubauer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
try:
from postgraas_server import __version__ as version
except ImportError:
pass
else:
release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'postgraas_server-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'user_guide.tex', u'postgraas_server Documentation',
u'Sebastian Neubauer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
'sphinx': ('http://sphinx.pocoo.org', None),
'python': ('http://docs.python.org/' + python_version, None),
'matplotlib': ('http://matplotlib.sourceforge.net', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'sklearn': ('http://scikit-learn.org/stable', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
}
|
apache-2.0
|
LCAV/pyroomacoustics
|
examples/stft_block_processing.py
|
1
|
2227
|
"""
STFT Block Processing
=====================
In this example, we will apply a simple moving average filter in the frequency
domain. We will use the STFT class that lets us do block-wise processing
suitable for real-time application on streaming audio.
In this example, we perform offline processing, but the methodology is
block-wise and thus very easy to transfer to the streaming case. We use half
overlapping blocks with Hann windowing and apply a moving average filter in the
frequency domain. Finally, we plot and compare the spectrograms before and
after filtering.
"""
from __future__ import division, print_function
import numpy as np
from scipy.io import wavfile
import matplotlib.pyplot as plt
import pyroomacoustics as pra
import os
# filter to apply
h_len = 99
h = np.ones(h_len)
h /= np.linalg.norm(h)
# parameters
block_size = 512 - h_len + 1 # make sure the FFT size is a power of 2
hop = block_size // 2 # half overlap
window = pra.hann(
block_size, flag="asymmetric", length="full"
) # analysis window (no synthesis window)
# open single channel audio file
fn = os.path.join(os.path.dirname(__file__), "input_samples", "singing_8000.wav")
fs, audio = wavfile.read(fn)
# Create the STFT object
stft = pra.transform.STFT(
block_size, hop=hop, analysis_window=window, channels=1, streaming=True
)
# set the filter and the appropriate amount of zero padding (back)
if h_len > 1:
stft.set_filter(h, zb=h.shape[0] - 1)
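# Illustrative sanity check (an addition, not part of the original example):
# with the zero padding zb = h_len - 1 chosen above, each padded analysis
# block is block_size + h_len - 1 = 512 samples long, i.e. a power of two.
assert block_size + h_len - 1 == 512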
# collect the processed blocks
processed_audio = np.zeros(audio.shape)
# process the signals while full blocks are available
n = 0
while audio.shape[0] - n > hop:
# go to frequency domain
stft.analysis(
audio[n : n + hop]
)
stft.process() # apply the filter
# copy processed block in the output buffer
processed_audio[n : n + hop] = stft.synthesis()
n += hop
# plot the spectrogram before and after filtering
plt.figure()
plt.subplot(2, 1, 1)
plt.specgram(audio[: n - hop].astype(np.float32), NFFT=256, Fs=fs, vmin=-20, vmax=30)
plt.title("Original Signal")
plt.subplot(2, 1, 2)
plt.specgram(processed_audio[hop:n], NFFT=256, Fs=fs, vmin=-20, vmax=30)
plt.title("Lowpass Filtered Signal")
plt.tight_layout(pad=0.5)
plt.show()
|
mit
|
jonyroda97/redbot-amigosprovaveis
|
lib/matplotlib/tests/test_backend_qt5.py
|
2
|
5103
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import copy
import matplotlib
from matplotlib import pyplot as plt
from matplotlib._pylab_helpers import Gcf
from numpy.testing import assert_equal
import pytest
try:
# mock in python 3.3+
from unittest import mock
except ImportError:
import mock
with matplotlib.rc_context(rc={'backend': 'Qt5Agg'}):
qt_compat = pytest.importorskip('matplotlib.backends.qt_compat',
minversion='5')
from matplotlib.backends.backend_qt5 import (
MODIFIER_KEYS, SUPER, ALT, CTRL, SHIFT) # noqa
QtCore = qt_compat.QtCore
_, ControlModifier, ControlKey = MODIFIER_KEYS[CTRL]
_, AltModifier, AltKey = MODIFIER_KEYS[ALT]
_, SuperModifier, SuperKey = MODIFIER_KEYS[SUPER]
_, ShiftModifier, ShiftKey = MODIFIER_KEYS[SHIFT]
@pytest.mark.backend('Qt5Agg')
def test_fig_close():
# save the state of Gcf.figs
init_figs = copy.copy(Gcf.figs)
# make a figure using pyplot interface
fig = plt.figure()
# simulate user clicking the close button by reaching in
# and calling close on the underlying Qt object
fig.canvas.manager.window.close()
# assert that we have removed the reference to the FigureManager
# that got added by plt.figure()
assert init_figs == Gcf.figs
@pytest.mark.parametrize(
'qt_key, qt_mods, answer',
[
(QtCore.Qt.Key_A, ShiftModifier, 'A'),
(QtCore.Qt.Key_A, QtCore.Qt.NoModifier, 'a'),
(QtCore.Qt.Key_A, ControlModifier, 'ctrl+a'),
(QtCore.Qt.Key_Aacute, ShiftModifier,
'\N{LATIN CAPITAL LETTER A WITH ACUTE}'),
(QtCore.Qt.Key_Aacute, QtCore.Qt.NoModifier,
'\N{LATIN SMALL LETTER A WITH ACUTE}'),
(ControlKey, AltModifier, 'alt+control'),
(AltKey, ControlModifier, 'ctrl+alt'),
(QtCore.Qt.Key_Aacute, (ControlModifier | AltModifier | SuperModifier),
'ctrl+alt+super+\N{LATIN SMALL LETTER A WITH ACUTE}'),
(QtCore.Qt.Key_Backspace, QtCore.Qt.NoModifier, 'backspace'),
(QtCore.Qt.Key_Backspace, ControlModifier, 'ctrl+backspace'),
(QtCore.Qt.Key_Play, QtCore.Qt.NoModifier, None),
],
ids=[
'shift',
'lower',
'control',
'unicode_upper',
'unicode_lower',
'alt_control',
'control_alt',
'modifier_order',
'backspace',
'backspace_mod',
'non_unicode_key',
]
)
@pytest.mark.backend('Qt5Agg')
def test_correct_key(qt_key, qt_mods, answer):
"""
Make a figure
Send a key_press_event event (using non-public, qt5 backend specific api)
Catch the event
Assert sent and caught keys are the same
"""
qt_canvas = plt.figure().canvas
event = mock.Mock()
event.isAutoRepeat.return_value = False
event.key.return_value = qt_key
event.modifiers.return_value = qt_mods
def receive(event):
assert event.key == answer
qt_canvas.mpl_connect('key_press_event', receive)
qt_canvas.keyPressEvent(event)
@pytest.mark.backend('Qt5Agg')
def test_dpi_ratio_change():
"""
Make sure that if _dpi_ratio changes, the figure dpi changes but the
widget remains the same physical size.
"""
prop = 'matplotlib.backends.backend_qt5.FigureCanvasQT._dpi_ratio'
with mock.patch(prop, new_callable=mock.PropertyMock) as p:
p.return_value = 3
fig = plt.figure(figsize=(5, 2), dpi=120)
qt_canvas = fig.canvas
qt_canvas.show()
from matplotlib.backends.backend_qt5 import qApp
# Make sure the mocking worked
assert qt_canvas._dpi_ratio == 3
size = qt_canvas.size()
qt_canvas.manager.show()
qt_canvas.draw()
qApp.processEvents()
# The DPI and the renderer width/height change
assert fig.dpi == 360
assert qt_canvas.renderer.width == 1800
assert qt_canvas.renderer.height == 720
# The actual widget size and figure physical size don't change
assert size.width() == 600
assert size.height() == 240
assert_equal(qt_canvas.get_width_height(), (600, 240))
assert_equal(fig.get_size_inches(), (5, 2))
p.return_value = 2
assert qt_canvas._dpi_ratio == 2
qt_canvas.draw()
qApp.processEvents()
# this second processEvents is required to fully run the draw.
# On `update` we notice the DPI has changed and trigger a
# resize event to refresh, the second processEvents is
# required to process that and fully update the window sizes.
qApp.processEvents()
# The DPI and the renderer width/height change
assert fig.dpi == 240
assert qt_canvas.renderer.width == 1200
assert qt_canvas.renderer.height == 480
# The actual widget size and figure physical size don't change
assert size.width() == 600
assert size.height() == 240
assert_equal(qt_canvas.get_width_height(), (600, 240))
assert_equal(fig.get_size_inches(), (5, 2))
|
gpl-3.0
|
dhruv13J/scikit-learn
|
examples/linear_model/plot_polynomial_interpolation.py
|
251
|
1895
|
#!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1D
points, it suffices to build the Vandermonde matrix, which is
n_samples x (n_degree + 1) and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
plt.plot(x_plot, f(x_plot), label="ground truth")
plt.scatter(x, y, label="training points")
for degree in [3, 4, 5]:
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
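# Illustrative aside (an addition, not part of the original example):
# PolynomialFeatures is what builds the Vandermonde-style matrix described
# above; for degree 3 each transformed row is [1, x_i, x_i**2, x_i**3].
print(PolynomialFeatures(3).fit_transform(X[:2]))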
|
bsd-3-clause
|
rfinn/LCS
|
python/LCSpaper1.py
|
1
|
45395
|
#!/usr/bin/env python
'''
GOAL:
- this code contains all of the code to make figures for paper1
REQUIRED MODULES
- LCSbase.py
'''
###########################
###### IMPORT MODULES
###########################
import LCSbase as lb
from matplotlib import pyplot as plt
import numpy as np
import os
from LCScommon_py3 import *
from astropy.io import fits
from astropy.cosmology import WMAP9 as cosmo
import argparse  # here is min mass = 9.75
###########################
##### SET UP ARGPARSE
###########################
parser = argparse.ArgumentParser(description ='Run sextractor, scamp, and swarp to determine WCS solution and make mosaics')
parser.add_argument('--minmass', dest = 'minmass', default = 9., help = 'minimum stellar mass for sample. default is log10(M*) > 9')
parser.add_argument('--diskonly', dest = 'diskonly', default = 1, help = 'True/False (enter 1 or 0). normalize by Simard+11 disk size rather than Re for single-component sersic fit. Default is true. ')
args = parser.parse_args()
###########################
##### DEFINITIONS
###########################
USE_DISK_ONLY = bool(float(args.diskonly))  # True # set to use disk effective radius to normalize 24um size
if USE_DISK_ONLY:
print('normalizing by radius of disk')
minsize_kpc=1.3 # one mips pixel at distance of hercules
#minsize_kpc=2*minsize_kpc
mstarmin=float(args.minmass)
mstarmax=10.8
minmass=mstarmin #log of M*
ssfrmin=-12.
ssfrmax=-9
spiralcut=0.8
truncation_ratio=0.5
exterior=.68
colors=['k','b','c','g','m','y','r','sienna','0.5']
shapes=['o','*','p','d','s','^','>','<','v']
#colors=['k','b','c','g','m','y','r','sienna','0.5']
truncated=np.array([113107,140175,79360,79394,79551,79545,82185,166185,166687,162832,146659,99508,170903,18236,43796,43817,43821,70634,104038,104181],'i')
# figure setup
plotsize_single=(6.8,5)
plotsize_2panel=(10,5)
params = {'backend': 'pdf',
'axes.labelsize': 24,
'font.size': 20,
'legend.fontsize': 12,
'xtick.labelsize': 14,
'ytick.labelsize': 14,
#'lines.markeredgecolor' : 'k',
#'figure.titlesize': 20,
'mathtext.fontset': 'cm',
'mathtext.rm': 'serif',
'text.usetex': True,
'figure.figsize': plotsize_single}
plt.rcParams.update(params)
figuredir = '/Users/rfinn/Dropbox/Research/MyPapers/LCSpaper1/submit/resubmit4/'
###########################
##### START OF GALAXIES CLASS
###########################
class galaxies(lb.galaxies):
def plotsizedvdr(self,plotsingle=1,reonly=1,onlycoma=0,plotHI=0,plotbadfits=0,lowmass=0,himass=0,cluster=None,plothexbin=True,hexbinmax=40,scalepoint=0,clustername=None,blueflag=False,plotmembcut=True,colormin=.2,colormax=1,colorbydensity=False,plotoman=False,masscut=None,BTcut=None):
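        # Phase-space diagram: |dv|/sigma vs. dR/R200 for the sample, colored
        # by the 24um-to-optical size ratio (or by local density SIGMA_5).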
# log10(chabrier) = log10(Salpeter) - .25 (SFR estimate)
# log10(chabrier) = log10(diet Salpeter) - 0.1 (Stellar mass estimates)
if plotsingle:
plt.figure(figsize=(10,6))
ax=plt.gca()
plt.subplots_adjust(left=.1,bottom=.15,top=.9,right=.9)
plt.ylabel('$ \Delta v/\sigma $',fontsize=26)
plt.xlabel('$ \Delta R/R_{200} $',fontsize=26)
plt.legend(loc='upper left',numpoints=1)
colors=self.sizeratio
if colorbydensity:
colors=np.log10(self.s.SIGMA_5)
colormin=-1.5
colormax=1.5
cbticks=np.arange(colormin,colormax+.1,.1)
if USE_DISK_ONLY:
clabel=['$R_{24}/R_d$','$R_{iso}(24)/R_{iso}(r)$']
else:
clabel=['$R_e(24)/R_e(r)$','$R_{iso}(24)/R_{iso}(r)$']
cmaps=['jet_r','jet_r']
v1=[0.2,0.]
v2=[1.2,2]
nplot=1
x=(self.s.DR_R200)
y=abs(self.dv)
flag=self.sampleflag #& self.dvflag
if blueflag:
flag=self.bluesampleflag & self.dvflag
if clustername != None:
flag = flag & (self.s.CLUSTER == clustername)
if masscut != None:
flag = flag & (self.logstellarmass < masscut)
if BTcut != None:
flag = flag & (self.gim2d.B_T_r < 0.3)
if cluster != None:
flag = flag & (self.s.CLUSTER == cluster)
hexflag=self.dvflag
if cluster != None:
hexflag = hexflag & (self.s.CLUSTER == cluster)
nofitflag = self.sfsampleflag & ~self.sampleflag & self.dvflag
nofitflag = self.gim2dflag & (self.gim2d.B_T_r < .2) & self.sfsampleflag & ~self.sampleflag & self.dvflag
if cluster != None:
nofitflag = nofitflag & (self.s.CLUSTER == cluster)
if lowmass:
flag = flag & (self.s.CLUSTER_LX < 1.)
hexflag = hexflag & (self.s.CLUSTER_LX < 1.)
nofitflag = nofitflag & (self.s.CLUSTER_LX < 1.)
if himass:
flag = flag & (self.s.CLUSTER_LX > 1.)
hexflag = hexflag & (self.s.CLUSTER_LX > 1.)
nofitflag = nofitflag & (self.s.CLUSTER_LX > 1.)
if onlycoma:
flag = flag & (self.s.CLUSTER == 'Coma')
if plothexbin:
sp=plt.hexbin(x[hexflag],y[hexflag],gridsize=(30,20),alpha=.7,extent=(0,5,0,10),cmap='gray_r',vmin=0,vmax=hexbinmax)
plt.subplots_adjust(bottom=.15,left=.1,right=.95,top=.95,hspace=.02,wspace=.02)
if plotmembcut:
xl=np.array([-.2,1,1])
yl=np.array([3,3,-0.1])
plt.plot(xl,yl,'k-',lw=2)
elif plotoman: # line to identify infall galaxies from Oman+2013
xl=np.arange(0,2,.1)
plt.plot(xl,-4./3.*xl+2,'k-',lw=3)
#plt.plot(xl,-3./1.2*xl+3,'k-',lw=3)
else: # cut from Jaffe+2011
xl=np.array([0.01,1.2])
yl=np.array([1.5,0])
plt.plot(xl,yl,'k-',lw=2)
if reonly:
nplot=1
else:
nplot=2
if scalepoint:
size=(self.ssfrms[flag]+2)*40
else:
size=60
for i in range(nplot):
if not(reonly):
plt.subplot(1,2,nplot)
nplot +=1
if plotbadfits:
plt.scatter(x[nofitflag],y[nofitflag],marker='x',color='k',s=40,edgecolors='k')#markersize=8,mec='r',mfc='None',label='No Fit')
ax=plt.gca()
if colorbydensity:
sp=plt.scatter(x[flag],y[flag],c=colors[flag],s=size,cmap='jet',vmin=colormin,vmax=colormax,edgecolors=None,lw=0.)
else:
sp=plt.scatter(x[flag],y[flag],c=colors[flag],s=size,cmap='jet_r',vmin=colormin,vmax=colormax,edgecolors=None,lw=0.)
plt.axis([-.1,4.5,-.1,5])
if masscut != None:
plt.axis([-.1,4.5,-.1,4])
if i > 0:
ax.set_yticklabels(([]))
ax.tick_params(axis='both', which='major', labelsize=16)
if plotsingle:
cb=plt.colorbar(sp,fraction=0.08,label=clabel[i],ticks=cbticks)#cax=axins1,ticks=cbticks[i])
#text(.95,.9,clabel[i],transform=ax.transAxes,horizontalalignment='right',fontsize=20)
if plotHI:
f=flag & self.HIflag
plt.plot(x[f],y[f],'bs',mfc='None',mec='b',lw=2,markersize=20)
if not(reonly):
ax.text(0,-.1,'$ \Delta R/R_{200} $',fontsize=22,transform=ax.transAxes,horizontalalignment='center')
ax.text(-1.3,.5,'$\Delta v/\sigma_v $',fontsize=22,transform=ax.transAxes,rotation=90,verticalalignment='center')
if lowmass:
figname=homedir+'research/LocalClusters/SamplePlots/sizedvdr-lowLx'
elif himass:
figname=homedir+'research/LocalClusters/SamplePlots/sizedvdr-hiLx'
else:
figname=homedir+'research/LocalClusters/SamplePlots/sizedvdr'
if plotsingle:
if masscut != None:
plt.savefig(figuredir+'sizedvdr-lowmass-lowBT.eps')
plt.savefig(figuredir+'fig4.pdf')
def compare_cluster_exterior(self):
plt.figure(figsize=plotsize_single)
plt.subplots_adjust(bottom=.15,hspace=.4,top=.95)
plt.subplot(2,2,1)
self.compare_single((self.logstellarmass),baseflag=(self.sampleflag & ~self.agnflag),plotsingle=False,xlab='$ log_{10}(M_*/M_\odot) $',plotname='stellarmass')
plt.legend(loc='upper left')
plt.xticks(np.arange(9,12,.5))
plt.xlim(8.9,11.15)
#xlim(mstarmin,mstarmax)
plt.subplot(2,2,2)
self.compare_single(self.gim2d.B_T_r,baseflag=(self.sampleflag & ~self.agnflag),plotsingle=False,xlab='$GIM2D \ B/T $',plotname='BT')
plt.xticks(np.arange(0,1.1,.2))
plt.xlim(-.05,.85)
plt.subplot(2,2,3)
self.compare_single(self.s.ZDIST,baseflag=(self.sampleflag & ~self.agnflag),plotsingle=False,xlab='$ Redshift $',plotname='zdist')
plt.xticks(np.arange(0.02,.055,.01))
plt.xlim(.0146,.045)
plt.subplot(2,2,4)
#self.compare_single(self.s.SERSIC_TH50*self.da,baseflag=(self.sampleflag & ~self.agnflag),plotsingle=False,xlab='$R_e(r) \ (kpc)$',plotname='Rer')
self.compare_single(self.gim2d.Rhlr,baseflag=(self.sampleflag & ~self.agnflag),plotsingle=False,xlab='$R_e(r) \ (kpc)$',plotname='Rer')
#xticks(arange(2,20,2))
#plt.xlim(2,20)
plt.text(-1.5,1,'$Cumulative \ Distribution$',fontsize=22,transform=plt.gca().transAxes,rotation=90,verticalalignment='center')
#plt.savefig(homedir+'research/LocalClusters/SamplePlots/cluster_exterior.png')
#plt.savefig(homedir+'research/LocalClusters/SamplePlots/cluster_exterior.eps')
plt.savefig(figuredir+'fig5.eps')
def compare_single(self,var,baseflag=None,plotsingle=True,xlab=None,plotname=None):
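        # Compare the cumulative distributions of `var` for core members and
        # external galaxies and report a two-sample KS statistic (D, p).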
        if baseflag is None:
f1 = self.sampleflag & self.membflag & ~self.agnflag
f2 = self.sampleflag & ~self.membflag &self.dvflag & ~self.agnflag
else:
f1=baseflag & self.sampleflag & self.membflag & ~self.agnflag
f2=baseflag & self.sampleflag & ~self.membflag & ~self.agnflag
xmin=min(var[baseflag])
xmax=max(var[baseflag])
#print 'xmin, xmax = ',xmin,xmax
print('KS test comparing members and exterior')
(D,p)=ks(var[f1],var[f2])
#t=anderson.anderson_ksamp([var[f1],var[f2]])
#print '%%%%%%%%% ANDERSON %%%%%%%%%%%'
#print 'anderson statistic = ',t[0]
#print 'critical values = ',t[1]
#print 'p-value = ',t[2]
if plotsingle:
plt.figure()#figsize=(12,6))
plt.title('Member vs. External ('+self.prefix+')')
subplots_adjust(bottom=.15,left=.15)
print('hey')
plt.xlabel(xlab,fontsize=18)
#plt.ylabel('$Cumulative \ Distribution $',fontsize=20)
plt.legend(loc='lower right')
plt.hist(var[f1],bins=len(var[f1]),cumulative=True,histtype='step',normed=True,label='Core',range=(xmin,xmax),color='k')
#print var[f2]
plt.hist(var[f2],bins=len(var[f2]),cumulative=True,histtype='step',normed=True,label='External',range=(xmin,xmax),color='0.5')
ylim(-.05,1.05)
ax=gca()
text(.9,.25,'$D = %4.2f$'%(D),horizontalalignment='right',transform=ax.transAxes,fontsize=16)
text(.9,.1,'$p = %5.4f$'%(p),horizontalalignment='right',transform=ax.transAxes,fontsize=16)
return D, p
def plotRe24vsRe(self,plotsingle=1,sbcutobs=20.,prefix=None,usemyflag=0,myflag=None,showerr=0,logy=True,fixPA=False, usedr=False,colorflag=True):
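        # Plot the 24um effective radius against the optical disk (or
        # half-light) radius, marking upper limits separately.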
#print 'hi'
if plotsingle:
plt.figure(figsize=(10,8))
ax=plt.gca()
plt.xlabel('$ R_e(r) \ (arcsec)$',fontsize=20)
plt.ylabel('$ R_e(24) \ (arcsec) $',fontsize=20)
#legend(loc='upper left',numpoints=1)
if usemyflag:
flag=myflag
else:
flag=self.sampleflag & (self.sb_obs < sbcutobs)
mflag=flag & self.membflag
nfflag = flag & ~self.membflag & self.dvflag
ffflag = flag & ~self.membflag & ~self.dvflag
print('flag = ',sum(mflag),sum(nfflag),sum(ffflag))
x=(self.gim2d.Rhlr)
if USE_DISK_ONLY:
x=self.gim2d.Rd
if fixPA:
y=self.s.fcre1*mipspixelscale
myerr=self.s.fcre1err*mipspixelscale
else:
y=self.s.fcre1*mipspixelscale
myerr=self.s.fcre1err*mipspixelscale
y=self.s.SUPER_RE1*mipspixelscale*self.DA
myerr=self.s.SUPER_RE1ERR*mipspixelscale*self.DA
if plotsingle:
print('not printing errorbars')
else:
plt.errorbar(x[flag],y[flag],yerr=myerr[flag],fmt=None,ecolor='k')
mstarmin=9.3
mstarmax=11
color=self.logstellarmass
cblabel='$log_{10}(M_*/M\odot) $'
v1=mstarmin
v2=mstarmax
colormap=cm.jet
if usedr:
color=np.log10(sqrt(self.s.DR_R200**2 + self.s.DELTA_V**2))
cblabel='$\Delta r/R_{200}$'
cblabel='$log_{10}(\sqrt(\Delta r/R_{200}^2 + \Delta v/\sigma^2)$'
v1=-.5
v2=.7
colormap=cm.jet_r
if colorflag:
plotcolors = ['r','b']
else:
plotcolors = ['k','0.5']
plt.plot(x[mflag ],y[mflag],'ko',color=plotcolors[0],markersize=8,mec='k')
plt.plot(x[nfflag ],y[nfflag],'ks',color=plotcolors[1],markersize=8,mec='k')
plt.plot(x[ffflag ],y[ffflag],'ks',color=plotcolors[1],markersize=8,mec='k')
uflag = flag & self.upperlimit
print('number of upper limits = ',sum(uflag))
uplimits=np.array(list(zip(ones(sum(uflag)), zeros(sum(uflag)))))
plt.errorbar(x[uflag],y[uflag],yerr=uplimits.T, lolims=True, fmt='*',ecolor='k',color='k',markersize=12)
if plotsingle:
plt.colorbar(sp)
self.addlines(logflag=logy)
ax=plt.gca()
plt.axis([.5,12,-.5,7.3])
def addlines(self,logflag=True):
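        # Overplot the one-to-one line and, if requested, switch to log-log
        # axes with fixed limits.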
xl=np.arange(0,100,.5)
plt.plot(xl,xl,'k-')
if logflag:
ax=plt.gca()
ax.set_yscale('log')
ax.set_xscale('log')
plt.axis([1,30.,1,30.])
def plotsizehist(self, btcut = None,colorflag=True):
figure(figsize=(6,6))
plt.subplots_adjust(left=.15,bottom=.2,hspace=.1)
axes=[]
plt.subplot(2,1,1)
axes.append(plt.gca())
mybins=np.arange(0,2,.15)
if btcut is None:
flag = self.sampleflag
else:
flag = self.sampleflag & (self.gim2d.B_T_r < btcut)
if colorflag:
colors = ['r','b']
else:
colors = ['k','k']
flags = [flag & self.membflag & ~self.agnflag,flag & ~self.membflag & ~self.agnflag]
labels = ['$Core$','$External$']
for i in range(len(colors)):
plt.subplot(2,1,i+1)
print('median ratio for ',labels[i],' = ',np.median(self.sizeratio[flags[i]]))
hist(self.sizeratio[flags[i]],bins=mybins,histtype='stepfilled',color=colors[i],label=labels[i],lw=1.5,alpha=1)#,normed=True)
plt.legend(loc='upper right')
plt.axis([0,2,0,22])
if i < 1:
plt.xticks(([]))
plt.text(-.2,1,'$N_{gal}$',transform=gca().transAxes,verticalalignment='center',rotation=90,fontsize=24)
print('comparing cluster and exterior SF galaxies')
ks(self.sizeratio[flag & self.membflag & ~self.agnflag],self.sizeratio[flag & ~self.membflag & ~self.agnflag])
plt.xlabel('$ R_{24}/R_d $')
if btcut is None:
#plt.ylim(0,20)
#plt.savefig(homedir+'research/LocalClusters/SamplePlots/sizehistblue.eps')
#plt.savefig(homedir+'research/LocalClusters/SamplePlots/sizehistblue.png')
plt.savefig(figuredir+'fig11a.eps')
else:
#plt.ylim(0,15)
plt.subplot(2,1,1)
plt.title('$ B/T < %2.1f \ Galaxies $'%(btcut),fontsize=20)
#plt.savefig(homedir+'research/LocalClusters/SamplePlots/sizehistblueBTcut.eps')
#plt.savefig(homedir+'research/LocalClusters/SamplePlots/sizehistblueBTcut.png')
plt.savefig(figuredir+'fig11b.eps')
def plotsize3panel(self,logyscale=False,use_median=True,equal_pop_bins=True):
plt.figure(figsize=(10,10))
plt.subplots_adjust(left=.12,bottom=.1,top=.9,wspace=.02,hspace=.4)
nrow=3
ncol=3
flags=[self.sampleflag, self.sampleflag & self.membflag, self.sampleflag & ~self.membflag]
flags = [f & (self.s.SIGMA_5 > 0.) for f in flags]  # require a measured (positive) local density in every subsample
x=[self.gim2d.B_T_r,np.log10(self.s.SIGMA_5),self.logstellarmass]
xlabels=['$B/T$','$\log_{10}(\Sigma_5 \ (gal/Mpc^2))$','$\log_{10}(M_\star/M_\odot)$']
colors=[self.logstellarmass,self.gim2d.B_T_r,self.gim2d.B_T_r]
cblabel=['$\log(M_\star/M_\odot)$','$B/T$','$B/T$']
cbticks=[np.arange(8.5,10.8,.4),np.arange(0,1,.2),np.arange(0,1,.2)]
xticklabels=[np.arange(0,1,.2),np.arange(-1.2,2.2,1),np.arange(8.5,11.5,1)]
xlims=[(-.05,.9),(-1.1,1.9),(8.3,11.2)]
v1 = [8.5,0,0]
v2 = [10.8,0.6,0.6]
y=self.sizeratio
yerror=self.sizeratioERR
for i in range(len(x)):
allax=[]
for j in range(3):
plt.subplot(nrow,ncol,3*i+j+1)
plt.errorbar(x[i][flags[j]],y[flags[j]],yerr=yerror[flags[j]],fmt=None,ecolor='.5',markerfacecolor='white',zorder=1,alpha=.5)
sp=plt.scatter(x[i][flags[j]],y[flags[j]],c=colors[i][flags[j]],vmin=v1[i],vmax=v2[i],cmap='jet',s=40,label='GALFIT',lw=0,alpha=0.7,zorder=1,edgecolors='k')
if j < 3:
(rho,p)=spearman_with_errors(x[i][flags[j]],y[flags[j]],yerror[flags[j]])
ax=plt.gca()
plt.text(.95,.9,r'$\rho = [%4.2f, %4.2f]$'%(np.percentile(rho,16),np.percentile(rho,84)),horizontalalignment='right',transform=ax.transAxes,fontsize=16)
plt.text(.95,.8,'$p = [%5.4f, %5.4f]$'%(np.percentile(p,16),np.percentile(p,84)),horizontalalignment='right',transform=ax.transAxes,fontsize=16)
a=plt.gca()
#plt.axis(limits)
allax.append(a)
if j > 0:
a.set_yticklabels(([]))
if i == 0:
if j == 0:
plt.title('$All $',fontsize=24)
elif j == 1:
plt.title('$Core$',fontsize=24)
elif j == 2:
plt.title('$External$',fontsize=24)
if j == 1:
plt.xlabel(xlabels[i])
if j == 0:
#plt.ylabel('$R_e(24)/Re(r)$')
plt.ylabel('$R_{24}/R_d$')
xbin,ybin,ybinerr, colorbin = binxycolor(x[i][flags[j]],y[flags[j]],colors[i][flags[j]],nbin=5,erry=True,equal_pop_bins=equal_pop_bins,use_median = use_median)
plt.scatter(xbin,ybin,c=colorbin,s=180,vmin=v1[i],vmax=v2[i],cmap='jet',zorder=5,lw=2,edgecolors='k')
plt.errorbar(xbin,ybin,ybinerr,fmt=None,ecolor='k',alpha=0.7)
if logyscale:
a.set_yscale('log')
ylim(.08,6)
else:
ylim(-.1,3.3)
yticks((np.arange(0,4,1)))
xticks(xticklabels[i])
xlim(xlims[i])
#ylim(-.1,2.8)
if j == 2:
c = np.polyfit(xbin,ybin,1)
print('xbin = ', xbin)
print('ybin = ', ybin)
#c = np.polyfit(x[i][flags[j]],y[flags[j]],1)
xl=np.linspace(min(x[i][flags[j]]),max(x[i][flags[j]]),10)
yl = np.polyval(c,xl)
plt.plot(xl,yl,'k--',lw=2)
plt.subplot(nrow,ncol,3*i+j)
xl=np.linspace(min(x[i][flags[j-1]]),max(x[i][flags[j-1]]),10)
yl = np.polyval(c,xl)
plt.plot(xl,yl,'k--',lw=2)
#print xbin,ybin,colorbin
#if i == 2:
# #text(0.1,0.9,'$External$',transform=a.transAxes,horizontalalignment='left',fontsize=20)
# text(-2.3,1.7,'$R_e(24)/Re(r)$',transform=a.transAxes,rotation=90,horizontalalignment='center',verticalalignment='center',fontsize=26)
c=colorbar(ax=allax,fraction=.02,ticks=cbticks[i])
c.ax.text(6,.5,cblabel[i],rotation=-90,verticalalignment='center',fontsize=20)
savefig(figuredir+'fig12.pdf')
def plotsizestellarmass(self,plotsingle=True,btmax=None,btmin=None,equal_pop_bins=True,use_median=True):
if plotsingle:
plt.figure(figsize=(7,6))
plt.subplots_adjust(bottom=.15,left=.15)
flags = [self.sampleflag & self.membflag,self.sampleflag & ~self.membflag]
if btmax is not None:
flags = [f & (self.gim2d.B_T_r < btmax) for f in flags]
if btmin is not None:
flags = [f & (self.gim2d.B_T_r > btmin) for f in flags]
colors = ['r','b']
for i in range(len(flags)):
#plot(self.logstellarmass[flags[i]],self.sizeratio[flags[i]],'ro',color=colors[i],alpha=0.5)
plot(self.logstellarmass[flags[i]],self.sizeratio[flags[i]],'ro',color=colors[i],alpha=0.5)
errorbar(self.logstellarmass[flags[i]],self.sizeratio[flags[i]],self.sizeratioERR[flags[i]],fmt=None,ecolor='0.5',alpha=0.5)
flag = flags[i]
if btmax is not None:
flag = flag & (self.logstellarmass > 9.1) & (self.logstellarmass < 10.5)
xbin,ybin,ybinerr,colorbin = binxycolor(self.logstellarmass[flag],self.sizeratio[flag],self.gim2d.B_T_r[flag],erry=True,nbin=5,equal_pop_bins=equal_pop_bins,use_median=use_median)
#print xbin
plot(xbin,ybin,'ro',color=colors[i],markersize=18,mec='k',zorder=5)
#scatter(xbin,ybin,s=200, c=colorbin,marker='^',vmin=0,vmax=0.6,cmap='jet')
errorbar(xbin,ybin,ybinerr,fmt=None,ecolor='k',alpha=0.7)
#colorbar(label='$B/T$')
xlabel('$ \log_{10}(M_\star /M_\odot) $',fontsize=22)
ylabel('$ R_{24}/R_d $',fontsize=22)
#rho,p=spearman(self.logstellarmass[flag],self.sizeratio[flag])
#ax=plt.gca()
#plt.text(.95,.9,r'$\rho = %4.2f$'%(rho),horizontalalignment='right',transform=ax.transAxes,fontsize=18)
#plt.text(.95,.8,'$p = %5.4f$'%(p),horizontalalignment='right',transform=ax.transAxes,fontsize=18)
plt.legend(['$Core$','$<Core>$','$External$','$<External>$'],numpoints=1)
s=''
if btmax is not None:
s = '$B/T \ < \ %.2f$'%(btmax)
if btmin is not None:
s = '$B/T \ > \ %.2f$'%(btmin)
if (btmax is not None) and (btmin is not None):
s = '$%.2f < B/T \ < \ %.2f$'%(btmin,btmax)
plt.title(s,fontsize=20)
plt.axis([8.6,10.9,-.1,2.9])
plt.savefig(figuredir+'fig13.pdf')
def plotsizeHIfrac(self,sbcutobs=20.5,isoflag=0,r90flag=0,color_BT=False):
plt.figure(figsize=plotsize_single)
plt.subplots_adjust(bottom=.2,left=.15)
plt.clf()
flag = self.sampleflag & (self.HIflag) #& self.dvflag #& ~self.agnflag
print('number of galaxies = ',sum(flag))
y=(self.sizeratio[flag & self.membflag])
x=np.log10(self.s.HIMASS[flag & self.membflag])-self.logstellarmass[flag & self.membflag]
print('spearman for cluster galaxies only')
t = spearman(x,y)
if color_BT:
pointcolor = self.gim2d.B_T_r
v1=0
v2=0.6
else:
pointcolor = self.logstellarmass
v1=mstarmin
v2=mstarmax
#color=self.logstellarmass[flag]
color=pointcolor[flag & self.membflag]
sp=scatter(x,y,s=90,c=color,vmin=v1,vmax=v2,label='$Core$',cmap='jet',edgecolors='k')
y=(self.sizeratio[flag & ~self.membflag])
x=np.log10(self.s.HIMASS[flag & ~self.membflag])-self.logstellarmass[flag & ~self.membflag]
print('spearman for exterior galaxies only')
t = spearman(x,y)
color=pointcolor[flag & ~self.membflag]
sp=scatter(x,y,s=90,c=color,vmin=v1,vmax=v2,marker='s',label='$External$',cmap='jet',edgecolor='k')
y=(self.sizeratio[flag])
x=np.log10(self.s.HIMASS[flag])-self.logstellarmass[flag]
plt.legend(loc='upper left',scatterpoints=1)
errorbar(x,y,self.sizeratioERR[flag],fmt=None,ecolor='.5',zorder=100)
rho,p=spearman(x,y)
ax=plt.gca()
plt.text(.95,.9,r'$\rho = %4.2f$'%(rho),horizontalalignment='right',transform=ax.transAxes,fontsize=16)
plt.text(.95,.8,'$p = %5.4f$'%(p),horizontalalignment='right',transform=ax.transAxes,fontsize=16)
print('spearman for log(M*) < 10.41')
# use the stellar masses of the full HI sample so the mask matches x and y above
mass = self.logstellarmass[flag]
rho,p=spearman(x[mass < 10.41],y[mass < 10.41])
cb = plt.colorbar(sp,fraction=.08,ticks=np.arange(8.5,11,.5))
cb.ax.text(4.,.5,'$\log(M_\star/M_\odot)$',rotation=-90,verticalalignment='center',fontsize=20)
#plt.ylabel(r'$ R_e(24)/R_e(r)$')
plt.ylabel('$R_{24}/R_d$')
plt.xlabel(r'$ \log_{10}(M_{HI}/M_*)$')
ax.tick_params(axis='both', which='major', labelsize=16)
plt.axis([-1.8,1.6,0,2.5])
plt.savefig(figuredir+'fig16a.eps')
def plotsizeHIdef(self,sbcutobs=20.5,isoflag=0,r90flag=0):
figure(figsize=plotsize_single)
plt.subplots_adjust(left=.15,bottom=.2)
clf()
flag = self.sampleflag & (self.HIflag) #& self.membflag #& self.dvflag
print('number of galaxies = ',sum(flag))
y=(self.sizeratio[flag & self.membflag])
x=(self.s.HIDef[flag & self.membflag])
print('spearman for cluster galaxies only')
t = spearman(x,y)
#color=self.logstellarmass[flag]
#color=self.logstellarmass[flag & s.membflag]
colors=self.logstellarmass
color=colors[flag & self.membflag]
sp=scatter(x,y,s=90,c=color,vmin=mstarmin,vmax=mstarmax,label='$Core$',cmap='jet',edgecolor='k')
y=(self.sizeratio[flag & ~self.membflag])
x=(self.s.HIDef[flag & ~self.membflag])
print('spearman for exterior galaxies only')
t = spearman(x,y)
#color=self.logstellarmass[flag]
color=colors[flag & ~self.membflag]
sp=scatter(x,y,s=90,c=color,vmin=8.5,vmax=10.8,marker='s',label='$External$',cmap='jet',edgecolor='k')
y=(self.sizeratio[flag])
x=(self.s.HIDef[flag])
plt.legend(loc='upper left',scatterpoints=1)
errorbar(x,y,self.sizeratioERR[flag],fmt=None,ecolor='.5',zorder=100)
rho,p=spearman(x,y)
ax=plt.gca()
text(.95,.9,r'$\rho = %4.2f$'%(rho),horizontalalignment='right',transform=ax.transAxes,fontsize=16)
text(.95,.8,'$p = %5.4f$'%(p),horizontalalignment='right',transform=ax.transAxes,fontsize=16)
print('spearman for log(M*) < 10.41')
# use the stellar masses of the full sample so the mask matches x and y above
mass = colors[flag]
rho,p=spearman(x[mass < 10.41],y[mass < 10.41])
cb = plt.colorbar(sp,fraction=.08,ticks=np.arange(8.5,11,.5))
cb.ax.text(4.,.5,'$\log(M_\star/M_\odot)$',rotation=-90,verticalalignment='center',fontsize=20)
plt.ylabel('$R_{24}/R_d$')
plt.xlabel('$HI \ Deficiency$')#,fontsize=26)
plt.axis([-.6,1.6,0,2.5])
plt.savefig(figuredir+'fig16b.eps')
def plotNUVrsize(self):
plt.figure(figsize=(10,4))
plt.subplots_adjust(left=.1,wspace=.01,bottom=.2,right=.9)
BTmin = 0
BTmax = 0.4
flags = [self.sampleflag, self.sampleflag & self.membflag,self.sampleflag & ~self.membflag]
labels = ['$All$','$Core$','$External$']
allax=[]
for i in range(3):
plt.subplot(1,3,i+1)
plt.scatter(self.sizeratio[flags[i]],self.NUVr[flags[i]],c=self.gim2d.B_T_r[flags[i]],s=60,cmap='jet',vmin=BTmin,vmax=BTmax,edgecolors='k')
if i == 0:
plt.ylabel('$NUV-r$',fontsize=24)
else:
plt.gca().set_yticklabels(([]))
text(0.98,0.9,labels[i],transform=gca().transAxes,horizontalalignment='right',fontsize=20)
(rho,p)=spearman_with_errors(self.NUVr[flags[i]],self.sizeratio[flags[i]],self.sizeratioERR[flags[i]])
ax=plt.gca()
plt.text(.05,.1,r'$\rho = [%4.2f, %4.2f]$'%(np.percentile(rho,16),np.percentile(rho,84)),horizontalalignment='left',transform=ax.transAxes,fontsize=12)
plt.text(.05,.03,'$p = [%5.4f, %5.4f]$'%(np.percentile(p,16),np.percentile(p,84)),horizontalalignment='left',transform=ax.transAxes,fontsize=12)
plt.axhline(y=4,ls='-',color='0.5')
plt.axhline(y=4.5,ls='--',color='0.5')
plt.axhline(y=3.5,ls='--',color='0.5')
allax.append(plt.gca())
plt.xticks(np.arange(0,4))
plt.axis([-0.3,3.1,0,6.2])
colorlabel='$B/T$'
c=plt.colorbar(ax=allax,fraction=.02,ticks = np.arange(0,.5,.1))
c.ax.text(3.5,.5,colorlabel,rotation=-90,verticalalignment='center',fontsize=20)
plt.text(-.51,-.2,'$R_{24}/R_d $',transform=plt.gca().transAxes,fontsize=24,horizontalalignment='center')
plt.savefig(figuredir+'fig17.eps')
def plotsizevsMclallwhisker(sbcutobs=20,masscut=None,drcut=1.,blueflag=False,usetemp=False,useM500=False,usesigma=False,bwflag=True,btcut=None):
plt.figure(figsize=(10,8))
plt.subplots_adjust(hspace=.02,wspace=.02,bottom=.15,left=.15)
i=0
x1=[]
y1=[]
y2all=[]
y3all=[]
for cl in clusternamesbylx:
flag = (g.s.CLUSTER == cl) & g.sampleflag & g.membflag & ~g.agnflag
if btcut is not None:
flag = flag & (g.gim2d.B_T_r < btcut)#& ~s.blueflag
print('number in ',cl,' = ',sum(flag))
if masscut is not None:
flag=flag & (g.logstellarmass < masscut)
if usetemp:
x=float(clusterTx[cl])
elif useM500:
x=clusterXray[cl][1] # M500
elif usesigma:
x=log10(clustersigma[cl])
else:
x=log10(clusterLx[cl])+44
y=(g.sizeratio[flag])
y2=(g.size_ratio_corr[flag])
BT=mean(g.gim2d.B_T_r[flag & g.gim2dflag])
erry=std(g.sizeratioERR[flag])/sum(flag)
#plot(x,median(y2),'k.',label='_nolegend_')
if x > -99: #check for temp data, which is negative if not available
print(x, y)
if bwflag:
plt.plot(x,median(y),'k.',color='k',marker=shapes[i],markersize=18,label=cl)
bp = plt.boxplot([y],positions=[x],whis=99)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='black', marker='+')
plt.setp(bp['medians'], color='black')
else:
plt.plot(x,median(y),'k.',color=colors[i],marker=shapes[i],markersize=20,label=cl)
plt.boxplot([y],positions=[x],whis=99)
x1.append(x)
y1.append(median(y))
y2all.append(median(y2))
y3all.append(mean(y2))
#errorbar(x,y,yerr=erry,fmt=None,ecolor=colors[i])
#plot(x,BT,'b^',markersize=15)
i+=1
plt.legend(loc='upper right',numpoints=1,markerscale=.6)
flag = g.sampleflag & ~g.membflag & ~g.agnflag #& ~s.dvflag
exteriorvalue=mean(g.sizeratio[flag])
errexteriorvalue=std(g.sizeratio[flag])/sqrt(1.*sum(flag))
plt.axhline(y=exteriorvalue,color='0.5',ls='-')
plt.axhline(y=exteriorvalue+errexteriorvalue,color='0.5',ls='--')
plt.axhline(y=exteriorvalue-errexteriorvalue,color='0.5',ls='--')
#print 'size corrected by B/A'
#spearman(x1,y2all)
#print y1
#print y2all
#print 'size corrected by B/A, mean'
#spearman(x1,y3all)
ax=plt.gca()
#ax.set_xscale('log')
#xl=arange(41,45,.1)
#yl=-.3*(xl-43.)+.64
#plot(xl,yl,'k--')
if usetemp:
plt.xlabel('$ T_X (kev)$',fontsize = 28)
else:
plt.xlabel('$ log_{10}(L_X \ erg \ s^{-1} )$',fontsize = 28)
plt.ylabel('$R_{24}/R_d$',fontsize = 28)
if usetemp:
plt.xticks(np.arange(0,10.,1))
plt.axis([-.05,10.5,0.,1.2])
ax.tick_params(axis='both', which='major', labelsize=16)
elif useM500:
plt.axis([-.75,5.5,0.,1.2])
ax.tick_params(axis='both', which='major', labelsize=16)
elif usesigma:
#axis([2,3.5,0.,1.2])
ax.tick_params(axis='both', which='major', labelsize=16)
#xticks(arange(2,4,.5))#,['','44','45'])
else:
plt.axis([42.5,45.5,-.1,2.8])
plt.xticks(np.arange(43,46),['43','44','45'])
ax.tick_params(axis='both', which='major', labelsize=16)
plt.savefig(figuredir+'fig14.eps')
def plotsigmaLx(bwflag=True):
plt.figure(figsize=[7,6])
plt.clf()
plt.subplots_adjust(left=.16,bottom=.16,right=.95,top=.95,wspace=.3)
i=0
x=[]
y=[]
errm=[]
errp=[]
for cl in clusternamesbylx:
if bwflag:
plt.plot(clusterLx[cl],clusterbiweightscale[cl],'ko',marker=shapes[i],markersize=18,mfc=None,label=cl)
else:
plt.plot(clusterLx[cl],clusterbiweightscale[cl],'ko',color=colors[i],marker=shapes[i],markersize=16,label=cl)
errm.append(clusterbiweightscale_errm[cl])
errp.append(clusterbiweightscale_errp[cl])
x.append(clusterLx[cl])
y.append(clusterbiweightscale[cl])
i += 1
errm=array(errm)
errp=array(errp)
yerror=array(list(zip(errm, errp)),'f')
#print 'yerror = ',yerror
errorbar(x,y,yerr=yerror.T,fmt=None,ecolor='k')
# plot comparison sample
mah = fits.getdata(homedir+'/github/LCS/tables/Mahdavi2001/systems.fits')
# correct Lx to convert from H0=50 to H0=71 (divide by 1.96)
# convert bolometric luminosity to L in 0.1-2.4 kev band, which is what I use in the figure
# this conversion depends on temperature, ranges from 1.44 - 4.05; using 1.4 as a typical value
# this also brings coma into agreement
plt.plot(10.**(mah.logLXbol-44.)/1.96/1.4,10.**mah.logsigma,'k.',c='0.5',alpha=0.5)
plt.gca().set_xscale('log')
plt.xlabel('$L_X \ (10^{44} \ erg/s)$',fontsize=26)
plt.ylabel('$\sigma \ (km/s) $',fontsize=26)
plt.axis([.04,10,300,1100])
leg=plt.legend(numpoints=1,loc='upper left',scatterpoints=1,markerscale=.6,borderpad=.2,labelspacing=.2,handletextpad=.2,prop={'size':14})
gca().tick_params(axis='both', which='major', labelsize=16)
plt.savefig(figuredir+'fig1.eps')
def plotpositionson24(plotsingle=0,plotcolorbar=1,plotnofit=0,useirsb=0):
plt.figure(figsize=(10,8))
plt.subplots_adjust(hspace=.02,wspace=.02,left=.12,bottom=.12,right=.85)
i=1
allax=[]
for cl in clusternamesbylx:
plt.subplot(3,3,i)
infile=homedir+'/github/LCS/tables/clustertables/'+cl+'_NSAmastertable.fits'
d=fits.getdata(infile)
#print cl, i
ra=g.s.RA-clusterRA[cl]
dec=g.s.DEC-clusterDec[cl]
r200=2.02*(clusterbiweightscale[cl])/1000./sqrt(OmegaL+OmegaM*(1.+clusterz[cl])**3)*H0/70. # in Mpc
r200deg=r200*1000./(cosmo.angular_diameter_distance(clusterbiweightcenter[cl]/3.e5).value*Mpcrad_kpcarcsec)/3600.
cir=Circle((0,0),radius=r200deg,color='None',ec='k')
gca().add_patch(cir)
flag=(g.s.CLUSTER == cl) & g.dvflag
plt.hexbin(d.RA-clusterRA[cl],d.DEC-clusterDec[cl],cmap=cm.Greys,gridsize=40,vmin=0,vmax=10)
if plotnofit:
flag=g.sfsampleflag & ~g.sampleflag & g.dvflag & (g.s.CLUSTER == cl)
plot(ra[flag],dec[flag],'rv',mec='r',mfc='None')
flag=g.sampleflag & g.dvflag & (g.s.CLUSTER == cl)
#print cl, len(ra[flag]),len(dec[flag]),len(s.s.SIZE_RATIO[flag])
if useirsb:
color=log10(g.sigma_ir)
v1=7.6
v2=10.5
colormap=cm.jet
else:
color=g.s.SIZE_RATIO
v1=.1
v2=1
colormap='jet_r'
try:
plt.scatter(ra[flag],dec[flag],s=30,c=color[flag],cmap=colormap,vmin=v1,vmax=v2,edgecolors='k')
except ValueError:
plt.scatter(ra[flag],dec[flag],s=30,c='k',cmap=cm.jet_r,vmin=.1,vmax=1,edgecolors='k')
ax=plt.gca()
fsize=14
t=cluster24Box[cl]
drawbox([t[0]-clusterRA[cl],t[1]-clusterDec[cl],t[2],t[3],t[4]],'g-')
ax=gca()
ax.invert_xaxis()
if plotsingle:
xlabel('$ \Delta RA \ (deg) $',fontsize=22)
ylabel('$ \Delta DEC \ (deg) $',fontsize=22)
legend(numpoints=1,scatterpoints=1)
cname='$'+cl+'$'
text(.1,.8,cname,fontsize=18,transform=ax.transAxes,horizontalalignment='left')
plt.axis([1.8,-1.8,-1.8,1.8])
plt.xticks(np.arange(-1,2,1))
plt.yticks(np.arange(-1,2,1))
allax.append(ax)
multiplotaxes(i)
i+=1
if plotcolorbar:
c=colorbar(ax=allax,fraction=0.05)
c.ax.text(2.2,.5,'$R_{24}/R_d$',rotation=-90,verticalalignment='center',fontsize=20)
plt.text(-.5,-.28,'$\Delta RA \ (deg) $',fontsize=26,horizontalalignment='center',transform=ax.transAxes)
plt.text(-2.4,1.5,'$\Delta Dec \ $',fontsize=26,verticalalignment='center',rotation=90,transform=ax.transAxes,family='serif')
#plt.savefig(homedir+'/research/LocalClusters/SamplePlots/positionson24.eps')
#plt.savefig(homedir+'/research/LocalClusters/SamplePlots/positionson24.png')
plt.savefig(figuredir+'fig3.eps')
def plotRe24vsReall(sbcutobs=20,plotcolorbar=0,fixPA=False,logyflag=False,usedr=False):
figure(figsize=(10,8))
subplots_adjust(hspace=.02,wspace=.02,left=.15,bottom=.15,right=.9,top=.9)
i=1
allax=[]
for cl in clusternamesbylx:
plt.subplot(3,3,i)
flag = (g.s.CLUSTER == cl) & g.sampleflag
g.plotRe24vsRe(plotsingle=0,usemyflag=1,myflag=flag,sbcutobs=sbcutobs,logy=logyflag,fixPA=fixPA,usedr=usedr)
ax=plt.gca()
cname='$'+cl+'$'
plt.text(.9,.8,cname,fontsize=18,transform=ax.transAxes,horizontalalignment='right')
allax.append(ax)
multiplotaxes(i)
i+=1
if plotcolorbar:
if usedr:
cblabel = '$\Delta r/R_{200}$'
cblabel='$log_{10}(\sqrt{(\Delta r/R_{200})^2 + (\Delta v/\sigma)^2})$'
else:
cblabel='$log_{10}(M_*/M\odot) $'
plt.colorbar(ax=allax,fraction=0.08,label=cblabel)
plt.text(-.5,-.3,'$R_d \ (kpc)$',fontsize=22,horizontalalignment='center',transform=ax.transAxes)
plt.text(-2.4,1.5,'$R_{24} \ (kpc) $',fontsize=22,verticalalignment='center',rotation=90,transform=ax.transAxes,family='serif')
savefig(figuredir+'fig10.eps')
def plotsizevscluster(masscut=None,btcut=None):
clusters = ['Hercules','A1367','A2052','A2063']
bigmomma = ['Coma']
zflag = np.ones(len(g.sampleflag),'bool')
if masscut is not None:
zflag = zflag & (g.logstellarmass < 10.)  # note: the cut is hard-coded at log(M*)=10 rather than using masscut
if btcut is not None:
zflag = zflag & (g.gim2d.B_T_r < btcut)
btcut = .3  # fixed B/T cut applied to the external samples below
flag = zflag & g.sampleflag & g.membflag
groupflag = flag & ((g.s.CLUSTER == 'MKW11') | (g.s.CLUSTER == 'MKW8') | (g.s.CLUSTER == 'AWM4') | (g.s.CLUSTER == 'NGC6107'))
clusterflag = flag & ((g.s.CLUSTER == 'Hercules') | (g.s.CLUSTER == 'A1367') | (g.s.CLUSTER == 'A2052') | (g.s.CLUSTER == 'A2063'))
bigmommaflag = flag & (g.s.CLUSTER == 'Coma')
exteriorflag = zflag & g.sampleflag & (g.gim2d.B_T_r < btcut) & ~g.membflag & ~g.dvflag
nearexteriorflag = zflag & g.sampleflag & (g.gim2d.B_T_r < btcut) & ~g.membflag & g.dvflag
envs = [exteriorflag, nearexteriorflag,groupflag, clusterflag, bigmommaflag]
plt.figure()
ypoint = []
y2 = []
y2err=[]
yerr = []
for i in range(len(envs)):
ypoint.append(np.median(g.sizeratio[envs[i]]))
#ypoint.append(ws.weighted_mean(s.sizeratio[envs[i]],weights=1./s.sizeratioERR[envs[i]]))
yerr.append(np.std(g.sizeratio[envs[i]])/np.sqrt(1.*np.sum(envs[i])))
y2.append(np.median(g.gim2d.B_T_r[envs[i]]))
#ypoint.append(ws.weighted_mean(s.sizeratio[envs[i]],weights=1./s.sizeratioERR[envs[i]]))
y2err.append(np.std(g.gim2d.B_T_r[envs[i]])/np.sqrt(1.*np.sum(envs[i])))
y=g.sizeratio[envs[i]]
plt.plot(i,np.median(y),'ko',markersize=10)
bp = boxplot([y],positions=[i],widths=[.3],whis=99)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='black', marker='+')
plt.setp(bp['medians'], color='black')
ax = plt.gca()
plt.text(.95,.94,'$Far-External: \ \Delta v/\sigma > 3 $',fontsize=14,transform = ax.transAxes,horizontalalignment='right')
plt.text(.95,.86,'$Near-External: \ \Delta v/\sigma < 3$',fontsize=14,transform = ax.transAxes,horizontalalignment='right')
plt.text(.95,.78,'$Group: \ \sigma < 700 \ km/s$',fontsize=14,transform = ax.transAxes,horizontalalignment='right')
plt.text(.95,.70,'$Cluster: \ \sigma > 700 \ km/s$',fontsize=14,transform = ax.transAxes,horizontalalignment='right')
plt.xticks(np.arange(len(envs)),['$Field$', '$Near-Field$', '$Group$', '$Cluster$', '$Coma$'],fontsize=16)
plt.xlim(-.3,len(envs)-.7)
plt.ylim(-.1,2.95)
#plt.legend()
plt.ylabel('$R_{24}/R_d$')
plt.xlabel('$Environment$')
plt.subplots_adjust(bottom=.2,top=.9,left=.15,right=.92)
#plt.subplots_adjust(bottom=.15)
plt.savefig(figuredir+'fig15.eps')
def paperTable1Paper1(sbcutobs=20,masscut=0):
#clustersigma={'MKW11':361, 'MKW8':325., 'AWM4':500., 'A2063':660., 'A2052':562., 'NGC6107':500., 'Coma':1000., 'A1367':745., 'Hercules':689.}
#clusterz={'MKW11':.022849,'MKW8':.027,'AWM4':.031755,'A2063':.034937,'A2052':.035491,'NGC6107':.030658,'Coma':.023,'A1367':.028,'Hercules':.037,'MKW10':.02054}
#clusterbiweightcenter={'MKW11':6897,'MKW8':8045,'AWM4':9636,'A2063':10426,'A2052':10354,'NGC6107':9397,'Coma':7015,'A1367':6507,'Hercules':10941}
#clusterbiweightcenter_errp={'MKW11':45,'MKW8':36,'AWM4':51,'A2063':63,'A2052':64,'NGC6107':48,'Coma':41,'A1367':48,'Hercules':48}
#clusterbiweightcenter_errm={'MK
#outfile=open(homedir+'/Dropbox/Research/MyPapers/LCSpaper1/Table1.tex','w')
outfile=open(figuredir+'Table1.tex','w')
outfile.write('\\begin{deluxetable*}{ccccc} \n')
outfile.write('\\tablecaption{Cluster Properties and Galaxy Sample Sizes \label{finalsample}} \n')
#outfile.write('\\tablehead{\colhead{Cluster} &\colhead{Biweight Central Velocity} & \colhead{Lit.} & \colhead{Biweight Scale} & \colhead{Lit} & \colhead{N$_{spiral}$} & \colhead{N$_{spiral}$} } \n')# % \\\\ & \colhead{(km/s)} & \colhead{(km/s)} & \colhead{(km/s)} & \colhead{(km/s)} & \colhead{Member} & \colhead{External}} \n')
outfile.write('\\tablehead{\colhead{Cluster} &\colhead{Biweight Central Velocity} & \colhead{Biweight Scale} & \colhead{N$_{gal}$} & \colhead{N$_{gal}$} \\\\ & \colhead{(km/s)} & \colhead{(km/s)} & Core & External } \n')
outfile.write('\startdata \n')
for cl in clusternamesbydistance:
nmemb_spiral = sum((g.s.CLUSTER == cl) & g.sampleflag & g.membflag)
nnearexterior_spiral = sum((g.s.CLUSTER == cl) & g.sampleflag & ~g.membflag & g.dvflag)
nexterior_spiral = sum((g.s.CLUSTER == cl) & g.sampleflag & ~g.membflag & ~g.dvflag)
exterior_spiral = sum((g.s.CLUSTER == cl) & g.sampleflag & ~g.membflag)
#tableline='%s & %i$^{%+i}_{-%i}$ & %i & %i$^{+%i}_{-%i}$ & %i & %i & %i & %i \\\\ \n' %(cl, clusterbiweightcenter[cl],clusterbiweightcenter_errp[cl],clusterbiweightcenter_errm[cl],int(round(clusterz[cl]*3.e5)), clusterbiweightscale[cl],clusterbiweightscale_errp[cl],clusterbiweightscale_errm[cl],int(round(clustersigma[cl])),nmemb_spiral,nexterior_spiral)
tableline='%s & %i$^{%+i}_{-%i}$ & %i$^{+%i}_{-%i}$ & %i & %i \\\\ \n' %(cl, clusterbiweightcenter[cl],clusterbiweightcenter_errp[cl],clusterbiweightcenter_errm[cl], clusterbiweightscale[cl],clusterbiweightscale_errp[cl],clusterbiweightscale_errm[cl],nmemb_spiral,exterior_spiral)
outfile.write(tableline)
outfile.write('\enddata \n')
outfile.write('\end{deluxetable*} \n')
outfile.close()
if __name__ == '__main__':
homedir = os.environ['HOME']
g = galaxies(homedir+'/github/LCS/')
#plotsigmaLx() # Fig 1
#plotpositionson24() # Fig 3
#g.plotsizedvdr(plothexbin=True,plotmembcut=False,plotoman=True,plotbadfits=0,hexbinmax=40,colormin=.2,colormax=1.1) # Fig 4
#g.compare_cluster_exterior() # Fig 5
#plotRe24vsReall(logyflag=False) # Fig 10
#g.plotsizehist(colorflag=True) # Fig 11a
#g.plotsizehist(btcut=.3,colorflag=True) # Fig 11b
#g.plotsize3panel(use_median=False,equal_pop_bins=True) # Fig 12
#g.plotsizestellarmass(use_median=False,equal_pop_bins=True,btmax=0.3) # Fig 13
#plotsizevsMclallwhisker(btcut=.3) # Fig 14
#plotsizevscluster(btcut=.3) # Fig 15
#g.plotsizeHIfrac() # Fig 16a
#g.plotsizeHIdef() # Fig 16b
#g.plotNUVrsize() # Fig 17
|
gpl-3.0
|
peterfpeterson/mantid
|
Framework/PythonInterface/mantid/plots/datafunctions.py
|
3
|
49509
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2017 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid package
#
#
import datetime
from itertools import tee
import numpy as np
from matplotlib.collections import PolyCollection, QuadMesh
from matplotlib.container import ErrorbarContainer
from matplotlib.colors import LogNorm
from matplotlib.ticker import LogLocator
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from scipy.interpolate import interp1d
import mantid.api
import mantid.kernel
from mantid.api import MultipleExperimentInfos, MatrixWorkspace
from mantid.dataobjects import EventWorkspace, MDHistoWorkspace, Workspace2D
from mantid.plots.legend import convert_color_to_hex
from mantid.plots.utility import MantidAxType
# Helper functions for data extraction from a Mantid workspace and plot functionality
# These functions are common between axesfunctions.py and axesfunctions3D.py
# ====================================================
# Validation
# ====================================================
def validate_args(*args, **kwargs):
return len(args) > 0 and (isinstance(args[0], EventWorkspace)
or isinstance(args[0], Workspace2D)
or isinstance(args[0], MDHistoWorkspace)
or isinstance(args[0], MultipleExperimentInfos)
and "LogName" in kwargs)
# ====================================================
# Data extraction and manipulation
# ====================================================
def get_distribution(workspace, **kwargs):
"""
Determine whether or not the data is a distribution.
If the workspace is a distribution return true,
else the value in kwargs wins.
Applies to Matrix workspaces only
:param workspace: :class:`mantid.api.MatrixWorkspace` to extract the data from
"""
distribution = kwargs.pop('distribution', False)
distribution = True if workspace.isDistribution() else bool(distribution)
return distribution, kwargs
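# Minimal sketch of the contract (illustrative, assuming `ws` is a MatrixWorkspace):
#   get_distribution(ws)                    -> (ws.isDistribution(), {})
#   get_distribution(ws, distribution=True) -> (True, {}) even if ws is not a distribution
# i.e. the kwarg only matters when the workspace itself is not already a distribution.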
def get_normalize_by_bin_width(workspace, axes, **kwargs):
"""
Determine whether or not the workspace should be plotted as a
distribution. If the workspace is a distribution return False, else,
if there is already a curves on the axes, return according to
whether those curves are distributions. Else go by the global
setting.
:param workspace: :class:`mantid.api.MatrixWorkspace` workspace being plotted
:param axes: The axes being plotted on
"""
normalize_by_bin_width = kwargs.pop('normalize_by_bin_width', None)
if normalize_by_bin_width is not None:
return normalize_by_bin_width, kwargs
distribution = kwargs.get('distribution', None)
axis = kwargs.get('axis', None)
if axis == MantidAxType.BIN or distribution or \
(hasattr(workspace, 'isDistribution') and workspace.isDistribution()):
return False, kwargs
elif distribution is False:
return True, kwargs
else:
try:
current_artists = axes.tracked_workspaces.values()
except AttributeError:
current_artists = None
if current_artists:
current_normalization = any(
[artist[0].is_normalized for artist in current_artists])
normalization = current_normalization
else:
normalization = mantid.kernel.config['graph1d.autodistribution'].lower() == 'on'
return normalization, kwargs
def get_normalization(md_workspace, **kwargs):
"""
Gets the normalization flag of an MDHistoWorkspace. For workspaces
derived similar to MSlice/Horace, one needs to average data, the so-called
"number of events" normalization.
:param md_workspace: :class:`mantid.api.IMDHistoWorkspace` to extract the data from
"""
normalization = kwargs.pop('normalization', md_workspace.displayNormalizationHisto())
return normalization, kwargs
def get_indices(md_workspace, **kwargs):
"""
Gets the indices of an MDHistoWorkspace to select the plane to plot.
Set the legend to provide the selected axes values
:param md_workspace: :class:`mantid.api.IMDHistoWorkspace` to extract the data from
"""
if 'slicepoint' in kwargs and 'indices' in kwargs:
raise ValueError("Must specify either 'slicepoint' or 'indices', not both")
if 'slicepoint' in kwargs:
slicepoint = kwargs.pop('slicepoint')
assert md_workspace.getNumDims() == len(
slicepoint), "slicepoint provided do not match the dimensions of the workspace"
indices = []
for n, p in enumerate(slicepoint):
if p is None:
indices.append(slice(None))
else:
indices.append(pointToIndex(md_workspace.getDimension(n), p))
indices = tuple(indices)
elif 'indices' in kwargs:
indices = kwargs.pop('indices')
assert md_workspace.getNumDims() == len(
indices), "indices provided do not match the dimensions of the workspace"
else:
indices = None
if indices and 'label' not in kwargs:
ws_name = md_workspace.name()
labels = '; '.join('{0}={1:.4}'.format(md_workspace.getDimension(n).name,
(md_workspace.getDimension(n).getX(indices[n])
+ md_workspace.getDimension(n).getX(indices[n] + 1)) / 2)
for n in range(md_workspace.getNumDims()) if indices[n] != slice(None))
if ws_name:
kwargs['label'] = '{0}: {1}'.format(ws_name, labels)
else:
kwargs['label'] = labels
return indices, kwargs
def pointToIndex(dim, point):
"""
Finds the bin index of which the point falls into.
"""
i = (point - dim.getX(0)) / dim.getBinWidth()
return int(min(max(i, 0), dim.getNBins() - 1))
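# Worked example (illustrative): for a dimension with getX(0)=0.0, bin width 0.5 and 10 bins,
# a point at 2.2 gives i = (2.2 - 0.0)/0.5 = 4.4, which is clamped to [0, 9] and truncated to bin index 4.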
def points_from_boundaries(input_array):
"""
The function returns bin centers from bin boundaries
:param input_array: a :class:`numpy.ndarray` of bin boundaries
"""
assert isinstance(input_array, np.ndarray), 'Not a numpy array'
if len(input_array) < 2:
raise ValueError('could not get centers from less than two boundaries')
return .5 * (input_array[0:-1] + input_array[1:])
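# Example (illustrative):
#   points_from_boundaries(np.array([0., 1., 2., 3.]))  ->  array([0.5, 1.5, 2.5])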
def _dim2array(d):
"""
Create a numpy array containing bin centers along the dimension d
:param d: an :class:`mantid.geometry.IMDDimension` object
returns: bin boundaries for dimension d
"""
dmin = d.getMinimum()
dmax = d.getMaximum()
return np.linspace(dmin, dmax, d.getNBins() + 1)
def get_wksp_index_dist_and_label(workspace, axis=MantidAxType.SPECTRUM, **kwargs):
"""
Get workspace index, whether the workspace is a distribution,
and label for the spectrum
:param workspace: a Workspace2D or an EventWorkspace
:param axis: The axis on which we're operating
:param kwargs: Keyword arguments passed to the plot function, passed by reference as it is mutated
"""
# get the special arguments out of kwargs
workspace_index, spectrum_number, kwargs = _get_wksp_index_and_spec_num(workspace, axis, **kwargs)
# create a label if it isn't already specified
if 'label' not in kwargs:
ws_name = workspace.name()
if axis == MantidAxType.SPECTRUM:
if workspace.getAxis(1).isText() or workspace.getAxis(1).isNumeric():
kwargs['label'] = '{0}: {1}'.format(ws_name, workspace.getAxis(1).label(workspace_index))
else:
if ws_name:
kwargs['label'] = '{0}: spec {1}'.format(ws_name, spectrum_number)
else:
kwargs['label'] = 'spec {0}'.format(spectrum_number)
elif axis == MantidAxType.BIN:
if ws_name:
kwargs['label'] = '{0}: bin {1}'.format(ws_name, workspace_index)
else:
kwargs['label'] = 'bin {0}'.format(workspace_index)
(distribution, kwargs) = get_distribution(workspace, **kwargs)
return workspace_index, distribution, kwargs
def _get_wksp_index_and_spec_num(workspace, axis, **kwargs):
"""
Get the workspace index and the spectrum number from the kwargs provided
:param workspace: a Workspace2D or an EventWorkspace
:param axis: The axis on which the workspace is being traversed,
can be either MantidAxType.BIN or MantidAxType.SPECTRUM,
default is MantidAxType.SPECTRUM
:param kwargs: Dict of keyword arguments that should contain either spectrum number
or workspace index
:return The workspace index and the spectrum number
"""
spectrum_number = kwargs.pop('specNum', None)
workspace_index = kwargs.pop('wkspIndex', None)
# error check input parameters
if (spectrum_number is not None) and (workspace_index is not None):
raise RuntimeError('Must specify only specNum or wkspIndex')
if (spectrum_number is None) and (workspace_index is None):
raise RuntimeError('Must specify either specNum or wkspIndex')
# convert the spectrum number to a workspace index and vice versa
if spectrum_number is not None:
try:
workspace_index = workspace.getIndexFromSpectrumNumber(int(spectrum_number))
except RuntimeError:
raise RuntimeError(
'Spectrum Number {0} not found in workspace {1}'.format(spectrum_number, workspace.name()))
elif axis == MantidAxType.SPECTRUM: # Only get a spectrum number if we're traversing the spectra
try:
spectrum_number = workspace.getSpectrum(workspace_index).getSpectrumNo()
except RuntimeError:
raise RuntimeError(
'Workspace index {0} not found in workspace {1}'.format(workspace_index, workspace.name()))
return workspace_index, spectrum_number, kwargs
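# Minimal sketch (illustrative, assuming `ws` is a Workspace2D): exactly one of the two keywords
# may be supplied, e.g.
#   idx, spec_no, kwargs = _get_wksp_index_and_spec_num(ws, MantidAxType.SPECTRUM, specNum=3)
# returns the workspace index of spectrum number 3, the spectrum number itself, and the remaining kwargs.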
def get_md_data1d(workspace, normalization, indices=None):
"""
Function to transform data in an MDHisto workspace with exactly
one non-integrated dimension into arrays of bin centers, data,
and error, to be used in 1D plots (plot, scatter, errorbar)
"""
coordinate, data, err = get_md_data(workspace, normalization, indices, withError=True)
assert len(coordinate) == 1, 'The workspace is not 1D'
coordinate = points_from_boundaries(coordinate[0])
return coordinate, data, err
def get_md_data(workspace, normalization, indices=None, withError=False):
"""
Generic function to extract data from an MDHisto workspace
:param workspace: :class:`mantid.api.IMDHistoWorkspace` containing data
:param normalization: if :class:`mantid.api.MDNormalization.NumEventsNormalization`
it will divide intensity by the number of corresponding MDEvents
:param indices: slice indices to select data
:param withError: flag for if the error is calculated. If False, err is returned as None
returns a tuple containing bin boundaries for each dimension, the (maybe normalized)
signal and error arrays
"""
if indices is None:
dims = workspace.getNonIntegratedDimensions()
indices = Ellipsis
else:
dims = [workspace.getDimension(n) for n in range(workspace.getNumDims()) if indices[n] == slice(None)]
dim_arrays = [_dim2array(d) for d in dims]
# get data
data = workspace.getSignalArray()[indices].copy()
if normalization == mantid.api.MDNormalization.NumEventsNormalization:
nev = workspace.getNumEventsArray()[indices]
data /= nev
err = None
if withError:
err2 = workspace.getErrorSquaredArray()[indices].copy()
if normalization == mantid.api.MDNormalization.NumEventsNormalization:
err2 /= (nev * nev)
err = np.sqrt(err2)
data = data.squeeze().T
data = np.ma.masked_invalid(data)
if err is not None:
err = err.squeeze().T
err = np.ma.masked_invalid(err)
return dim_arrays, data, err
def get_spectrum(workspace, wkspIndex, normalize_by_bin_width, withDy=False, withDx=False):
"""
Extract a single spectrum and process the data into a frequency
:param workspace: a Workspace2D or an EventWorkspace
:param wkspIndex: workspace index
:param normalize_by_bin_width: flag to divide the data by bin width. The same
effect can be obtained by running the :ref:`algm-ConvertToDistribution`
algorithm
:param withDy: if True, it will return the error in the "counts", otherwise None
:param with Dx: if True, and workspace has them, it will return errors
in the x coordinate, otherwise None
Note that for workspaces containing bin boundaries, this function will return
the bin centers for x.
To be used in 1D plots (plot, scatter, errorbar)
"""
x = workspace.readX(wkspIndex)
y = workspace.readY(wkspIndex)
dy = None
dx = None
if withDy:
dy = workspace.readE(wkspIndex)
if withDx and workspace.getSpectrum(wkspIndex).hasDx():
dx = workspace.readDx(wkspIndex)
if workspace.isHistogramData():
if normalize_by_bin_width and not workspace.isDistribution():
y = y / (x[1:] - x[0:-1])
if dy is not None:
dy = dy / (x[1:] - x[0:-1])
x = points_from_boundaries(x)
try:
specInfo = workspace.spectrumInfo()
if specInfo.isMasked(wkspIndex):
y[:] = np.nan
except:
pass
y = np.ma.masked_invalid(y)
if dy is not None:
dy = np.ma.masked_invalid(dy)
return x, y, dy, dx
def get_bin_indices(workspace):
"""
Find the bins' indices, without these of the monitors if there is some.
(ie every detector which is not a monitor)
:param workspace: a Workspace2D or an EventWorkspace
:return : the bins' indices as a range if possible, else as a numpy array
"""
total_range = workspace.getNumberHistograms()
try:
spectrum_info = workspace.spectrumInfo()
except:
return range(total_range)
monitors_indices = [index for index in range(total_range)
if spectrum_info.hasDetectors(index) and spectrum_info.isMonitor(index)]
monitor_count = len(monitors_indices)
# If possible, ie the detectors' indices are continuous, we return a range.
# If not, we return a numpy array
range_start = -1
range_end = total_range
is_range = True
for index, monitor_index in enumerate(monitors_indices):
if index == monitor_index:
range_start = monitor_index
else:
if monitor_count - index == total_range - monitor_index and monitors_indices[-1] == total_range - 1:
range_end = monitor_index
else:
is_range = False
break
if is_range:
return range(range_start + 1, range_end)
else:
# the following two lines can be replaced by np.isin when > version 1.7.0 is used on RHEL7
total_range = np.asarray(range(total_range))
indices = np.where(np.in1d(total_range, monitors_indices, invert=True).reshape(total_range.shape))
# this check is necessary as numpy may return a tuple or a plain array based on platform.
indices = indices[0] if isinstance(indices, tuple) else indices
return indices
def get_bins(workspace, bin_index, withDy=False):
"""
Extract a requested bin from each spectrum
:param workspace: a Workspace2D or an EventWorkspace
:param bin_index: the index of a bin
:param withDy: if True, it will return the error in the "counts", otherwise None
"""
indices = get_bin_indices(workspace)
x_values, y_values = [], []
dy = [] if withDy else None
for row_index in indices:
y_data = workspace.readY(int(row_index))
if bin_index < len(y_data):
x_values.append(row_index)
y_values.append(y_data[bin_index])
if withDy:
dy.append(workspace.readE(int(row_index))[bin_index])
dx = None
return x_values, y_values, dy, dx
def get_md_data2d_bin_bounds(workspace, normalization, indices=None, transpose=False):
"""
Function to transform data in an MDHisto workspace with exactly
two non-integrated dimension into arrays of bin boundaries in each
dimension, and data. To be used in 2D plots (pcolor, pcolorfast, pcolormesh)
Note: return coordinates are 1d vectors. Use numpy.meshgrid to generate 2d versions
"""
coordinate, data, _ = get_md_data(workspace, normalization, indices, withError=False)
assert len(coordinate) == 2, 'The workspace is not 2D'
if transpose:
return coordinate[1], coordinate[0], data.T
else:
return coordinate[0], coordinate[1], data
def get_md_data2d_bin_centers(workspace, normalization, indices=None, transpose=False):
"""
Function to transform data in an MDHisto workspace with exactly
two non-integrated dimension into arrays of bin centers in each
dimension, and data. To be used in 2D plots (contour, contourf,
tricontour, tricontourf, tripcolor)
Note: return coordinates are 1d vectors. Use numpy.meshgrid to generate 2d versions
"""
x, y, data = get_md_data2d_bin_bounds(workspace, normalization, indices, transpose)
x = points_from_boundaries(x)
y = points_from_boundaries(y)
return x, y, data
def boundaries_from_points(input_array):
""""
The function tries to guess bin boundaries from bin centers
:param input_array: a :class:`numpy.ndarray` of bin centers
"""
assert isinstance(input_array, np.ndarray), 'Not a numpy array'
if len(input_array) == 0:
raise ValueError('could not extend array with no elements')
if len(input_array) == 1:
return np.array([input_array[0] - 0.5, input_array[0] + 0.5])
return np.concatenate(([(3 * input_array[0] - input_array[1]) * 0.5],
(input_array[1:] + input_array[:-1]) * 0.5,
[(3 * input_array[-1] - input_array[-2]) * 0.5]))
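# Example (illustrative): evenly spaced centers are extended by half a bin on each side,
#   boundaries_from_points(np.array([0.5, 1.5, 2.5]))  ->  array([0., 1., 2., 3.])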
def common_x(arr):
"""
Helper function to check if all rows in a 2d :class:`numpy.ndarray` are identical
"""
return np.all(arr == arr[0, :], axis=(1, 0))
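# Example (illustrative):
#   common_x(np.array([[1., 2., 3.], [1., 2., 3.]]))  ->  True
#   common_x(np.array([[1., 2., 3.], [1., 2., 4.]]))  ->  False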
def get_matrix_2d_ragged(workspace, normalize_by_bin_width, histogram2D=False, transpose=False,
extent=None, xbins=100, ybins=100, spec_info=None, maxpooling=False):
if spec_info is None:
try:
spec_info = workspace.spectrumInfo()
except:
spec_info = None
if extent is None:
common_bins = workspace.isCommonBins()
delta = np.finfo(np.float64).max
min_value = np.finfo(np.float64).max
max_value = np.finfo(np.float64).min
for spectrum_index in range(workspace.getNumberHistograms()):
if not (spec_info and spec_info.hasDetectors(spectrum_index) and spec_info.isMonitor(spectrum_index)):
xtmp = workspace.readX(spectrum_index)
if workspace.isHistogramData():
# input x is edges
xtmp = mantid.plots.datafunctions.points_from_boundaries(xtmp)
else:
# input x is centers
pass
min_value = min(min_value, xtmp.min())
max_value = max(max_value, xtmp.max())
diff = np.diff(xtmp)
delta = min(delta, diff.min())
if common_bins:
break
xtmp = workspace.readX(0)
if delta == np.finfo(np.float64).max:
delta = np.diff(xtmp).min()
if min_value == np.finfo(np.float64).max:
min_value = xtmp.min()
if max_value == np.finfo(np.float64).min:
max_value = xtmp.max()
num_edges = int(np.ceil((max_value - min_value) / delta)) + 1
x_centers = np.linspace(min_value, max_value, num=num_edges)
y = mantid.plots.datafunctions.boundaries_from_points(workspace.getAxis(1).extractValues())
else:
x_low, x_high, y_low, y_high = extent[0], extent[1], extent[2], extent[3]
if transpose:
x_low, x_high, y_low, y_high = extent[2], extent[3], extent[0], extent[1]
x_edges = np.linspace(x_low, x_high, int(xbins + 1))
x_centers = mantid.plots.datafunctions.points_from_boundaries(x_edges)
y = np.linspace(y_low, y_high, int(ybins))
counts = interpolate_y_data(workspace, x_centers, y, normalize_by_bin_width, spectrum_info=spec_info,
maxpooling=maxpooling)
if histogram2D and extent is not None:
x = x_edges
elif histogram2D:
x = mantid.plots.datafunctions.boundaries_from_points(x_centers)
else:
x = x_centers
if transpose:
return y.T, x.T, counts.T
else:
return x, y, counts
def pairwise(iterable):
a, b = tee(iterable)
next(b, None)
return zip(a, b)
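# Example (illustrative):
#   list(pairwise([1, 2, 3, 4]))  ->  [(1, 2), (2, 3), (3, 4)]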
def _workspace_indices(y_bins, workspace):
workspace_indices = []
for y in y_bins:
try:
workspace_index = workspace.getAxis(1).indexOfValue(y)
workspace_indices.append(workspace_index)
except IndexError:
workspace_indices.append(-1)
return workspace_indices
def _workspace_indices_maxpooling(y_bins, workspace):
summed_spectra_workspace = _integrate_workspace(workspace)
summed_spectra = summed_spectra_workspace.extractY()
workspace_indices = []
for y_range in pairwise(y_bins):
try:
workspace_range = range(workspace.getAxis(1).indexOfValue(np.math.floor(y_range[0])),
workspace.getAxis(1).indexOfValue(np.math.ceil(y_range[1])))
workspace_index = workspace_range[np.argmax(summed_spectra[workspace_range])]
workspace_indices.append(workspace_index)
except IndexError:
workspace_indices.append(-1)
return workspace_indices
def _integrate_workspace(workspace):
from mantid.api import AlgorithmManager
integration = AlgorithmManager.createUnmanaged("Integration")
integration.initialize()
integration.setAlwaysStoreInADS(False)
integration.setLogging(False)
integration.setChild(True)
integration.setProperty("InputWorkspace", workspace)
integration.setProperty("OutputWorkspace", "__dummy")
integration.execute()
return integration.getProperty("OutputWorkspace").value
def interpolate_y_data(workspace, x, y, normalize_by_bin_width, spectrum_info=None, maxpooling=False):
workspace_indices = _workspace_indices_maxpooling(y, workspace) \
if maxpooling else _workspace_indices(y, workspace)
counts = np.full([len(workspace_indices), x.size], np.nan, dtype=np.float64)
previous_index = -1
index = -1
for workspace_index in workspace_indices:
index += 1
# if workspace axis is beyond limits carry on
if workspace_index == -1:
continue
# avoid repeating calculations
if previous_index == workspace_index:
counts[index, :] = counts[index - 1]
continue
previous_index = workspace_index
if not (spectrum_info and spectrum_info.hasDetectors(workspace_index) and spectrum_info.isMonitor(
workspace_index)):
centers, ztmp, _, _ = get_spectrum(workspace, workspace_index,
normalize_by_bin_width=normalize_by_bin_width,
withDy=False, withDx=False)
interpolation_function = interp1d(centers, ztmp, kind='nearest', bounds_error=False,
fill_value="extrapolate")
# only set values in the range of workspace
x_range = np.where((x >= workspace.readX(workspace_index)[0]) & (x <= workspace.readX(workspace_index)[-1]))
# set values outside x data to nan
counts[index, x_range] = interpolation_function(x[x_range])
counts = np.ma.masked_invalid(counts, copy=False)
return counts
def get_matrix_2d_data(workspace, distribution, histogram2D=False, transpose=False):
'''
Get all data from a Matrix workspace that has the same number of bins
in every spectrum. It is used for 2D plots
:param workspace: Matrix workspace to extract the data from
:param distribution: if False, and the workspace contains histogram data,
the intensity will be divided by the x bin width
:param histogram2D: flag that specifies if the coordinates in the output are
-bin centers (such as for contour) for False, or
-bin edges (such as for pcolor) for True.
Returns x,y,z 2D arrays
'''
try:
workspace.blocksize()
except RuntimeError:
raise ValueError('The spectra are not the same length. Try using pcolor, pcolorfast, or pcolormesh instead')
x = workspace.extractX()
if workspace.getAxis(1).isText():
nhist = workspace.getNumberHistograms()
y = np.arange(nhist)
else:
y = workspace.getAxis(1).extractValues()
z = workspace.extractY()
try:
specInfo = workspace.spectrumInfo()
for index in range(workspace.getNumberHistograms()):
if specInfo.isMasked(index) or specInfo.isMonitor(index):
z[index, :] = np.nan
except:
pass
if workspace.isHistogramData():
if not distribution:
z /= x[:, 1:] - x[:, 0:-1]
if histogram2D:
if len(y) == z.shape[0]:
y = boundaries_from_points(y)
x = np.vstack((x, x[-1]))
else:
x = .5 * (x[:, 0:-1] + x[:, 1:])
if len(y) == z.shape[0] + 1:
y = points_from_boundaries(y)
else:
if histogram2D:
if common_x(x):
x = np.tile(boundaries_from_points(x[0]), z.shape[0] + 1).reshape(z.shape[0] + 1, -1)
else:
x = np.vstack((x, x[-1]))
x = np.array([boundaries_from_points(xi) for xi in x])
if len(y) == z.shape[0]:
y = boundaries_from_points(y)
else:
if len(y) == z.shape[0] + 1:
y = points_from_boundaries(y)
y = np.tile(y, x.shape[1]).reshape(x.shape[1], x.shape[0]).transpose()
z = np.ma.masked_invalid(z)
if transpose:
return y.T, x.T, z.T
else:
return x, y, z
def get_uneven_data(workspace, distribution):
'''
Function to get data for uneven workspace2Ds, such as
that pcolor, pcolorfast, and pcolormesh will plot axis aligned rectangles
:param workspace: a workspace2d
:param distribution: if False, and the workspace contains histogram data,
the intensity will be divided by the x bin width
Returns three lists. Each element in the x list is an array of boundaries
for a spectra. Each element in the y list is a 2 element array with the extents
of a particular spectra. The z list contains arrays of intensities at bin centers
'''
z = []
x = []
y = []
nhist = workspace.getNumberHistograms()
yvals = workspace.getAxis(1).extractValues()
if workspace.getAxis(1).isText():
yvals = np.arange(nhist)
if len(yvals) == nhist:
yvals = boundaries_from_points(yvals)
try:
specInfo = workspace.spectrumInfo()
except:
specInfo = None
for index in range(nhist):
xvals = workspace.readX(index)
zvals = workspace.readY(index)
if workspace.isHistogramData():
if not distribution:
zvals = zvals / (xvals[1:] - xvals[0:-1])
else:
xvals = boundaries_from_points(xvals)
if specInfo and specInfo.hasDetectors(index) and (specInfo.isMasked(index) or specInfo.isMonitor(index)):
zvals = np.full_like(zvals, np.nan, dtype=np.double)
zvals = np.ma.masked_invalid(zvals)
z.append(zvals)
x.append(xvals)
y.append([yvals[index], yvals[index + 1]])
return x, y, z
def get_data_uneven_flag(workspace, **kwargs):
'''
Helper function that allows :meth:`matplotlib.axes.Axes.pcolor`,
:meth:`matplotlib.axes.Axes.pcolorfast`, and :meth:`matplotlib.axes.Axes.pcolormesh`
to plot rectangles parallel to the axes even if the data is not
on a regular grid.
:param workspace: a workspace2d
if axisaligned keyword is available and True or if the workspace does
not have a constant number of bins, it will return true, otherwise false
'''
aligned = kwargs.pop('axisaligned', False)
try:
workspace.blocksize()
except RuntimeError:
aligned = True
return aligned, kwargs
def check_resample_to_regular_grid(ws, **kwargs):
if isinstance(ws, MatrixWorkspace):
aligned = kwargs.pop('axisaligned', False)
if aligned or not ws.isCommonBins():
return True, kwargs
x = ws.readX(0)
difference = np.diff(x)
if x.size > 1 and not np.allclose(difference[:-1], difference[0]):
return True, kwargs
return False, kwargs
# ====================================================
# extract logs
# ====================================================
def get_sample_log(workspace, **kwargs):
LogName = kwargs.pop('LogName')
ExperimentInfo = kwargs.pop('ExperimentInfo', 0)
if isinstance(workspace, MultipleExperimentInfos):
run = workspace.getExperimentInfo(ExperimentInfo).run()
else:
run = workspace.run()
if not run.hasProperty(LogName):
raise ValueError('The workspace does not contain the {} sample log'.format(LogName))
tsp = run[LogName]
try:
units = tsp.units
except UnicodeDecodeError as exc:
mantid.kernel.logger.warning("Error retrieving units for log {}: {}".format(LogName, str(exc)))
units = "unknown"
if not isinstance(tsp, (mantid.kernel.FloatTimeSeriesProperty,
mantid.kernel.Int32TimeSeriesProperty,
mantid.kernel.Int64TimeSeriesProperty)):
raise RuntimeError('This function can only plot Float or Int TimeSeriesProperties objects')
Filtered = kwargs.pop('Filtered', True)
if not Filtered:
# these methods access the unfiltered data
times = tsp.times.astype('datetime64[us]')
y = tsp.value
else:
times = tsp.filtered_times.astype('datetime64[us]')
y = tsp.filtered_value
FullTime = kwargs.pop('FullTime', False)
StartFromLog = kwargs.pop('StartFromLog', False)
if FullTime:
x = times.astype(datetime.datetime)
else:
# Compute relative time, preserving t=0 at run start. Logs can record before
# run start and will have negative time offset
try:
t0 = run.startTime().to_datetime64().astype('datetime64[us]')
except RuntimeError:
mantid.kernel.logger.warning("Workspace has no start time. Assume t0 as first log time.")
t0 = times[0]
if not StartFromLog:
try:
t0 = run['proton_charge'].times.astype('datetime64[us]')[0]
except:
pass # TODO: Maybe raise a warning?
x = (times - t0).astype(float) * 1e-6
return x, y, FullTime, LogName, units, kwargs
# ====================================================
# Plot functionality
# ====================================================
def get_axes_labels(workspace, indices=None, normalize_by_bin_width=True, use_latex=True):
"""
Get axis labels from a Workspace2D or an MDHistoWorkspace
Returns a tuple. The first element is the quantity label, such as "Intensity" or "Counts".
All other elements in the tuple are labels for axes.
Some of them are latex formatted already.
If MDWorkspace then the last element will be the values selected by the indices, to be set as title.
:param workspace: :class:`mantid.api.MatrixWorkspace` or :class:`mantid.api.IMDHistoWorkspace`
:param indices:
:param normalize_by_bin_width: bool: Plotting workspace normalized by bin width
:param use_latex: bool: return y-unit label in Latex form
"""
if isinstance(workspace, MultipleExperimentInfos):
axes_labels = ['Intensity']
title = ''
if indices is None:
dims = workspace.getNonIntegratedDimensions()
else:
dims = []
for n in range(workspace.getNumDims()):
d = workspace.getDimension(n)
if indices[n] == slice(None):
dims.append(d)
else:
title += '{0}={1:.4}; '.format(d.name,
(d.getX(indices[n]) + d.getX(indices[n] + 1)) / 2)
for d in dims:
axis_title = d.name.replace('DeltaE', r'$\Delta E$')
axis_unit = d.getUnits().replace('Angstrom^-1', r'$\AA^{-1}$')
axis_unit = axis_unit.replace('DeltaE', 'meV')
axis_unit = axis_unit.replace('Angstrom', r'$\AA$')
axis_unit = axis_unit.replace('MomentumTransfer', r'$\AA^{-1}$')
axes_labels.append('{0} ({1})'.format(axis_title, axis_unit))
axes_labels.append(title.strip())
else:
# For matrix workspaces, return a tuple of ``(YUnit, <other units>)``
axes_labels = [workspace.YUnitLabel(useLatex=use_latex,
plotAsDistribution=normalize_by_bin_width)]
for index in range(workspace.axes()):
axis = workspace.getAxis(index)
unit = axis.getUnit()
if len(str(unit.symbol())) > 0:
unit = '{} (${}$)'.format(unit.caption(), unit.symbol().latex())
else:
unit = unit.caption()
axes_labels.append(unit)
return tuple(axes_labels)
def get_data_from_errorbar_container(err_cont):
"""Get plot coordinates and errorbar sizes from ErrorbarContainer"""
x_segments = _get_x_errorbar_segments(err_cont)
y_segments = _get_y_errorbar_segments(err_cont)
x, y, x_errs, y_errs = [], [], None, None
if x_segments:
x_errs = []
for vertex in x_segments:
x_errs.append((vertex[1][0] - vertex[0][0]) / 2)
x.append((vertex[0][0] + vertex[1][0]) / 2)
y.append((vertex[0][1] + vertex[1][1]) / 2)
if y_segments:
y_errs = [(vertex[1][1] - vertex[0][1]) / 2 for vertex in y_segments]
else:
y_errs = []
for vertex in y_segments:
y_errs.append((vertex[1][1] - vertex[0][1]) / 2)
x.append((vertex[0][0] + vertex[1][0]) / 2)
y.append((vertex[0][1] + vertex[1][1]) / 2)
return x, y, x_errs, y_errs
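# Minimal sketch of the expected behaviour (illustrative, assuming a plain matplotlib Axes `ax`):
#   container = ax.errorbar([1., 2.], [3., 4.], yerr=[0.5, 0.25])
#   x, y, x_errs, y_errs = get_data_from_errorbar_container(container)
#   # x ~ [1., 2.], y ~ [3., 4.], x_errs is None (no x errors), y_errs ~ [0.5, 0.25]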
def _get_x_errorbar_segments(err_cont):
if err_cont.has_xerr:
return err_cont[2][0].get_segments()
return None
def _get_y_errorbar_segments(err_cont):
if err_cont.has_yerr and not err_cont.has_xerr:
return err_cont[2][0].get_segments()
elif err_cont.has_yerr and err_cont.has_xerr:
return err_cont[2][1].get_segments()
else:
return None
def get_errorbar_bounds(container):
min_x, max_x, min_y, max_y = None, None, None, None
x_segments = _get_x_errorbar_segments(container)
if x_segments:
coords = [array[:, 0] for array in x_segments]
max_x = np.max(coords)
min_x = np.min(coords)
y_segments = _get_y_errorbar_segments(container)
if y_segments:
coords = [array[:, 1] for array in y_segments]
max_y = np.max(coords)
min_y = np.min(coords)
return min_x, max_x, min_y, max_y
def errorbars_hidden(err_container):
"""
Return True if errorbars in ErrorbarContainer are not visible
:param err_container: ErrorbarContainer to find visibility of
"""
if not isinstance(err_container, ErrorbarContainer):
return True
hidden = True
for lines in err_container[1:]:
for line in lines:
hidden = hidden and (not line.get_visible())
return hidden
def set_errorbars_hidden(container, hide):
"""
Set the visibility on all lines in an ErrorbarContainer.
:param hide: Whether or not to hide the errors.
:type hide: bool
"""
if not isinstance(container, ErrorbarContainer):
return
# hide gets inverted below, as matplotlib uses `visible`, which has the opposite logic:
# if hide is True, visible must be False, and vice-versa
for bar_lines in container[1:]:
if bar_lines:
for line in bar_lines:
line.set_visible(not hide)
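# --- Illustrative example (added for clarity; not part of the original module) ---
# Sketch showing set_errorbars_hidden / errorbars_hidden working together:
# hiding flips the visibility of every artist in the container except the
# data line itself.
def _example_toggle_errorbars():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    container = ax.errorbar([0, 1, 2], [0, 1, 4], yerr=0.3, capsize=3)
    assert not errorbars_hidden(container)
    set_errorbars_hidden(container, hide=True)
    assert errorbars_hidden(container)
    plt.close(fig)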
# ====================================================
# Waterfall plots
# ====================================================
def set_initial_dimensions(ax):
# Set the width and height which are used to calculate the offset percentage for waterfall plots.
# This means that the curves in a waterfall plot are always offset by the same amount, even if the
# plot limits change.
x_lim, y_lim = ax.get_xlim(), ax.get_ylim()
ax.width = x_lim[1] - x_lim[0]
ax.height = y_lim[1] - y_lim[0]
def remove_and_return_errorbar_cap_lines(ax):
# Matplotlib holds the line objects representing errorbar caps in the same list as the actual curves on a plot.
# This causes problems for waterfall plots so here they are removed from the list and placed into a different
    # list, which is returned so they can be re-added later.
errorbar_cap_lines = []
for line in ax.get_lines():
# The lines with the label "_nolegend_" are either actual curves with errorbars, or errorbar cap lines.
# To check if it is an actual curve, we attempt to find the ErrorbarContainer that matches the line object.
if line.get_label() == "_nolegend_":
line_is_errorbar_cap = True
for container in ax.containers:
if isinstance(container, ErrorbarContainer):
if container[0] == line:
line_is_errorbar_cap = False
break
if line_is_errorbar_cap:
errorbar_cap_lines.append(ax.lines.pop(ax.lines.index(line)))
return errorbar_cap_lines
def set_waterfall_toolbar_options_enabled(ax):
toolbar = ax.get_figure().canvas.toolbar
if toolbar:
toolbar.waterfall_conversion(ax.is_waterfall())
def get_waterfall_fills(ax):
return [collection for collection in ax.collections if isinstance(collection, PolyCollection)]
def waterfall_update_fill(ax):
# Get the colours of each fill so they can be reapplied after updating.
colours = []
for collection in ax.collections:
if isinstance(collection, PolyCollection):
colours.append(collection.get_facecolor())
waterfall_remove_fill(ax)
waterfall_create_fill(ax)
poly_collections = get_waterfall_fills(ax)
line_colours = True
# If there are more fill areas than colours, this means that new curves have been added to the plot
# (overplotting). In which case, we need to determine whether the fill colours are set to match the line
# colours by checking that the colour of each fill that existed previously is the same as the line it belongs
# to. If so, the list of colours is appended to with the colours of the new lines. Otherwise the fills are
# all set to the same colour and so the list of colours is extended with the same colour for each new curve.
if len(poly_collections) > len(colours):
for i in range(len(colours) - 1):
if convert_color_to_hex(colours[i][0]) != ax.get_lines()[i].get_color():
line_colours = False
break
colours_length = len(colours)
if line_colours:
for i in range(colours_length, len(poly_collections)):
colours.append(ax.get_lines()[i].get_color())
else:
for i in range(colours_length, len(poly_collections)):
colours.append(colours[0])
for i, collection in enumerate(poly_collections):
collection.set_color(colours[i])
def apply_waterfall_offset_to_errorbars(ax, line, amount_to_move_x, amount_to_move_y, index):
for container in ax.containers:
# Find the ErrorbarContainer that corresponds to the current line.
if isinstance(container, ErrorbarContainer) and container[0] == line:
# Shift the data line and the errorbar caps
for line in (container[0],) + container[1]:
line.set_xdata(line.get_xdata() + amount_to_move_x)
line.set_ydata(line.get_ydata() + amount_to_move_y)
if index == 0:
line.set_zorder(len(ax.get_lines()))
else:
line.set_zorder(ax.get_lines()[index - 1].get_zorder() - 1)
# Shift the errorbars
for bar_line_col in container[2]:
segments = bar_line_col.get_segments()
for point in segments:
for row in range(2):
point[row][1] += amount_to_move_y
for column in range(2):
point[column][0] += amount_to_move_x
bar_line_col.set_segments(segments)
bar_line_col.set_zorder((len(ax.get_lines()) - index) + 1)
break
def convert_single_line_to_waterfall(ax, index, x=None, y=None, need_to_update_fill=False):
line = ax.get_lines()[index]
amount_to_move_x = index * ax.width * (ax.waterfall_x_offset / 500) if x is None else \
index * ax.width * ((x - ax.waterfall_x_offset) / 500)
amount_to_move_y = index * ax.height * (ax.waterfall_y_offset / 500) if y is None else \
index * ax.height * ((y - ax.waterfall_y_offset) / 500)
if line.get_label() == "_nolegend_":
apply_waterfall_offset_to_errorbars(ax, line, amount_to_move_x, amount_to_move_y, index)
else:
line.set_xdata(line.get_xdata() + amount_to_move_x)
line.set_ydata(line.get_ydata() + amount_to_move_y)
# Ensures the more offset lines are drawn behind the less offset ones
if index == 0:
line.set_zorder(len(ax.get_lines()))
else:
line.set_zorder(ax.get_lines()[index - 1].get_zorder() - 1)
# If the curves are filled and the fill has been set to match the line colour and the line colour has changed
# then the fill's colour is updated.
if need_to_update_fill:
fill = get_waterfall_fill_for_curve(ax, index)
fill.set_color(line.get_color())
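# --- Illustrative sketch (added for clarity; not part of the original module) ---
# The waterfall shift applied above is simply a fixed fraction of the axes'
# initial width/height per curve index.  A plain-Python restatement of that
# arithmetic; ``axes_width``/``axes_height`` stand in for ``ax.width``/``ax.height``
# and the offset values below are made up for illustration.
def _example_waterfall_offset(index, axes_width=10.0, axes_height=100.0,
                              x_offset=20.0, y_offset=20.0):
    amount_to_move_x = index * axes_width * (x_offset / 500)
    amount_to_move_y = index * axes_height * (y_offset / 500)
    return amount_to_move_x, amount_to_move_y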
def set_waterfall_fill_visible(ax, index):
if not ax.waterfall_has_fill():
return
# Sets the filled area to match the visibility of the line it belongs to.
line = ax.get_lines()[index]
fill = get_waterfall_fill_for_curve(ax, index)
fill.set_visible(line.get_visible())
def get_waterfall_fill_for_curve(ax, index):
# Takes the index of a curve and returns that curve's filled area.
i = 0
for collection in ax.collections:
if isinstance(collection, PolyCollection):
if i == index:
fill = collection
break
i += 1
return fill
def waterfall_fill_is_line_colour(ax):
i = 0
# Check that for each line, the fill area is the same colour as the line.
for collection in ax.collections:
if isinstance(collection, PolyCollection):
line_colour = ax.get_lines()[i].get_color()
poly_colour = convert_color_to_hex(collection.get_facecolor()[0])
if line_colour != poly_colour:
return False
i += 1
return True
def waterfall_create_fill(ax):
if ax.waterfall_has_fill():
return
errorbar_cap_lines = remove_and_return_errorbar_cap_lines(ax)
for i, line in enumerate(ax.get_lines()):
bottom_line = [min(line.get_ydata()) - ((i * ax.height) / 100)] * len(line.get_ydata())
fill = ax.fill_between(line.get_xdata(), line.get_ydata(), bottom_line)
fill.set_zorder((len(ax.get_lines()) - i) + 1)
set_waterfall_fill_visible(ax, i)
ax.lines += errorbar_cap_lines
def waterfall_remove_fill(ax):
ax.collections[:] = filter(lambda x: not isinstance(x, PolyCollection), ax.collections)
ax.get_figure().canvas.draw()
def solid_colour_fill(ax, colour):
# Add the fill areas if there aren't any already.
if not ax.waterfall_has_fill():
waterfall_create_fill(ax)
for i, collection in enumerate(ax.collections):
if isinstance(collection, PolyCollection):
# This function is called every time the colour line edit is changed so it's possible
# that the current input is not a valid colour, such as if the user hasn't finished entering
# a colour. So if setting the colour fails, the function just stops.
try:
collection.set_color(colour)
            except Exception:  # e.g. an incomplete or invalid colour string
return
ax.get_figure().canvas.draw()
def line_colour_fill(ax):
# Add the fill areas if there aren't any already.
if not ax.waterfall_has_fill():
waterfall_create_fill(ax)
i = 0
for collection in ax.collections:
if isinstance(collection, PolyCollection):
colour = ax.get_lines()[i].get_color()
collection.set_color(colour)
# Only want the counter to iterate if the current collection is a PolyCollection (the fill areas) since
# the axes may have other collections which can be ignored.
i = i + 1
ax.get_figure().canvas.draw()
def update_colorbar_scale(figure, image, scale, vmin, vmax):
""""
Updates the colorbar to the scale and limits given.
:param figure: A matplotlib figure instance
:param image: The matplotlib image containing the colorbar
:param scale: The norm scale of the colorbar, this should be a matplotlib colormap norm type
:param vmin: the minimum value on the colorbar
:param vmax: the maximum value on the colorbar
"""
if vmin <= 0 and scale == LogNorm:
vmin = 0.0001 # Avoid 0 log scale error
mantid.kernel.logger.warning(
"Scale is set to logarithmic so non-positive min value has been changed to 0.0001.")
if vmax <= 0 and scale == LogNorm:
vmax = 1 # Avoid 0 log scale error
mantid.kernel.logger.warning("Scale is set to logarithmic so non-positive max value has been changed to 1.")
image.set_norm(scale(vmin=vmin, vmax=vmax))
if image.colorbar:
label = image.colorbar._label
image.colorbar.remove()
locator = None
if scale == LogNorm:
locator = LogLocator(subs=np.arange(1, 10))
if locator.tick_values(vmin=vmin, vmax=vmax).size == 0:
locator = LogLocator()
mantid.kernel.logger.warning("Minor ticks on colorbar scale cannot be shown "
"as the range between min value and max value is too large")
figure.subplots_adjust(wspace=0.5, hspace=0.5)
colorbar = figure.colorbar(image, ax=figure.axes, ticks=locator, pad=0.06)
colorbar.set_label(label)
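# --- Illustrative example (added for clarity; not part of the original module) ---
# Matplotlib-only sketch of the core of update_colorbar_scale: clamp the
# minimum, swap the image norm for a LogNorm and rebuild the colorbar.  The
# real helper above also warns through mantid.kernel.logger, which is left
# out here so the sketch stays self-contained.
def _example_log_colorbar():
    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib.colors import LogNorm
    fig, ax = plt.subplots()
    image = ax.imshow(np.random.rand(10, 10) + 0.1)
    fig.colorbar(image, ax=ax)
    vmin = max(float(image.get_array().min()), 1e-4)
    vmax = float(image.get_array().max())
    image.set_norm(LogNorm(vmin=vmin, vmax=vmax))
    image.colorbar.remove()
    fig.colorbar(image, ax=ax)
    plt.close(fig)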
def add_colorbar_label(colorbar, axes):
"""
Adds a label to the colorbar if every axis on the figure has the same label.
:param colorbar: the colorbar to label.
:param axes: the axes that the colorbar belongs to.
"""
colorbar_labels = [ax.colorbar_label for ax in axes if hasattr(ax, 'colorbar_label')]
if colorbar_labels and colorbar_labels.count(colorbar_labels[0]) == len(colorbar_labels):
colorbar.set_label(colorbar_labels[0])
def get_images_from_figure(figure):
"""Return a list of images in the given figure excluding any colorbar images"""
axes = figure.get_axes()
all_images = []
for ax in axes:
all_images += ax.images + [col for col in ax.collections if isinstance(col, QuadMesh)
or isinstance(col, Poly3DCollection)]
# remove any colorbar images
colorbars = [img.colorbar.solids for img in all_images if img.colorbar]
images = [img for img in all_images if img not in colorbars]
return images
def get_axes_from_figure(figure):
"""Return a list of axes in the given figure excluding any colorbar axes"""
images = get_images_from_figure(figure)
axes = [img.axes for img in images]
return axes
def get_legend_handles(ax):
"""
Get a list of the Line2D and ErrorbarContainer objects to be
included in the legend so that the order is always the same.
"""
handles = []
for line in ax.lines:
if line.get_label() == "_nolegend_":
# If the line has no label find the ErrorbarContainer that corresponds to it (if one exists)
for container in ax.containers:
if isinstance(container, ErrorbarContainer) and container[0] == line:
handles.append(container)
break
else:
handles.append(line)
return handles
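# --- Illustrative example (added for clarity; not part of the original module) ---
# Sketch showing that get_legend_handles swaps a curve's unlabelled Line2D for
# its ErrorbarContainer when one exists, so legend entries keep a stable order.
def _example_legend_handles():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 1], [0, 1], label='plain line')
    err = ax.errorbar([0, 1], [1, 0], yerr=0.1, label='with errors')
    handles = get_legend_handles(ax)
    assert handles[0] is ax.lines[0] and handles[1] is err
    plt.close(fig)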
|
gpl-3.0
|
Eric89GXL/mne-python
|
mne/viz/utils.py
|
2
|
88337
|
# -*- coding: utf-8 -*-
"""Utility functions for plotting M/EEG data."""
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Mainak Jas <[email protected]>
# Stefan Appelhoff <[email protected]>
# Clemens Brunner <[email protected]>
# Daniel McCloy <[email protected]>
#
# License: Simplified BSD
from collections import defaultdict
from contextlib import contextmanager
from functools import partial
import difflib
import webbrowser
import tempfile
import math
import numpy as np
from copy import deepcopy
from distutils.version import LooseVersion
import warnings
from ..defaults import _handle_default
from ..fixes import _get_status
from ..io import show_fiff, Info
from ..io.constants import FIFF
from ..io.pick import (channel_type, channel_indices_by_type, pick_channels,
_pick_data_channels, _DATA_CH_TYPES_SPLIT,
_DATA_CH_TYPES_ORDER_DEFAULT, _VALID_CHANNEL_TYPES,
pick_info, _picks_by_type, pick_channels_cov,
_contains_ch_type)
from ..io.meas_info import create_info
from ..rank import compute_rank
from ..io.proj import setup_proj
from ..utils import (verbose, get_config, warn, _check_ch_locs, _check_option,
logger, fill_doc, _pl, _check_sphere, _ensure_int)
from ..selection import (read_selection, _SELECTIONS, _EEG_SELECTIONS,
_divide_to_regions)
from ..transforms import apply_trans
_channel_type_prettyprint = {'eeg': "EEG channel", 'grad': "Gradiometer",
'mag': "Magnetometer", 'seeg': "sEEG channel",
'eog': "EOG channel", 'ecg': "ECG sensor",
'emg': "EMG sensor", 'ecog': "ECoG channel",
'misc': "miscellaneous sensor"}
def _setup_vmin_vmax(data, vmin, vmax, norm=False):
"""Handle vmin and vmax parameters for visualizing topomaps.
For the normal use-case (when `vmin` and `vmax` are None), the parameter
`norm` drives the computation. When norm=False, data is supposed to come
    from a mag and the output tuple (vmin, vmax) is a symmetric range
    (-x, x) where x is max(abs(data)). When norm=True (a.k.a. data is the
    L2 norm of a gradiometer pair) the output tuple corresponds to (0, x).
    Otherwise, vmin and vmax are used as given; either may also be a callable
    that is applied to the data to compute the corresponding limit.
"""
should_warn = False
if vmax is None and vmin is None:
vmax = np.abs(data).max()
vmin = 0. if norm else -vmax
if vmin == 0 and np.min(data) < 0:
should_warn = True
else:
if callable(vmin):
vmin = vmin(data)
elif vmin is None:
vmin = 0. if norm else np.min(data)
if vmin == 0 and np.min(data) < 0:
should_warn = True
if callable(vmax):
vmax = vmax(data)
elif vmax is None:
vmax = np.max(data)
if should_warn:
warn_msg = ("_setup_vmin_vmax output a (min={vmin}, max={vmax})"
" range whereas the minimum of data is {data_min}")
warn_val = {'vmin': vmin, 'vmax': vmax, 'data_min': np.min(data)}
warn(warn_msg.format(**warn_val), UserWarning)
return vmin, vmax
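# --- Illustrative example (added for clarity; not part of this module's API) ---
# Small sketch of _setup_vmin_vmax's default behaviour: without explicit
# limits, signed data gets a symmetric range and norm data gets a [0, max]
# range.
def _example_setup_vmin_vmax():
    data = np.array([-3., 1., 2.])
    assert _setup_vmin_vmax(data, None, None) == (-3., 3.)
    assert _setup_vmin_vmax(np.abs(data), None, None, norm=True) == (0., 3.)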
def plt_show(show=True, fig=None, **kwargs):
"""Show a figure while suppressing warnings.
Parameters
----------
show : bool
Show the figure.
fig : instance of Figure | None
If non-None, use fig.show().
**kwargs : dict
Extra arguments for :func:`matplotlib.pyplot.show`.
"""
from matplotlib import get_backend
import matplotlib.pyplot as plt
if show and get_backend() != 'agg':
(fig or plt).show(**kwargs)
def tight_layout(pad=1.2, h_pad=None, w_pad=None, fig=None):
"""Adjust subplot parameters to give specified padding.
.. note:: For plotting please use this function instead of
``plt.tight_layout``.
Parameters
----------
pad : float
Padding between the figure edge and the edges of subplots, as a
fraction of the font-size.
h_pad : float
Padding height between edges of adjacent subplots.
Defaults to ``pad_inches``.
w_pad : float
Padding width between edges of adjacent subplots.
Defaults to ``pad_inches``.
fig : instance of Figure
Figure to apply changes to.
Notes
-----
This will not force constrained_layout=False if the figure was created
with that method.
"""
import matplotlib.pyplot as plt
fig = plt.gcf() if fig is None else fig
fig.canvas.draw()
constrained = fig.get_constrained_layout()
if constrained:
return # no-op
try: # see https://github.com/matplotlib/matplotlib/issues/2654
with warnings.catch_warnings(record=True) as ws:
fig.tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad)
except Exception:
try:
with warnings.catch_warnings(record=True) as ws:
fig.set_tight_layout(dict(pad=pad, h_pad=h_pad, w_pad=w_pad))
except Exception:
warn('Matplotlib function "tight_layout" is not supported.'
' Skipping subplot adjustment.')
return
for w in ws:
w_msg = str(w.message) if hasattr(w, 'message') else w.get_message()
if not w_msg.startswith('This figure includes Axes'):
warn(w_msg, w.category, 'matplotlib')
def _check_delayed_ssp(container):
"""Handle interactive SSP selection."""
if container.proj is True or\
all(p['active'] for p in container.info['projs']):
raise RuntimeError('Projs are already applied. Please initialize'
' the data with proj set to False.')
elif len(container.info['projs']) < 1:
raise RuntimeError('No projs found in evoked.')
def _validate_if_list_of_axes(axes, obligatory_len=None):
"""Validate whether input is a list/array of axes."""
from matplotlib.axes import Axes
if obligatory_len is not None and not isinstance(obligatory_len, int):
        raise ValueError('obligatory_len must be None or int, got %s '
                         'instead.' % type(obligatory_len))
if not isinstance(axes, (list, np.ndarray)):
raise ValueError('axes must be a list or numpy array of matplotlib '
'axes objects, got %s instead.' % type(axes))
if isinstance(axes, np.ndarray) and axes.ndim > 1:
raise ValueError('if input is a numpy array, it must be '
'one-dimensional. The received numpy array has %d '
'dimensions however. Try using ravel or flatten '
'method of the array.' % axes.ndim)
is_correct_type = np.array([isinstance(x, Axes)
for x in axes])
if not np.all(is_correct_type):
first_bad = np.where(np.logical_not(is_correct_type))[0][0]
raise ValueError('axes must be a list or numpy array of matplotlib '
'axes objects while one of the list elements is '
'%s.' % type(axes[first_bad]))
if obligatory_len is not None and not len(axes) == obligatory_len:
raise ValueError('axes must be a list/array of length %d, while the'
' length is %d' % (obligatory_len, len(axes)))
def mne_analyze_colormap(limits=[5, 10, 15], format='mayavi'):
"""Return a colormap similar to that used by mne_analyze.
Parameters
----------
limits : list (or array) of length 3 or 6
Bounds for the colormap, which will be mirrored across zero if length
3, or completely specified (and potentially asymmetric) if length 6.
format : str
Type of colormap to return. If 'matplotlib', will return a
matplotlib.colors.LinearSegmentedColormap. If 'mayavi', will
return an RGBA array of shape (256, 4).
Returns
-------
cmap : instance of colormap | array
A teal->blue->gray->red->yellow colormap. See docstring of the 'format'
argument for further details.
Notes
-----
    This will return a colormap that displays correctly for data that have
    been scaled by the plotting function to span [-fmax, fmax].
""" # noqa: E501
# Ensure limits is an array
limits = np.asarray(limits, dtype='float')
if len(limits) != 3 and len(limits) != 6:
raise ValueError('limits must have 3 or 6 elements')
if len(limits) == 3 and any(limits < 0.):
raise ValueError('if 3 elements, limits must all be non-negative')
if any(np.diff(limits) <= 0):
raise ValueError('limits must be monotonically increasing')
if format == 'matplotlib':
from matplotlib import colors
if len(limits) == 3:
limits = (np.concatenate((-np.flipud(limits), limits)) +
limits[-1]) / (2 * limits[-1])
else:
limits = (limits - np.min(limits)) / np.max(limits -
np.min(limits))
cdict = {'red': ((limits[0], 0.0, 0.0),
(limits[1], 0.0, 0.0),
(limits[2], 0.5, 0.5),
(limits[3], 0.5, 0.5),
(limits[4], 1.0, 1.0),
(limits[5], 1.0, 1.0)),
'green': ((limits[0], 1.0, 1.0),
(limits[1], 0.0, 0.0),
(limits[2], 0.5, 0.5),
(limits[3], 0.5, 0.5),
(limits[4], 0.0, 0.0),
(limits[5], 1.0, 1.0)),
'blue': ((limits[0], 1.0, 1.0),
(limits[1], 1.0, 1.0),
(limits[2], 0.5, 0.5),
(limits[3], 0.5, 0.5),
(limits[4], 0.0, 0.0),
(limits[5], 0.0, 0.0)),
'alpha': ((limits[0], 1.0, 1.0),
(limits[1], 1.0, 1.0),
(limits[2], 0.0, 0.0),
(limits[3], 0.0, 0.0),
(limits[4], 1.0, 1.0),
(limits[5], 1.0, 1.0)),
}
return colors.LinearSegmentedColormap('mne_analyze', cdict)
elif format == 'mayavi':
if len(limits) == 3:
limits = np.concatenate((-np.flipud(limits), [0], limits)) /\
limits[-1]
else:
limits = np.concatenate((limits[:3], [0], limits[3:]))
limits /= np.max(np.abs(limits))
r = np.array([0, 0, 0, 0, 1, 1, 1])
g = np.array([1, 0, 0, 0, 0, 0, 1])
b = np.array([1, 1, 1, 0, 0, 0, 0])
a = np.array([1, 1, 0, 0, 0, 1, 1])
xp = (np.arange(256) - 128) / 128.0
colormap = np.r_[[np.interp(xp, limits, 255 * c)
for c in [r, g, b, a]]].T
return colormap
else:
raise ValueError('format must be either matplotlib or mayavi')
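# --- Illustrative example (added for clarity; not part of this module's API) ---
# Sketch of both output formats of mne_analyze_colormap: a matplotlib colormap
# object and a (256, 4) RGBA lookup table suitable for mayavi.
def _example_mne_analyze_colormap():
    cmap = mne_analyze_colormap(limits=[5, 10, 15], format='matplotlib')
    lut = mne_analyze_colormap(limits=[5, 10, 15], format='mayavi')
    assert lut.shape == (256, 4)
    return cmap, lut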
@contextmanager
def _events_off(obj):
obj.eventson = False
try:
yield
finally:
obj.eventson = True
def _toggle_proj(event, params, all_=False):
"""Perform operations when proj boxes clicked."""
# read options if possible
if 'proj_checks' in params:
bools = _get_status(params['proj_checks'])
if all_:
new_bools = [not all(bools)] * len(bools)
with _events_off(params['proj_checks']):
for bi, (old, new) in enumerate(zip(bools, new_bools)):
if old != new:
params['proj_checks'].set_active(bi)
bools[bi] = new
for bi, (b, p) in enumerate(zip(bools, params['projs'])):
# see if they tried to deactivate an active one
if not b and p['active']:
bools[bi] = True
else:
proj = params.get('apply_proj', True)
bools = [proj] * len(params['projs'])
compute_proj = False
if 'proj_bools' not in params:
compute_proj = True
elif not np.array_equal(bools, params['proj_bools']):
compute_proj = True
# if projectors changed, update plots
if compute_proj is True:
params['plot_update_proj_callback'](params, bools)
def _get_channel_plotting_order(order, ch_types, picks=None):
"""Determine channel plotting order for browse-style Raw/Epochs plots."""
if order is None:
# for backward compat, we swap the first two to keep grad before mag
ch_type_order = list(_DATA_CH_TYPES_ORDER_DEFAULT)
ch_type_order = tuple(['grad', 'mag'] + ch_type_order[2:])
order = [pick_idx for order_type in ch_type_order
for pick_idx, pick_type in enumerate(ch_types)
if order_type == pick_type]
elif not isinstance(order, (np.ndarray, list, tuple)):
raise ValueError('order should be array-like; got '
f'"{order}" ({type(order)}).')
if picks is not None:
order = [ch for ch in order if ch in picks]
return np.asarray(order)
def _make_event_color_dict(event_color, events=None, event_id=None):
"""Make or validate a dict mapping event ids to colors."""
from .misc import _handle_event_colors
if isinstance(event_color, dict): # if event_color is a dict, validate it
event_id = dict() if event_id is None else event_id
event_color = {_ensure_int(event_id.get(key, key), 'event_color key'):
value for key, value in event_color.items()}
default = event_color.pop(-1, None)
default_factory = None if default is None else lambda: default
new_dict = defaultdict(default_factory)
for key, value in event_color.items():
if key < 1:
raise KeyError('event_color keys must be strictly positive, '
f'or -1 (cannot use {key})')
new_dict[key] = value
return new_dict
elif event_color is None: # make a dict from color cycle
uniq_events = set() if events is None else np.unique(events[:, 2])
return _handle_event_colors(event_color, uniq_events, event_id)
else: # if event_color is a MPL color-like thing, use it for all events
return defaultdict(lambda: event_color)
def _prepare_trellis(n_cells, ncols, nrows='auto', title=False, colorbar=False,
size=1.3, sharex=False, sharey=False):
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
if n_cells == 1:
nrows = ncols = 1
elif isinstance(ncols, int) and n_cells <= ncols:
nrows, ncols = 1, n_cells
else:
if ncols == 'auto' and nrows == 'auto':
nrows = math.floor(math.sqrt(n_cells))
ncols = math.ceil(n_cells / nrows)
elif ncols == 'auto':
ncols = math.ceil(n_cells / nrows)
elif nrows == 'auto':
nrows = math.ceil(n_cells / ncols)
else:
naxes = ncols * nrows
if naxes < n_cells:
raise ValueError("Cannot plot {} axes in a {} by {} "
"figure.".format(n_cells, nrows, ncols))
if colorbar:
ncols += 1
width = size * ncols
height = (size + max(0, 0.1 * (4 - size))) * nrows + bool(title) * 0.5
height_ratios = None
g_kwargs = {}
figure_nobar(figsize=(width * 1.5, height * 1.5))
gs = GridSpec(nrows, ncols, height_ratios=height_ratios, **g_kwargs)
axes = []
if colorbar:
# exclude last axis of each row except top row, which is for colorbar
exclude = set(range(2 * ncols - 1, nrows * ncols, ncols))
ax_idxs = sorted(set(range(nrows * ncols)) - exclude)[:n_cells + 1]
else:
ax_idxs = range(n_cells)
for ax_idx in ax_idxs:
subplot_kw = dict()
if ax_idx > 0:
if sharex:
subplot_kw.update(sharex=axes[0])
if sharey:
subplot_kw.update(sharey=axes[0])
axes.append(plt.subplot(gs[ax_idx], **subplot_kw))
fig = axes[0].get_figure()
return fig, axes, ncols, nrows
def _draw_proj_checkbox(event, params, draw_current_state=True):
"""Toggle options (projectors) dialog."""
from matplotlib import widgets
projs = params['projs']
# turn on options dialog
labels = [p['desc'] for p in projs]
actives = ([p['active'] for p in projs] if draw_current_state else
params.get('proj_bools', [params['apply_proj']] * len(projs)))
width = max([4., max([len(p['desc']) for p in projs]) / 6.0 + 0.5])
height = (len(projs) + 1) / 6.0 + 1.5
fig_proj = figure_nobar(figsize=(width, height))
_set_window_title(fig_proj, 'SSP projection vectors')
offset = (1. / 6. / height)
params['fig_proj'] = fig_proj # necessary for proper toggling
ax_temp = fig_proj.add_axes((0, offset, 1, 0.8 - offset), frameon=False)
ax_temp.set_title('Projectors marked with "X" are active')
proj_checks = widgets.CheckButtons(ax_temp, labels=labels, actives=actives)
# make edges around checkbox areas
for rect in proj_checks.rectangles:
rect.set_edgecolor('0.5')
rect.set_linewidth(1.)
# change already-applied projectors to red
for ii, p in enumerate(projs):
if p['active']:
for x in proj_checks.lines[ii]:
x.set_color('#ff0000')
# make minimal size
# pass key presses from option dialog over
proj_checks.on_clicked(partial(_toggle_proj, params=params))
params['proj_checks'] = proj_checks
fig_proj.canvas.mpl_connect('key_press_event', _key_press)
# Toggle all
ax_temp = fig_proj.add_axes((0, 0, 1, offset), frameon=False)
proj_all = widgets.Button(ax_temp, 'Toggle all')
proj_all.on_clicked(partial(_toggle_proj, params=params, all_=True))
params['proj_all'] = proj_all
# this should work for non-test cases
try:
fig_proj.canvas.draw()
plt_show(fig=fig_proj, warn=False)
except Exception:
pass
def _simplify_float(label):
# Heuristic to turn floats to ints where possible (e.g. -500.0 to -500)
if isinstance(label, float) and np.isfinite(label) and \
float(str(label)) != round(label):
label = round(label, 2)
return label
def _get_figsize_from_config():
"""Get default / most recent figure size from config."""
figsize = get_config('MNE_BROWSE_RAW_SIZE')
if figsize is not None:
figsize = figsize.split(',')
figsize = tuple([float(s) for s in figsize])
return figsize
@verbose
def compare_fiff(fname_1, fname_2, fname_out=None, show=True, indent=' ',
read_limit=np.inf, max_str=30, verbose=None):
"""Compare the contents of two fiff files using diff and show_fiff.
Parameters
----------
fname_1 : str
First file to compare.
fname_2 : str
Second file to compare.
fname_out : str | None
Filename to store the resulting diff. If None, a temporary
file will be created.
show : bool
If True, show the resulting diff in a new tab in a web browser.
indent : str
How to indent the lines.
read_limit : int
Max number of bytes of data to read from a tag. Can be np.inf
to always read all data (helps test read completion).
max_str : int
Max number of characters of string representation to print for
each tag's data.
%(verbose)s
Returns
-------
fname_out : str
The filename used for storing the diff. Could be useful for
when a temporary file is used.
"""
file_1 = show_fiff(fname_1, output=list, indent=indent,
read_limit=read_limit, max_str=max_str)
file_2 = show_fiff(fname_2, output=list, indent=indent,
read_limit=read_limit, max_str=max_str)
diff = difflib.HtmlDiff().make_file(file_1, file_2, fname_1, fname_2)
if fname_out is not None:
f = open(fname_out, 'wb')
else:
f = tempfile.NamedTemporaryFile('wb', delete=False, suffix='.html')
fname_out = f.name
with f as fid:
fid.write(diff.encode('utf-8'))
if show is True:
webbrowser.open_new_tab(fname_out)
return fname_out
def figure_nobar(*args, **kwargs):
"""Make matplotlib figure with no toolbar.
Parameters
----------
*args : list
Arguments to pass to :func:`matplotlib.pyplot.figure`.
**kwargs : dict
Keyword arguments to pass to :func:`matplotlib.pyplot.figure`.
Returns
-------
fig : instance of Figure
The figure.
"""
from matplotlib import rcParams, pyplot as plt
old_val = rcParams['toolbar']
try:
rcParams['toolbar'] = 'none'
fig = plt.figure(*args, **kwargs)
# remove button press catchers (for toolbar)
cbs = list(fig.canvas.callbacks.callbacks['key_press_event'].keys())
for key in cbs:
fig.canvas.callbacks.disconnect(key)
finally:
rcParams['toolbar'] = old_val
return fig
def _show_help(col1, col2, width, height):
fig_help = figure_nobar(figsize=(width, height), dpi=80)
_set_window_title(fig_help, 'Help')
ax = fig_help.add_subplot(111)
celltext = [[c1, c2] for c1, c2 in zip(col1.strip().split("\n"),
col2.strip().split("\n"))]
table = ax.table(cellText=celltext, loc="center", cellLoc="left")
table.auto_set_font_size(False)
table.set_fontsize(12)
ax.set_axis_off()
for (row, col), cell in table.get_celld().items():
cell.set_edgecolor(None) # remove cell borders
# right justify, following:
# https://stackoverflow.com/questions/48210749/matplotlib-table-assign-different-text-alignments-to-different-columns?rq=1 # noqa: E501
if col == 0:
cell._loc = 'right'
fig_help.canvas.mpl_connect('key_press_event', _key_press)
# this should work for non-test cases
try:
fig_help.canvas.draw()
plt_show(fig=fig_help, warn=False)
except Exception:
pass
def _key_press(event):
"""Handle key press in dialog."""
import matplotlib.pyplot as plt
if event.key == 'escape':
plt.close(event.canvas.figure)
class ClickableImage(object):
"""Display an image so you can click on it and store x/y positions.
    Takes as input an image array (this can be any array that works with
    imshow, but it will work best with images). Displays the image and lets
    you click on it. Stores the x/y coordinates of each click, so you can
    later superimpose something on top of it.
Upon clicking, the x/y coordinate of the cursor will be stored in
self.coords, which is a list of (x, y) tuples.
Parameters
----------
imdata : ndarray
The image that you wish to click on for 2-d points.
**kwargs : dict
Keyword arguments. Passed to ax.imshow.
Notes
-----
.. versionadded:: 0.9.0
"""
def __init__(self, imdata, **kwargs):
"""Display the image for clicking."""
import matplotlib.pyplot as plt
self.coords = []
self.imdata = imdata
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111)
self.ymax = self.imdata.shape[0]
self.xmax = self.imdata.shape[1]
self.im = self.ax.imshow(imdata,
extent=(0, self.xmax, 0, self.ymax),
picker=True, **kwargs)
self.ax.axis('off')
self.fig.canvas.mpl_connect('pick_event', self.onclick)
plt_show(block=True)
def onclick(self, event):
"""Handle Mouse clicks.
Parameters
----------
event : matplotlib.backend_bases.Event
The matplotlib object that we use to get x/y position.
"""
mouseevent = event.mouseevent
self.coords.append((mouseevent.xdata, mouseevent.ydata))
def plot_clicks(self, **kwargs):
"""Plot the x/y positions stored in self.coords.
Parameters
----------
**kwargs : dict
Arguments are passed to imshow in displaying the bg image.
"""
import matplotlib.pyplot as plt
if len(self.coords) == 0:
raise ValueError('No coordinates found, make sure you click '
'on the image that is first shown.')
f, ax = plt.subplots()
ax.imshow(self.imdata, extent=(0, self.xmax, 0, self.ymax), **kwargs)
xlim, ylim = [ax.get_xlim(), ax.get_ylim()]
xcoords, ycoords = zip(*self.coords)
ax.scatter(xcoords, ycoords, c='#ff0000')
ann_text = np.arange(len(self.coords)).astype(str)
for txt, coord in zip(ann_text, self.coords):
ax.annotate(txt, coord, fontsize=20, color='#ff0000')
ax.set_xlim(xlim)
ax.set_ylim(ylim)
plt_show()
def to_layout(self, **kwargs):
"""Turn coordinates into an MNE Layout object.
Normalizes by the image you used to generate clicks
Parameters
----------
**kwargs : dict
Arguments are passed to generate_2d_layout.
Returns
-------
layout : instance of Layout
The layout.
"""
from ..channels.layout import generate_2d_layout
coords = np.array(self.coords)
lt = generate_2d_layout(coords, bg_image=self.imdata, **kwargs)
return lt
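# --- Illustrative usage sketch (added for clarity; not part of this module's API) ---
# ClickableImage blocks until its figure is closed, so this sketch is meant to
# be run interactively; the random image is purely for illustration.
def _example_clickable_image():
    data = np.random.RandomState(0).rand(20, 20)
    click = ClickableImage(data, cmap='gray')  # click a few points, then close
    click.plot_clicks()                        # re-plot the stored clicks
    return click.to_layout()                   # convert clicks to a Layout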
def _fake_click(fig, ax, point, xform='ax', button=1, kind='press'):
"""Fake a click at a relative point within axes."""
if xform == 'ax':
x, y = ax.transAxes.transform_point(point)
elif xform == 'data':
x, y = ax.transData.transform_point(point)
else:
assert xform == 'pix'
x, y = point
if kind == 'press':
func = partial(fig.canvas.button_press_event, x=x, y=y, button=button)
elif kind == 'release':
func = partial(fig.canvas.button_release_event, x=x, y=y,
button=button)
elif kind == 'motion':
func = partial(fig.canvas.motion_notify_event, x=x, y=y)
func(guiEvent=None)
def add_background_image(fig, im, set_ratios=None):
"""Add a background image to a plot.
Adds the image specified in ``im`` to the
figure ``fig``. This is generally meant to
be done with topo plots, though it could work
for any plot.
.. note:: This modifies the figure and/or axes in place.
Parameters
----------
fig : Figure
The figure you wish to add a bg image to.
im : array, shape (M, N, {3, 4})
A background image for the figure. This must be a valid input to
`matplotlib.pyplot.imshow`. Defaults to None.
set_ratios : None | str
Set the aspect ratio of any axes in fig
to the value in set_ratios. Defaults to None,
which does nothing to axes.
Returns
-------
ax_im : instance of Axes
Axes created corresponding to the image you added.
Notes
-----
.. versionadded:: 0.9.0
"""
if im is None:
# Don't do anything and return nothing
return None
if set_ratios is not None:
for ax in fig.axes:
ax.set_aspect(set_ratios)
ax_im = fig.add_axes([0, 0, 1, 1], label='background')
ax_im.imshow(im, aspect='auto')
ax_im.set_zorder(-1)
return ax_im
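# --- Illustrative example (added for clarity; not part of this module's API) ---
# Sketch of add_background_image: the image gets its own full-figure axes,
# drawn behind every other axes in the figure.
def _example_add_background_image():
    import matplotlib.pyplot as plt
    fig, _ = plt.subplots(2, 2)
    bg = np.random.RandomState(0).rand(64, 64, 3)
    ax_im = add_background_image(fig, bg, set_ratios='auto')
    assert ax_im.get_zorder() == -1
    plt.close(fig)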
def _find_peaks(evoked, npeaks):
"""Find peaks from evoked data.
Returns ``npeaks`` biggest peaks as a list of time points.
"""
from scipy.signal import argrelmax
gfp = evoked.data.std(axis=0)
order = len(evoked.times) // 30
if order < 1:
order = 1
peaks = argrelmax(gfp, order=order, axis=0)[0]
if len(peaks) > npeaks:
max_indices = np.argsort(gfp[peaks])[-npeaks:]
peaks = np.sort(peaks[max_indices])
times = evoked.times[peaks]
if len(times) == 0:
times = [evoked.times[gfp.argmax()]]
return times
def _process_times(inst, use_times, n_peaks=None, few=False):
"""Return a list of times for topomaps."""
if isinstance(use_times, str):
if use_times == 'interactive':
use_times, n_peaks = 'peaks', 1
if use_times == 'peaks':
if n_peaks is None:
n_peaks = min(3 if few else 7, len(inst.times))
use_times = _find_peaks(inst, n_peaks)
elif use_times == 'auto':
if n_peaks is None:
                n_peaks = min(5 if few else 10, len(inst.times))
use_times = np.linspace(inst.times[0], inst.times[-1], n_peaks)
else:
raise ValueError("Got an unrecognized method for `times`. Only "
"'peaks', 'auto' and 'interactive' are supported "
"(or directly passing numbers).")
elif np.isscalar(use_times):
use_times = [use_times]
use_times = np.array(use_times, float)
if use_times.ndim != 1:
raise ValueError('times must be 1D, got %d dimensions'
% use_times.ndim)
if len(use_times) > 25:
warn('More than 25 topomaps plots requested. This might take a while.')
return use_times
@verbose
def plot_sensors(info, kind='topomap', ch_type=None, title=None,
show_names=False, ch_groups=None, to_sphere=True, axes=None,
block=False, show=True, sphere=None, verbose=None):
"""Plot sensors positions.
Parameters
----------
info : instance of Info
Info structure containing the channel locations.
kind : str
Whether to plot the sensors as 3d, topomap or as an interactive
sensor selection dialog. Available options 'topomap', '3d', 'select'.
If 'select', a set of channels can be selected interactively by using
lasso selector or clicking while holding control key. The selected
channels are returned along with the figure instance. Defaults to
'topomap'.
ch_type : None | str
The channel type to plot. Available options 'mag', 'grad', 'eeg',
'seeg', 'ecog', 'all'. If ``'all'``, all the available mag, grad, eeg,
seeg and ecog channels are plotted. If None (default), then channels
are chosen in the order given above.
title : str | None
Title for the figure. If None (default), equals to
``'Sensor positions (%%s)' %% ch_type``.
show_names : bool | array of str
Whether to display all channel names. If an array, only the channel
names in the array are shown. Defaults to False.
ch_groups : 'position' | array of shape (n_ch_groups, n_picks) | None
Channel groups for coloring the sensors. If None (default), default
coloring scheme is used. If 'position', the sensors are divided
into 8 regions. See ``order`` kwarg of :func:`mne.viz.plot_raw`. If
array, the channels are divided by picks given in the array.
.. versionadded:: 0.13.0
to_sphere : bool
Whether to project the 3d locations to a sphere. When False, the
        sensor array appears as it does when looking straight down from above
        the subject's head. Has no effect when kind='3d'. Defaults to True.
.. versionadded:: 0.14.0
axes : instance of Axes | instance of Axes3D | None
Axes to draw the sensors to. If ``kind='3d'``, axes must be an instance
of Axes3D. If None (default), a new axes will be created.
.. versionadded:: 0.13.0
block : bool
Whether to halt program execution until the figure is closed. Defaults
to False.
.. versionadded:: 0.13.0
show : bool
Show figure if True. Defaults to True.
%(topomap_sphere_auto)s
%(verbose)s
Returns
-------
fig : instance of Figure
Figure containing the sensor topography.
selection : list
A list of selected channels. Only returned if ``kind=='select'``.
See Also
--------
mne.viz.plot_layout
Notes
-----
This function plots the sensor locations from the info structure using
matplotlib. For drawing the sensors using mayavi see
:func:`mne.viz.plot_alignment`.
.. versionadded:: 0.12.0
"""
from .evoked import _rgb
_check_option('kind', kind, ['topomap', '3d', 'select'])
if not isinstance(info, Info):
raise TypeError('info must be an instance of Info not %s' % type(info))
ch_indices = channel_indices_by_type(info)
allowed_types = _DATA_CH_TYPES_SPLIT
if ch_type is None:
for this_type in allowed_types:
if _contains_ch_type(info, this_type):
ch_type = this_type
break
picks = ch_indices[ch_type]
elif ch_type == 'all':
picks = list()
for this_type in allowed_types:
picks += ch_indices[this_type]
elif ch_type in allowed_types:
picks = ch_indices[ch_type]
else:
raise ValueError("ch_type must be one of %s not %s!" % (allowed_types,
ch_type))
if len(picks) == 0:
raise ValueError('Could not find any channels of type %s.' % ch_type)
chs = [info['chs'][pick] for pick in picks]
if not _check_ch_locs(chs):
raise RuntimeError('No valid channel positions found')
dev_head_t = info['dev_head_t']
pos = np.empty((len(chs), 3))
for ci, ch in enumerate(chs):
pos[ci] = ch['loc'][:3]
if ch['coord_frame'] == FIFF.FIFFV_COORD_DEVICE:
if dev_head_t is None:
warn('dev_head_t is None, transforming MEG sensors to head '
'coordinate frame using identity transform')
dev_head_t = np.eye(4)
pos[ci] = apply_trans(dev_head_t, pos[ci])
del dev_head_t
ch_names = np.array([ch['ch_name'] for ch in chs])
bads = [idx for idx, name in enumerate(ch_names) if name in info['bads']]
if ch_groups is None:
def_colors = _handle_default('color')
colors = ['red' if i in bads else def_colors[channel_type(info, pick)]
for i, pick in enumerate(picks)]
else:
if ch_groups in ['position', 'selection']:
if ch_groups == 'position':
ch_groups = _divide_to_regions(info, add_stim=False)
ch_groups = list(ch_groups.values())
else:
ch_groups, color_vals = list(), list()
for selection in _SELECTIONS + _EEG_SELECTIONS:
channels = pick_channels(
info['ch_names'], read_selection(selection, info=info))
ch_groups.append(channels)
color_vals = np.ones((len(ch_groups), 4))
for idx, ch_group in enumerate(ch_groups):
color_picks = [np.where(picks == ch)[0][0] for ch in ch_group
if ch in picks]
if len(color_picks) == 0:
continue
x, y, z = pos[color_picks].T
color = np.mean(_rgb(x, y, z), axis=0)
color_vals[idx, :3] = color # mean of spatial color
else:
import matplotlib.pyplot as plt
colors = np.linspace(0, 1, len(ch_groups))
color_vals = [plt.cm.jet(colors[i]) for i in range(len(ch_groups))]
if not isinstance(ch_groups, (np.ndarray, list)):
raise ValueError("ch_groups must be None, 'position', "
"'selection', or an array. Got %s." % ch_groups)
colors = np.zeros((len(picks), 4))
for pick_idx, pick in enumerate(picks):
for ind, value in enumerate(ch_groups):
if pick in value:
colors[pick_idx] = color_vals[ind]
break
title = 'Sensor positions (%s)' % ch_type if title is None else title
fig = _plot_sensors(pos, info, picks, colors, bads, ch_names, title,
show_names, axes, show, kind, block,
to_sphere, sphere)
if kind == 'select':
return fig, fig.lasso.selection
return fig
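# --- Illustrative example (added for clarity; not part of this module's API) ---
# Hedged sketch of plot_sensors on synthetic EEG channel positions built from
# a standard montage; assumes an MNE version providing make_standard_montage
# and Info.set_montage.
def _example_plot_sensors():
    import mne
    montage = mne.channels.make_standard_montage('standard_1020')
    info = mne.create_info(montage.ch_names, sfreq=100., ch_types='eeg')
    info.set_montage(montage)
    return plot_sensors(info, kind='topomap', show=False)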
def _onpick_sensor(event, fig, ax, pos, ch_names, show_names):
"""Pick a channel in plot_sensors."""
if event.mouseevent.inaxes != ax:
return
if event.mouseevent.key == 'control' and fig.lasso is not None:
for ind in event.ind:
fig.lasso.select_one(ind)
return
if show_names:
return # channel names already visible
ind = event.ind[0] # Just take the first sensor.
ch_name = ch_names[ind]
this_pos = pos[ind]
# XXX: Bug in matplotlib won't allow setting the position of existing
# text item, so we create a new one.
ax.texts.pop(0)
if len(this_pos) == 3:
ax.text(this_pos[0], this_pos[1], this_pos[2], ch_name)
else:
ax.text(this_pos[0], this_pos[1], ch_name)
fig.canvas.draw()
def _close_event(event, fig):
"""Listen for sensor plotter close event."""
if getattr(fig, 'lasso', None) is not None:
fig.lasso.disconnect()
def _plot_sensors(pos, info, picks, colors, bads, ch_names, title, show_names,
ax, show, kind, block, to_sphere, sphere):
"""Plot sensors."""
from matplotlib import rcParams
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from .topomap import _get_pos_outlines, _draw_outlines
sphere = _check_sphere(sphere, info)
edgecolors = np.repeat(rcParams['axes.edgecolor'], len(colors))
edgecolors[bads] = 'red'
axes_was_none = ax is None
if axes_was_none:
fig = plt.figure(figsize=(max(rcParams['figure.figsize']),) * 2)
if kind == '3d':
Axes3D(fig)
ax = fig.gca(projection='3d')
else:
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
if kind == '3d':
ax.text(0, 0, 0, '', zorder=1)
ax.scatter(pos[:, 0], pos[:, 1], pos[:, 2], picker=True, c=colors,
s=75, edgecolor=edgecolors, linewidth=2)
ax.azim = 90
ax.elev = 0
ax.xaxis.set_label_text('x (m)')
ax.yaxis.set_label_text('y (m)')
ax.zaxis.set_label_text('z (m)')
else: # kind in 'select', 'topomap'
ax.text(0, 0, '', zorder=1)
pos, outlines = _get_pos_outlines(info, picks, sphere,
to_sphere=to_sphere)
_draw_outlines(ax, outlines)
pts = ax.scatter(pos[:, 0], pos[:, 1], picker=True, clip_on=False,
c=colors, edgecolors=edgecolors, s=25, lw=2)
if kind == 'select':
fig.lasso = SelectFromCollection(ax, pts, ch_names)
else:
fig.lasso = None
# Equal aspect for 3D looks bad, so only use for 2D
ax.set(aspect='equal')
if axes_was_none:
fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None,
hspace=None)
ax.axis("off") # remove border around figure
del sphere
connect_picker = True
if show_names:
if isinstance(show_names, (list, np.ndarray)): # only given channels
indices = [list(ch_names).index(name) for name in show_names]
else: # all channels
indices = range(len(pos))
for idx in indices:
this_pos = pos[idx]
if kind == '3d':
ax.text(this_pos[0], this_pos[1], this_pos[2], ch_names[idx])
else:
ax.text(this_pos[0] + 0.0025, this_pos[1], ch_names[idx],
ha='left', va='center')
connect_picker = (kind == 'select')
if connect_picker:
picker = partial(_onpick_sensor, fig=fig, ax=ax, pos=pos,
ch_names=ch_names, show_names=show_names)
fig.canvas.mpl_connect('pick_event', picker)
ax.set(title=title)
closed = partial(_close_event, fig=fig)
fig.canvas.mpl_connect('close_event', closed)
plt_show(show, block=block)
return fig
def _compute_scalings(scalings, inst, remove_dc=False, duration=10):
"""Compute scalings for each channel type automatically.
Parameters
----------
scalings : dict
The scalings for each channel type. If any values are
'auto', this will automatically compute a reasonable
scaling for that channel type. Any values that aren't
'auto' will not be changed.
inst : instance of Raw or Epochs
The data for which you want to compute scalings. If data
is not preloaded, this will read a subset of times / epochs
        up to 100 MB in size in order to compute scalings.
remove_dc : bool
Whether to remove the mean (DC) before calculating the scalings. If
True, the mean will be computed and subtracted for short epochs in
order to compensate not only for global mean offset, but also for slow
drifts in the signals.
duration : float
If remove_dc is True, the mean will be computed and subtracted on
segments of length ``duration`` seconds.
Returns
-------
scalings : dict
A scalings dictionary with updated values
"""
from ..io.base import BaseRaw
from ..epochs import BaseEpochs
scalings = _handle_default('scalings_plot_raw', scalings)
if not isinstance(inst, (BaseRaw, BaseEpochs)):
raise ValueError('Must supply either Raw or Epochs')
ch_types = channel_indices_by_type(inst.info)
ch_types = {i_type: i_ixs
for i_type, i_ixs in ch_types.items() if len(i_ixs) != 0}
scalings = deepcopy(scalings)
if inst.preload is False:
if isinstance(inst, BaseRaw):
# Load a window of data from the center up to 100mb in size
n_times = 1e8 // (len(inst.ch_names) * 8)
n_times = np.clip(n_times, 1, inst.n_times)
n_secs = n_times / float(inst.info['sfreq'])
time_middle = np.mean(inst.times)
tmin = np.clip(time_middle - n_secs / 2., inst.times.min(), None)
tmax = np.clip(time_middle + n_secs / 2., None, inst.times.max())
data = inst._read_segment(tmin, tmax)
elif isinstance(inst, BaseEpochs):
# Load a random subset of epochs up to 100mb in size
n_epochs = 1e8 // (len(inst.ch_names) * len(inst.times) * 8)
n_epochs = int(np.clip(n_epochs, 1, len(inst)))
ixs_epochs = np.random.choice(range(len(inst)), n_epochs, False)
inst = inst.copy()[ixs_epochs].load_data()
else:
data = inst._data
if isinstance(inst, BaseEpochs):
data = inst._data.swapaxes(0, 1).reshape([len(inst.ch_names), -1])
    # Iterate through ch types and update scaling if 'auto'
for key, value in scalings.items():
if key not in ch_types:
continue
if not (isinstance(value, str) and value == 'auto'):
try:
scalings[key] = float(value)
except Exception:
raise ValueError(
f'scalings must be "auto" or float, got scalings[{key!r}]='
f'{value!r} which could not be converted to float')
continue
this_data = data[ch_types[key]]
if remove_dc and (this_data.shape[1] / inst.info["sfreq"] >= duration):
length = int(duration * inst.info["sfreq"]) # segment length
# truncate data so that we can divide into segments of equal length
this_data = this_data[:, :this_data.shape[1] // length * length]
shape = this_data.shape # original shape
this_data = this_data.T.reshape(-1, length, shape[0]) # segment
this_data -= np.nanmean(this_data, 0) # subtract segment means
this_data = this_data.T.reshape(shape) # reshape into original
this_data = this_data.ravel()
this_data = this_data[np.isfinite(this_data)]
if this_data.size:
iqr = np.diff(np.percentile(this_data, [25, 75]))[0]
else:
iqr = 1.
scalings[key] = iqr
return scalings
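# --- Illustrative example (added for clarity; not part of this module's API) ---
# Sketch of the 'auto' scaling heuristic on a synthetic Raw object: the value
# chosen per channel type is essentially the interquartile range of its data.
def _example_compute_scalings():
    from ..io import RawArray
    rng = np.random.RandomState(0)
    info = create_info(['EEG 001', 'EEG 002'], sfreq=100., ch_types='eeg')
    raw = RawArray(rng.randn(2, 1000) * 1e-6, info)
    return _compute_scalings(dict(eeg='auto'), raw)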
def _setup_cmap(cmap, n_axes=1, norm=False):
"""Set color map interactivity."""
if cmap == 'interactive':
cmap = ('Reds' if norm else 'RdBu_r', True)
elif not isinstance(cmap, tuple):
if cmap is None:
cmap = 'Reds' if norm else 'RdBu_r'
cmap = (cmap, False if n_axes > 2 else True)
return cmap
def _prepare_joint_axes(n_maps, figsize=None):
"""Prepare axes for topomaps and colorbar in joint plot figure.
Parameters
----------
    n_maps : int
        Number of topomaps to include in the figure.
    figsize : tuple
        Figure size; passed on to ``plt.figure``.
Returns
-------
fig : matplotlib.figure.Figure
Figure with initialized axes
    main_ax : matplotlib.axes._subplots.AxesSubplot
        Axes in which to put the main plot.
    map_ax : list
        List of axes for each topomap.
    cbar_ax : matplotlib.axes._subplots.AxesSubplot
        Axes for colorbar next to topomaps.
"""
import matplotlib.pyplot as plt
fig = plt.figure(figsize=figsize)
main_ax = fig.add_subplot(212)
ts = n_maps + 2
map_ax = [plt.subplot(4, ts, x + 2 + ts) for x in range(n_maps)]
# Position topomap subplots on the second row, starting on the
# second column
cbar_ax = plt.subplot(4, 5 * (ts + 1), 10 * (ts + 1))
# Position colorbar at the very end of a more finely divided
# second row of subplots
return fig, main_ax, map_ax, cbar_ax
class DraggableColorbar(object):
"""Enable interactive colorbar.
See http://www.ster.kuleuven.be/~pieterd/python/html/plotting/interactive_colorbar.html
""" # noqa: E501
def __init__(self, cbar, mappable):
import matplotlib.pyplot as plt
self.cbar = cbar
self.mappable = mappable
self.press = None
self.cycle = sorted([i for i in dir(plt.cm) if
hasattr(getattr(plt.cm, i), 'N')])
self.cycle += [mappable.get_cmap().name]
self.index = self.cycle.index(mappable.get_cmap().name)
self.lims = (self.cbar.norm.vmin, self.cbar.norm.vmax)
self.connect()
def connect(self):
"""Connect to all the events we need."""
self.cidpress = self.cbar.patch.figure.canvas.mpl_connect(
'button_press_event', self.on_press)
self.cidrelease = self.cbar.patch.figure.canvas.mpl_connect(
'button_release_event', self.on_release)
self.cidmotion = self.cbar.patch.figure.canvas.mpl_connect(
'motion_notify_event', self.on_motion)
self.keypress = self.cbar.patch.figure.canvas.mpl_connect(
'key_press_event', self.key_press)
self.scroll = self.cbar.patch.figure.canvas.mpl_connect(
'scroll_event', self.on_scroll)
def on_press(self, event):
"""Handle button press."""
if event.inaxes != self.cbar.ax:
return
self.press = event.y
def key_press(self, event):
"""Handle key press."""
# print(event.key)
scale = self.cbar.norm.vmax - self.cbar.norm.vmin
perc = 0.03
if event.key == 'down':
self.index += 1
elif event.key == 'up':
self.index -= 1
elif event.key == ' ': # space key resets scale
self.cbar.norm.vmin = self.lims[0]
self.cbar.norm.vmax = self.lims[1]
        elif event.key == '+':  # shrink the range (zoom in)
            self.cbar.norm.vmin += perc * scale
            self.cbar.norm.vmax -= perc * scale
        elif event.key == '-':  # widen the range (zoom out)
            self.cbar.norm.vmin -= perc * scale
            self.cbar.norm.vmax += perc * scale
        elif event.key == 'pageup':  # shift the whole range down
            self.cbar.norm.vmin -= perc * scale
            self.cbar.norm.vmax -= perc * scale
        elif event.key == 'pagedown':  # shift the whole range up
            self.cbar.norm.vmin += perc * scale
            self.cbar.norm.vmax += perc * scale
else:
return
if self.index < 0:
self.index = len(self.cycle) - 1
elif self.index >= len(self.cycle):
self.index = 0
cmap = self.cycle[self.index]
self.cbar.mappable.set_cmap(cmap)
self.cbar.draw_all()
self.mappable.set_cmap(cmap)
self._update()
def on_motion(self, event):
"""Handle mouse movements."""
if self.press is None:
return
if event.inaxes != self.cbar.ax:
return
yprev = self.press
dy = event.y - yprev
self.press = event.y
scale = self.cbar.norm.vmax - self.cbar.norm.vmin
perc = 0.03
if event.button == 1:
self.cbar.norm.vmin -= (perc * scale) * np.sign(dy)
self.cbar.norm.vmax -= (perc * scale) * np.sign(dy)
elif event.button == 3:
self.cbar.norm.vmin -= (perc * scale) * np.sign(dy)
self.cbar.norm.vmax += (perc * scale) * np.sign(dy)
self._update()
def on_release(self, event):
"""Handle release."""
self.press = None
self._update()
def on_scroll(self, event):
"""Handle scroll."""
scale = 1.1 if event.step < 0 else 1. / 1.1
self.cbar.norm.vmin *= scale
self.cbar.norm.vmax *= scale
self._update()
def _update(self):
self.cbar.set_ticks(None, update_ticks=True) # use default
self.cbar.draw_all()
self.mappable.set_norm(self.cbar.norm)
self.cbar.patch.figure.canvas.draw()
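# --- Illustrative usage sketch (added for clarity; not part of this module's API) ---
# Attach a DraggableColorbar to an ordinary imshow colorbar: dragging shifts or
# zooms the color limits, and the up/down keys cycle the colormap.  Relies on
# the matplotlib versions this module targets (where Colorbar exposes .patch).
def _example_draggable_colorbar():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    im = ax.imshow(np.random.RandomState(0).rand(10, 10))
    cbar = fig.colorbar(im, ax=ax)
    return DraggableColorbar(cbar, im)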
class SelectFromCollection(object):
"""Select channels from a matplotlib collection using ``LassoSelector``.
Selected channels are saved in the ``selection`` attribute. This tool
highlights selected points by fading other points out (i.e., reducing their
alpha values).
Parameters
----------
ax : instance of Axes
Axes to interact with.
collection : instance of matplotlib collection
Collection you want to select from.
alpha_other : 0 <= float <= 1
To highlight a selection, this tool sets all selected points to an
alpha value of 1 and non-selected points to ``alpha_other``.
        Defaults to 0.5.
    linewidth_other : float
        Linewidth to use for non-selected sensors. Defaults to 0.5.
Notes
-----
This tool selects collection objects based on their *origins*
(i.e., ``offsets``). Emits mpl event 'lasso_event' when selection is ready.
"""
def __init__(self, ax, collection, ch_names, alpha_other=0.5,
linewidth_other=0.5, alpha_selected=1, linewidth_selected=1):
from matplotlib import __version__
if LooseVersion(__version__) < LooseVersion('1.2.1'):
raise ImportError('Interactive selection not possible for '
'matplotlib versions < 1.2.1. Upgrade '
'matplotlib.')
from matplotlib.widgets import LassoSelector
self.canvas = ax.figure.canvas
self.collection = collection
self.ch_names = ch_names
self.alpha_other = alpha_other
self.linewidth_other = linewidth_other
self.alpha_selected = alpha_selected
self.linewidth_selected = linewidth_selected
self.xys = collection.get_offsets()
self.Npts = len(self.xys)
# Ensure that we have separate colors for each object
self.fc = collection.get_facecolors()
self.ec = collection.get_edgecolors()
self.lw = collection.get_linewidths()
if len(self.fc) == 0:
raise ValueError('Collection must have a facecolor')
elif len(self.fc) == 1:
self.fc = np.tile(self.fc, self.Npts).reshape(self.Npts, -1)
self.ec = np.tile(self.ec, self.Npts).reshape(self.Npts, -1)
self.fc[:, -1] = self.alpha_other # deselect in the beginning
self.ec[:, -1] = self.alpha_other
self.lw = np.full(self.Npts, self.linewidth_other)
self.lasso = LassoSelector(ax, onselect=self.on_select,
lineprops=dict(color='red', linewidth=0.5))
self.selection = list()
def on_select(self, verts):
"""Select a subset from the collection."""
from matplotlib.path import Path
if len(verts) <= 3: # Seems to be a good way to exclude single clicks.
return
path = Path(verts)
inds = np.nonzero([path.contains_point(xy) for xy in self.xys])[0]
if self.canvas._key == 'control': # Appending selection.
sels = [np.where(self.ch_names == c)[0][0] for c in self.selection]
inters = set(inds) - set(sels)
inds = list(inters.union(set(sels) - set(inds)))
self.selection[:] = np.array(self.ch_names)[inds].tolist()
self.style_sensors(inds)
self.canvas.callbacks.process('lasso_event')
def select_one(self, ind):
"""Select or deselect one sensor."""
ch_name = self.ch_names[ind]
if ch_name in self.selection:
sel_ind = self.selection.index(ch_name)
self.selection.pop(sel_ind)
else:
self.selection.append(ch_name)
inds = np.in1d(self.ch_names, self.selection).nonzero()[0]
self.style_sensors(inds)
self.canvas.callbacks.process('lasso_event')
def select_many(self, inds):
"""Select many sensors using indices (for predefined selections)."""
self.selection[:] = np.array(self.ch_names)[inds].tolist()
self.style_sensors(inds)
def style_sensors(self, inds):
"""Style selected sensors as "active"."""
# reset
self.fc[:, -1] = self.alpha_other
self.ec[:, -1] = self.alpha_other / 2
self.lw[:] = self.linewidth_other
# style sensors at `inds`
self.fc[inds, -1] = self.alpha_selected
self.ec[inds, -1] = self.alpha_selected
self.lw[inds] = self.linewidth_selected
self.collection.set_facecolors(self.fc)
self.collection.set_edgecolors(self.ec)
self.collection.set_linewidths(self.lw)
self.canvas.draw_idle()
def disconnect(self):
"""Disconnect the lasso selector."""
self.lasso.disconnect_events()
self.fc[:, -1] = self.alpha_selected
self.ec[:, -1] = self.alpha_selected
self.collection.set_facecolors(self.fc)
self.collection.set_edgecolors(self.ec)
self.canvas.draw_idle()
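# --- Illustrative example (added for clarity; not part of this module's API) ---
# SelectFromCollection wraps a LassoSelector around a scatter collection; the
# names of the selected points end up in ``selector.selection``.  Selection is
# normally interactive, but select_many() also works programmatically.
def _example_select_from_collection():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    pts = ax.scatter(np.arange(5), np.arange(5))
    names = np.array(['ch%d' % ii for ii in range(5)])
    selector = SelectFromCollection(ax, pts, names)
    selector.select_many([0, 2])
    assert selector.selection == ['ch0', 'ch2']
    plt.close(fig)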
def _get_color_list(annotations=False):
"""Get the current color list from matplotlib rcParams.
Parameters
----------
    annotations : bool
        Has no influence on the function if False. If True, remove the
        reddish color reserved for annotations from the cycle (if present)
        and return it alongside the list.
Returns
-------
colors : list
"""
from matplotlib import rcParams
color_cycle = rcParams.get('axes.prop_cycle')
if not color_cycle:
# Use deprecated color_cycle to avoid KeyErrors in environments
# with Python 2.7 and Matplotlib < 1.5
# this will already be a list
colors = rcParams.get('axes.color_cycle')
else:
# we were able to use the prop_cycle. Now just convert to list
colors = color_cycle.by_key()['color']
# If we want annotations, red is reserved ... remove if present. This
# checks for the reddish color in MPL dark background style, normal style,
# and MPL "red", and defaults to the last of those if none are present
for red in ('#fa8174', '#d62728', '#ff0000'):
if annotations and red in colors:
colors.remove(red)
break
return (colors, red) if annotations else colors
def _merge_annotations(start, stop, description, annotations, current=()):
"""Handle drawn annotations."""
ends = annotations.onset + annotations.duration
idx = np.intersect1d(np.where(ends >= start)[0],
np.where(annotations.onset <= stop)[0])
idx = np.intersect1d(idx,
np.where(annotations.description == description)[0])
new_idx = np.setdiff1d(idx, current) # don't include modified annotation
end = max(np.append((annotations.onset[new_idx] +
annotations.duration[new_idx]), stop))
onset = min(np.append(annotations.onset[new_idx], start))
duration = end - onset
annotations.delete(idx)
annotations.append(onset, duration, description)
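# --- Illustrative example (added for clarity; not part of this module's API) ---
# Sketch of _merge_annotations: drawing a span that overlaps existing
# annotations of the same description collapses them into a single annotation.
def _example_merge_annotations():
    from ..annotations import Annotations
    annot = Annotations(onset=[1., 4.], duration=[1., 1.],
                        description=['BAD_drawn', 'BAD_drawn'])
    _merge_annotations(1.5, 4.5, 'BAD_drawn', annot)
    assert len(annot) == 1
    assert annot.onset[0] == 1. and annot.duration[0] == 4.
    return annot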
def _connection_line(x, fig, sourceax, targetax, y=1.,
y_source_transform="transAxes"):
"""Connect source and target plots with a line.
Connect source and target plots with a line, such as time series
    (source) and topoplots (target). Primarily used for plot_joint
functions.
"""
from matplotlib.lines import Line2D
trans_fig = fig.transFigure
trans_fig_inv = fig.transFigure.inverted()
xt, yt = trans_fig_inv.transform(targetax.transAxes.transform([.5, 0.]))
xs, _ = trans_fig_inv.transform(sourceax.transData.transform([x, 0.]))
_, ys = trans_fig_inv.transform(getattr(sourceax, y_source_transform
).transform([0., y]))
return Line2D((xt, xs), (yt, ys), transform=trans_fig, color='grey',
linestyle='-', linewidth=1.5, alpha=.66, zorder=1,
clip_on=False)
class DraggableLine(object):
"""Custom matplotlib line for moving around by drag and drop.
Parameters
----------
line : instance of matplotlib Line2D
Line to add interactivity to.
    modify_callback : function
        Callback to call with the line's previous and new x position when
        the line is released after being dragged.
    drag_callback : function
        Callback to call with the line's current x position while it is
        being dragged.
"""
def __init__(self, line, modify_callback, drag_callback):
self.line = line
self.press = None
self.x0 = line.get_xdata()[0]
self.modify_callback = modify_callback
self.drag_callback = drag_callback
self.cidpress = self.line.figure.canvas.mpl_connect(
'button_press_event', self.on_press)
self.cidrelease = self.line.figure.canvas.mpl_connect(
'button_release_event', self.on_release)
self.cidmotion = self.line.figure.canvas.mpl_connect(
'motion_notify_event', self.on_motion)
def set_x(self, x):
"""Repoisition the line."""
self.line.set_xdata([x, x])
self.x0 = x
def on_press(self, event):
"""Store button press if on top of the line."""
if event.inaxes != self.line.axes or not self.line.contains(event)[0]:
return
x0 = self.line.get_xdata()
y0 = self.line.get_ydata()
self.press = x0, y0, event.xdata, event.ydata
def on_motion(self, event):
"""Move the line on drag."""
if self.press is None:
return
if event.inaxes != self.line.axes:
return
x0, y0, xpress, ypress = self.press
dx = event.xdata - xpress
self.line.set_xdata(x0 + dx)
self.drag_callback((x0 + dx)[0])
self.line.figure.canvas.draw()
def on_release(self, event):
"""Handle release."""
if event.inaxes != self.line.axes or self.press is None:
return
self.press = None
self.line.figure.canvas.draw()
self.modify_callback(self.x0, event.xdata)
self.x0 = event.xdata
def remove(self):
"""Remove the line."""
self.line.figure.canvas.mpl_disconnect(self.cidpress)
self.line.figure.canvas.mpl_disconnect(self.cidrelease)
self.line.figure.canvas.mpl_disconnect(self.cidmotion)
self.line.figure.axes[0].lines.remove(self.line)
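# A minimal sketch of wiring up ``DraggableLine`` (illustrative only; the
# callbacks below are placeholders, any callables with the same signatures
# will do). Call it from an interactive matplotlib session and show the
# figure to try dragging the vertical line.
def _example_draggable_line():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2, 3], [0, 1, 0, 1])
    vline = ax.axvline(1.5, color='r')
    def on_modify(old_x, new_x):  # fired once, on mouse release
        print('line moved from %s to %s' % (old_x, new_x))
    def on_drag(new_x):  # fired repeatedly while dragging
        print('dragging at %s' % (new_x,))
    return DraggableLine(vline, on_modify, on_drag)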
def _setup_ax_spines(axes, vlines, xmin, xmax, ymin, ymax, invert_y=False,
unit=None, truncate_xaxis=True, truncate_yaxis=True,
skip_axlabel=False, hline=True):
# don't show zero line if it coincides with x-axis (even if hline=True)
if hline and ymin != 0.:
axes.spines['top'].set_position('zero')
else:
axes.spines['top'].set_visible(False)
# the axes can become very small with topo plotting. This prevents the
# x-axis from shrinking to length zero if truncate_xaxis=True, by adding
# new ticks that are nice round numbers close to (but less extreme than)
# xmin and xmax
vlines = [] if vlines is None else vlines
xticks = _trim_ticks(axes.get_xticks(), xmin, xmax)
xticks = np.array(sorted(set([x for x in xticks] + vlines)))
if len(xticks) < 2:
def log_fix(tval):
exp = np.log10(np.abs(tval))
return np.sign(tval) * 10 ** (np.fix(exp) - (exp < 0))
xlims = np.array([xmin, xmax])
temp_ticks = log_fix(xlims)
closer_idx = np.argmin(np.abs(xlims - temp_ticks))
further_idx = np.argmax(np.abs(xlims - temp_ticks))
start_stop = [temp_ticks[closer_idx], xlims[further_idx]]
step = np.sign(np.diff(start_stop)) * np.max(np.abs(temp_ticks))
tts = np.arange(*start_stop, step)
xticks = np.array(sorted(xticks + [tts[0], tts[-1]]))
axes.set_xticks(xticks)
# y-axis is simpler
yticks = _trim_ticks(axes.get_yticks(), ymin, ymax)
axes.set_yticks(yticks)
# truncation case 1: truncate both
if truncate_xaxis and truncate_yaxis:
axes.spines['bottom'].set_bounds(*xticks[[0, -1]])
axes.spines['left'].set_bounds(*yticks[[0, -1]])
# case 2: truncate only x (only right side; connect to y at left)
elif truncate_xaxis:
xbounds = np.array(axes.get_xlim())
xbounds[1] = axes.get_xticks()[-1]
axes.spines['bottom'].set_bounds(*xbounds)
# case 3: truncate only y (only top; connect to x at bottom)
elif truncate_yaxis:
ybounds = np.array(axes.get_ylim())
if invert_y:
ybounds[0] = axes.get_yticks()[0]
else:
ybounds[1] = axes.get_yticks()[-1]
axes.spines['left'].set_bounds(*ybounds)
# handle axis labels
if skip_axlabel:
axes.set_yticklabels([''] * len(yticks))
axes.set_xticklabels([''] * len(xticks))
else:
if unit is not None:
axes.set_ylabel(unit, rotation=90)
axes.set_xlabel('Time (s)')
# plot vertical lines
if vlines:
_ymin, _ymax = axes.get_ylim()
axes.vlines(vlines, _ymax, _ymin, linestyles='--', colors='k',
linewidth=1., zorder=1)
# invert?
if invert_y:
axes.invert_yaxis()
# changes we always make:
axes.tick_params(direction='out')
axes.tick_params(right=False)
axes.spines['right'].set_visible(False)
axes.spines['left'].set_zorder(0)
def _handle_decim(info, decim, lowpass):
"""Handle decim parameter for plotters."""
from ..evoked import _check_decim
from ..utils import _ensure_int
if isinstance(decim, str) and decim == 'auto':
lp = info['sfreq'] if info['lowpass'] is None else info['lowpass']
lp = min(lp, info['sfreq'] if lowpass is None else lowpass)
info['lowpass'] = lp
decim = max(int(info['sfreq'] / (lp * 3) + 1e-6), 1)
decim = _ensure_int(decim, 'decim', must_be='an int or "auto"')
if decim <= 0:
raise ValueError('decim must be "auto" or a positive integer, got %s'
% (decim,))
decim = _check_decim(info, decim, 0)[0]
data_picks = _pick_data_channels(info, exclude=())
return decim, data_picks
def _setup_plot_projector(info, noise_cov, proj=True, use_noise_cov=True,
nave=1):
from ..cov import compute_whitener
projector = np.eye(len(info['ch_names']))
whitened_ch_names = []
if noise_cov is not None and use_noise_cov:
# any channels in noise_cov['bads'] but not in info['bads'] get
# set to nan, which means that they are not plotted.
data_picks = _pick_data_channels(info, with_ref_meg=False, exclude=())
data_names = {info['ch_names'][pick] for pick in data_picks}
# these can be toggled by the user
bad_names = set(info['bads'])
        # these can't be enabled in standard pipelines (we always take the
        # union), so pretend they're not in the cov at all
cov_names = ((set(noise_cov['names']) & set(info['ch_names'])) -
set(noise_cov['bads']))
# Actually compute the whitener only using the difference
whiten_names = cov_names - bad_names
whiten_picks = pick_channels(info['ch_names'], whiten_names)
whiten_info = pick_info(info, whiten_picks)
rank = _triage_rank_sss(whiten_info, [noise_cov])[1][0]
whitener, whitened_ch_names = compute_whitener(
noise_cov, whiten_info, rank=rank, verbose=False)
whitener *= np.sqrt(nave) # proper scaling for Evoked data
assert set(whitened_ch_names) == whiten_names
projector[whiten_picks, whiten_picks[:, np.newaxis]] = whitener
# Now we need to change the set of "whitened" channels to include
# all data channel names so that they are properly italicized.
whitened_ch_names = data_names
# We would need to set "bad_picks" to identity to show the traces
# (but in gray), but here we don't need to because "projector"
# starts out as identity. So all that is left to do is take any
# *good* data channels that are not in the noise cov to be NaN
nan_names = data_names - (bad_names | cov_names)
# XXX conditional necessary because of annoying behavior of
# pick_channels where an empty list means "all"!
if len(nan_names) > 0:
nan_picks = pick_channels(info['ch_names'], nan_names)
projector[nan_picks] = np.nan
elif proj:
projector, _ = setup_proj(info, add_eeg_ref=False, verbose=False)
return projector, whitened_ch_names
def _check_sss(info):
"""Check SSS history in info."""
ch_used = [ch for ch in _DATA_CH_TYPES_SPLIT
if _contains_ch_type(info, ch)]
has_meg = 'mag' in ch_used and 'grad' in ch_used
has_sss = (has_meg and len(info['proc_history']) > 0 and
info['proc_history'][0].get('max_info') is not None)
return ch_used, has_meg, has_sss
def _triage_rank_sss(info, covs, rank=None, scalings=None):
rank = dict() if rank is None else rank
scalings = _handle_default('scalings_cov_rank', scalings)
# Only look at good channels
picks = _pick_data_channels(info, with_ref_meg=False, exclude='bads')
info = pick_info(info, picks)
ch_used, has_meg, has_sss = _check_sss(info)
if has_sss:
if 'mag' in rank or 'grad' in rank:
raise ValueError('When using SSS, pass "meg" to set the rank '
'(separate rank values for "mag" or "grad" are '
'meaningless).')
elif 'meg' in rank:
raise ValueError('When not using SSS, pass separate rank values '
'for "mag" and "grad" (do not use "meg").')
picks_list = _picks_by_type(info, meg_combined=has_sss)
if has_sss:
# reduce ch_used to combined mag grad
ch_used = list(zip(*picks_list))[0]
# order pick list by ch_used (required for compat with plot_evoked)
picks_list = [x for x, y in sorted(zip(picks_list, ch_used))]
n_ch_used = len(ch_used)
# make sure we use the same rank estimates for GFP and whitening
picks_list2 = [k for k in picks_list]
# add meg picks if needed.
if has_meg:
# append ("meg", picks_meg)
picks_list2 += _picks_by_type(info, meg_combined=True)
rank_list = [] # rank dict for each cov
for cov in covs:
# We need to add the covariance projectors, compute the projector,
# and apply it, just like we will do in prepare_noise_cov, otherwise
# we risk the rank estimates being incorrect (i.e., if the projectors
# do not match).
info_proj = info.copy()
info_proj['projs'] += cov['projs']
this_rank = {}
# assemble rank dict for this cov, such that we have meg
for ch_type, this_picks in picks_list2:
            # if we already have estimates / values for mag/grad but not
# a value for meg, combine grad and mag.
if ('mag' in this_rank and 'grad' in this_rank and
'meg' not in rank):
this_rank['meg'] = this_rank['mag'] + this_rank['grad']
# and we're done here
break
if rank.get(ch_type) is None:
ch_names = [info['ch_names'][pick] for pick in this_picks]
this_C = pick_channels_cov(cov, ch_names)
this_estimated_rank = compute_rank(
this_C, scalings=scalings, info=info_proj)[ch_type]
this_rank[ch_type] = this_estimated_rank
elif rank.get(ch_type) is not None:
this_rank[ch_type] = rank[ch_type]
rank_list.append(this_rank)
return n_ch_used, rank_list, picks_list, has_sss
def _check_cov(noise_cov, info):
"""Check the noise_cov for whitening and issue an SSS warning."""
from ..cov import read_cov, Covariance
if noise_cov is None:
return None
if isinstance(noise_cov, str):
noise_cov = read_cov(noise_cov)
if not isinstance(noise_cov, Covariance):
raise TypeError('noise_cov must be a str or Covariance, got %s'
% (type(noise_cov),))
if _check_sss(info)[2]: # has_sss
warn('Data have been processed with SSS, which changes the relative '
'scaling of magnetometers and gradiometers when viewing data '
'whitened by a noise covariance')
return noise_cov
def _set_title_multiple_electrodes(title, combine, ch_names, max_chans=6,
all=False, ch_type=None):
"""Prepare a title string for multiple electrodes."""
if title is None:
title = ", ".join(ch_names[:max_chans])
ch_type = _channel_type_prettyprint.get(ch_type, ch_type)
if ch_type is None:
ch_type = "sensor"
if len(ch_names) > 1:
ch_type += "s"
if all is True and isinstance(combine, str):
combine = combine.capitalize()
title = "{} of {} {}".format(
combine, len(ch_names), ch_type)
elif len(ch_names) > max_chans and combine != "gfp":
logger.info("More than {} channels, truncating title ...".format(
max_chans))
title += ", ...\n({} of {} {})".format(
combine, len(ch_names), ch_type,)
return title
def _check_time_unit(time_unit, times):
if not isinstance(time_unit, str):
raise TypeError('time_unit must be str, got %s' % (type(time_unit),))
if time_unit == 's':
pass
elif time_unit == 'ms':
times = 1e3 * times
else:
raise ValueError("time_unit must be 's' or 'ms', got %r" % time_unit)
return time_unit, times
def _plot_masked_image(ax, data, times, mask=None, yvals=None,
cmap="RdBu_r", vmin=None, vmax=None, ylim=None,
mask_style="both", mask_alpha=.25, mask_cmap="Greys",
yscale="linear"):
"""Plot a potentially masked (evoked, TFR, ...) 2D image."""
from matplotlib import ticker, __version__ as mpl_version
if mask_style is None and mask is not None:
mask_style = "both" # default
draw_mask = mask_style in {"both", "mask"}
draw_contour = mask_style in {"both", "contour"}
if cmap is None:
mask_cmap = cmap
# mask param check and preparation
if draw_mask is None:
if mask is not None:
draw_mask = True
else:
draw_mask = False
if draw_contour is None:
if mask is not None:
draw_contour = True
else:
draw_contour = False
if mask is None:
if draw_mask:
warn("`mask` is None, not masking the plot ...")
draw_mask = False
if draw_contour:
warn("`mask` is None, not adding contour to the plot ...")
draw_contour = False
if draw_mask:
if mask.shape != data.shape:
raise ValueError(
"The mask must have the same shape as the data, "
"i.e., %s, not %s" % (data.shape, mask.shape))
if draw_contour and yscale == "log":
warn("Cannot draw contours with linear yscale yet ...")
if yvals is None: # for e.g. Evoked images
yvals = np.arange(data.shape[0])
# else, if TFR plot, yvals will be freqs
# test yscale
if yscale == 'log' and not yvals[0] > 0:
raise ValueError('Using log scale for frequency axis requires all your'
' frequencies to be positive (you cannot include'
' the DC component (0 Hz) in the TFR).')
if len(yvals) < 2 or yvals[0] == 0:
yscale = 'linear'
elif yscale != 'linear':
ratio = yvals[1:] / yvals[:-1]
if yscale == 'auto':
if yvals[0] > 0 and np.allclose(ratio, ratio[0]):
yscale = 'log'
else:
yscale = 'linear'
# https://github.com/matplotlib/matplotlib/pull/9477
if yscale == "log" and mpl_version == "2.1.0":
warn("With matplotlib version 2.1.0, lines may not show up in "
"`AverageTFR.plot_joint`. Upgrade to a more recent version.")
if yscale == "log": # pcolormesh for log scale
# compute bounds between time samples
time_lims, = centers_to_edges(times)
log_yvals = np.concatenate([[yvals[0] / ratio[0]], yvals,
[yvals[-1] * ratio[0]]])
yval_lims = np.sqrt(log_yvals[:-1] * log_yvals[1:])
# construct a time-yvaluency bounds grid
time_mesh, yval_mesh = np.meshgrid(time_lims, yval_lims)
if mask is not None:
ax.pcolormesh(time_mesh, yval_mesh, data, cmap=mask_cmap,
vmin=vmin, vmax=vmax, alpha=mask_alpha)
im = ax.pcolormesh(time_mesh, yval_mesh,
np.ma.masked_where(~mask, data), cmap=cmap,
vmin=vmin, vmax=vmax, alpha=1)
else:
im = ax.pcolormesh(time_mesh, yval_mesh, data, cmap=cmap,
vmin=vmin, vmax=vmax)
if ylim is None:
ylim = yval_lims[[0, -1]]
if yscale == 'log':
ax.set_yscale('log')
ax.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
ax.yaxis.set_minor_formatter(ticker.NullFormatter())
# get rid of minor ticks
ax.yaxis.set_minor_locator(ticker.NullLocator())
tick_vals = yvals[np.unique(np.linspace(
0, len(yvals) - 1, 12).round().astype('int'))]
ax.set_yticks(tick_vals)
else:
# imshow for linear because the y ticks are nicer
# and the masked areas look better
dt = np.median(np.diff(times)) / 2. if len(times) > 1 else 0.1
dy = np.median(np.diff(yvals)) / 2. if len(yvals) > 1 else 0.5
extent = [times[0] - dt, times[-1] + dt,
yvals[0] - dy, yvals[-1] + dy]
im_args = dict(interpolation='nearest', origin='lower',
extent=extent, aspect='auto', vmin=vmin, vmax=vmax)
if draw_mask:
ax.imshow(data, alpha=mask_alpha, cmap=mask_cmap, **im_args)
im = ax.imshow(
np.ma.masked_where(~mask, data), cmap=cmap, **im_args)
else:
ax.imshow(data, cmap=cmap, **im_args) # see #6481
im = ax.imshow(data, cmap=cmap, **im_args)
if draw_contour and np.unique(mask).size == 2:
big_mask = np.kron(mask, np.ones((10, 10)))
ax.contour(big_mask, colors=["k"], extent=extent,
linewidths=[.75], corner_mask=False,
antialiased=False, levels=[.5])
time_lims = [extent[0], extent[1]]
if ylim is None:
ylim = [extent[2], extent[3]]
ax.set_xlim(time_lims[0], time_lims[-1])
ax.set_ylim(ylim)
if (draw_mask or draw_contour) and mask is not None:
if mask.all():
t_end = ", all points masked)"
else:
fraction = 1 - (np.float64(mask.sum()) / np.float64(mask.size))
t_end = ", %0.3g%% of points masked)" % (fraction * 100,)
else:
t_end = ")"
return im, t_end
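# A minimal sketch of calling ``_plot_masked_image`` directly (illustrative
# only; the data, times and mask below are made up). Points outside the mask
# are drawn faintly in grey and a contour outlines the masked region.
def _example_plot_masked_image():
    import numpy as np
    import matplotlib.pyplot as plt
    rng = np.random.RandomState(0)
    data = rng.randn(20, 50)  # e.g. channels x times
    times = np.linspace(0., 1., data.shape[1])
    mask = data > 1.  # arbitrary "significance" mask, same shape as data
    fig, ax = plt.subplots()
    im, t_end = _plot_masked_image(ax, data, times, mask=mask,
                                   mask_style="both")
    fig.colorbar(im, ax=ax)
    return fig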
@fill_doc
def _make_combine_callable(combine):
"""Convert None or string values of ``combine`` into callables.
    Parameters
    ----------
%(combine)s
If callable, the callable must accept one positional input (data of
shape ``(n_epochs, n_channels, n_times)`` or ``(n_evokeds, n_channels,
n_times)``) and return an :class:`array <numpy.ndarray>` of shape
``(n_epochs, n_times)`` or ``(n_evokeds, n_times)``.
"""
if combine is None:
combine = partial(np.squeeze, axis=1)
elif isinstance(combine, str):
combine_dict = {key: partial(getattr(np, key), axis=1)
for key in ('mean', 'median', 'std')}
combine_dict['gfp'] = lambda data: np.sqrt((data ** 2).mean(axis=1))
try:
combine = combine_dict[combine]
except KeyError:
raise ValueError('"combine" must be None, a callable, or one of '
'"mean", "median", "std", or "gfp"; got {}'
''.format(combine))
return combine
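# A minimal sketch of what the callables returned by ``_make_combine_callable``
# do (illustrative only; the array shape below is arbitrary): each one reduces
# the channel axis of an (n_epochs, n_channels, n_times) array.
def _example_make_combine_callable():
    import numpy as np
    data = np.random.RandomState(0).randn(4, 3, 10)  # epochs x channels x times
    out_mean = _make_combine_callable('mean')(data)  # (4, 10), channel average
    out_gfp = _make_combine_callable('gfp')(data)  # (4, 10), RMS across channels
    assert out_mean.shape == out_gfp.shape == (4, 10)
    return out_mean, out_gfp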
def center_cmap(cmap, vmin, vmax, name="cmap_centered"):
"""Center given colormap (ranging from vmin to vmax) at value 0.
Parameters
----------
cmap : matplotlib.colors.Colormap
The colormap to center around 0.
vmin : float
Minimum value in the data to map to the lower end of the colormap.
vmax : float
Maximum value in the data to map to the upper end of the colormap.
name : str
Name of the new colormap. Defaults to 'cmap_centered'.
Returns
-------
cmap_centered : matplotlib.colors.Colormap
The new colormap centered around 0.
Notes
-----
This function can be used in situations where vmin and vmax are not
symmetric around zero. Normally, this results in the value zero not being
mapped to white anymore in many colormaps. Using this function, the value
zero will be mapped to white even for asymmetric positive and negative
value ranges. Note that this could also be achieved by re-normalizing a
given colormap by subclassing matplotlib.colors.Normalize as described
here:
https://matplotlib.org/users/colormapnorms.html#custom-normalization-two-linear-ranges
""" # noqa: E501
from matplotlib.colors import LinearSegmentedColormap
vzero = abs(vmin) / float(vmax - vmin)
index_old = np.linspace(0, 1, cmap.N)
index_new = np.hstack([np.linspace(0, vzero, cmap.N // 2, endpoint=False),
np.linspace(vzero, 1, cmap.N // 2)])
colors = "red", "green", "blue", "alpha"
cdict = {name: [] for name in colors}
for old, new in zip(index_old, index_new):
for color, name in zip(cmap(old), colors):
cdict[name].append((new, color, color))
return LinearSegmentedColormap(name, cdict)
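# A minimal usage sketch for ``center_cmap`` (illustrative only; the data and
# limits below are arbitrary): with vmin=-1 and vmax=3 the plain "RdBu_r"
# colormap would put white at 1, while the centered version keeps white at 0.
def _example_center_cmap():
    import numpy as np
    import matplotlib.pyplot as plt
    data = np.linspace(-1., 3., 100).reshape(10, 10)  # asymmetric value range
    cmap = center_cmap(plt.get_cmap("RdBu_r"), vmin=-1., vmax=3.)
    fig, ax = plt.subplots()
    im = ax.imshow(data, cmap=cmap, vmin=-1., vmax=3.)
    fig.colorbar(im, ax=ax)
    return fig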
def _convert_psds(psds, dB, estimate, scaling, unit, ch_names=None,
first_dim='channel'):
"""Convert PSDs to dB (if necessary) and appropriate units.
The following table summarizes the relationship between the value of
parameters ``dB`` and ``estimate``, and the type of plot and corresponding
units.
| dB | estimate | plot | units |
|-------+-------------+------+-------------------|
| True | 'power' | PSD | amp**2/Hz (dB) |
| True | 'amplitude' | ASD | amp/sqrt(Hz) (dB) |
| True | 'auto' | PSD | amp**2/Hz (dB) |
| False | 'power' | PSD | amp**2/Hz |
| False | 'amplitude' | ASD | amp/sqrt(Hz) |
| False | 'auto' | ASD | amp/sqrt(Hz) |
where amp are the units corresponding to the variable, as specified by
``unit``.
"""
_check_option('first_dim', first_dim, ['channel', 'epoch'])
where = np.where(psds.min(1) <= 0)[0]
if len(where) > 0:
        # Construct a helpful error message, depending on whether the first
        # dimension of `psds` indexes channels or epochs.
if dB:
bad_value = 'Infinite'
else:
bad_value = 'Zero'
if first_dim == 'channel':
bads = ', '.join(ch_names[ii] for ii in where)
else:
bads = ', '.join(str(ii) for ii in where)
msg = f'{bad_value} value in PSD for {first_dim}{_pl(where)} {bads}.'
if first_dim == 'channel':
msg += '\nThese channels might be dead.'
warn(msg, UserWarning)
if estimate == 'auto':
estimate = 'power' if dB else 'amplitude'
if estimate == 'amplitude':
np.sqrt(psds, out=psds)
psds *= scaling
ylabel = r'$\mathrm{%s/\sqrt{Hz}}$' % unit
else:
psds *= scaling * scaling
if '/' in unit:
unit = '(%s)' % unit
ylabel = r'$\mathrm{%s²/Hz}$' % unit
if dB:
np.log10(np.maximum(psds, np.finfo(float).tiny), out=psds)
psds *= 10
ylabel += r'$\ \mathrm{(dB)}$'
return ylabel
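# A minimal sketch of ``_convert_psds`` (illustrative only; the PSD values,
# scaling and unit below are made up). The conversion happens in place and
# the returned string is the matching y-axis label.
def _example_convert_psds():
    import numpy as np
    psds = np.abs(np.random.RandomState(0).randn(2, 128)) + 1e-12  # chs x freqs
    ylabel = _convert_psds(psds, dB=True, estimate='auto', scaling=1e6,
                           unit='uV', ch_names=['EEG 001', 'EEG 002'])
    # psds now holds 10 * log10 of the scaled power; ylabel carries the
    # corresponding units with a dB suffix.
    return psds, ylabel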
def _plot_psd(inst, fig, freqs, psd_list, picks_list, titles_list,
units_list, scalings_list, ax_list, make_label, color, area_mode,
area_alpha, dB, estimate, average, spatial_colors, xscale,
line_alpha, sphere, xlabels_list):
# helper function for plot_raw_psd and plot_epochs_psd
from matplotlib.ticker import ScalarFormatter
from .evoked import _plot_lines
for key, ls in zip(['lowpass', 'highpass', 'line_freq'],
['--', '--', '-.']):
if inst.info[key] is not None:
for ax in ax_list:
ax.axvline(inst.info[key], color='k', linestyle=ls,
alpha=0.25, linewidth=2, zorder=2)
if line_alpha is None:
line_alpha = 1.0 if average else 0.75
line_alpha = float(line_alpha)
ylabels = list()
for ii, (psd, picks, title, ax, scalings, units) in enumerate(zip(
psd_list, picks_list, titles_list, ax_list,
scalings_list, units_list)):
ylabel = _convert_psds(psd, dB, estimate, scalings, units,
[inst.ch_names[pi] for pi in picks])
ylabels.append(ylabel)
del ylabel
if average:
# mean across channels
psd_mean = np.mean(psd, axis=0)
if area_mode == 'std':
# std across channels
psd_std = np.std(psd, axis=0)
hyp_limits = (psd_mean - psd_std, psd_mean + psd_std)
elif area_mode == 'range':
hyp_limits = (np.min(psd, axis=0),
np.max(psd, axis=0))
else: # area_mode is None
hyp_limits = None
ax.plot(freqs, psd_mean, color=color, alpha=line_alpha,
linewidth=0.5)
if hyp_limits is not None:
ax.fill_between(freqs, hyp_limits[0], y2=hyp_limits[1],
facecolor=color, alpha=area_alpha)
if not average:
picks = np.concatenate(picks_list)
psd_list = np.concatenate(psd_list)
types = np.array(inst.get_channel_types(picks=picks))
# Needed because the data do not match the info anymore.
info = create_info([inst.ch_names[p] for p in picks],
inst.info['sfreq'], types)
info['chs'] = [inst.info['chs'][p] for p in picks]
info['dev_head_t'] = inst.info['dev_head_t']
ch_types_used = list()
for this_type in _VALID_CHANNEL_TYPES:
if this_type in types:
ch_types_used.append(this_type)
assert len(ch_types_used) == len(ax_list)
unit = ''
units = {t: yl for t, yl in zip(ch_types_used, ylabels)}
titles = {c: t for c, t in zip(ch_types_used, titles_list)}
picks = np.arange(len(psd_list))
if not spatial_colors:
spatial_colors = color
_plot_lines(psd_list, info, picks, fig, ax_list, spatial_colors,
unit, units=units, scalings=None, hline=None, gfp=False,
types=types, zorder='std', xlim=(freqs[0], freqs[-1]),
ylim=None, times=freqs, bad_ch_idx=[], titles=titles,
ch_types_used=ch_types_used, selectable=True, psd=True,
line_alpha=line_alpha, nave=None, time_unit='ms',
sphere=sphere)
for ii, (ax, xlabel) in enumerate(zip(ax_list, xlabels_list)):
ax.grid(True, linestyle=':')
if xscale == 'log':
ax.set(xscale='log')
ax.set(xlim=[freqs[1] if freqs[0] == 0 else freqs[0], freqs[-1]])
ax.get_xaxis().set_major_formatter(ScalarFormatter())
else: # xscale == 'linear'
ax.set(xlim=(freqs[0], freqs[-1]))
if make_label:
ax.set(ylabel=ylabels[ii], title=titles_list[ii])
if xlabel:
ax.set_xlabel('Frequency (Hz)')
if make_label:
fig.align_ylabels(axs=ax_list)
return fig
def _trim_ticks(ticks, _min, _max):
"""Remove ticks that are more extreme than the given limits."""
keep = np.where(np.logical_and(ticks >= _min, ticks <= _max))
return ticks[keep]
def _set_window_title(fig, title):
if fig.canvas.manager is not None:
fig.canvas.manager.set_window_title(title)
def _shorten_path_from_middle(fpath, max_len=60, replacement='...'):
"""Truncate a path from the middle by omitting complete path elements."""
from os.path import sep
if len(fpath) > max_len:
pathlist = fpath.split(sep)
# indices starting from middle, alternating sides, omitting final elem:
# range(8) → 3, 4, 2, 5, 1, 6; range(7) → 2, 3, 1, 4, 0, 5
ixs_to_trunc = list(zip(range(len(pathlist) // 2 - 1, -1, -1),
range(len(pathlist) // 2, len(pathlist) - 1)))
ixs_to_trunc = np.array(ixs_to_trunc).flatten()
for ix in ixs_to_trunc:
pathlist[ix] = replacement
truncs = (np.array(pathlist) == replacement).nonzero()[0]
newpath = sep.join(pathlist[:truncs[0]] + pathlist[truncs[-1]:])
if len(newpath) < max_len:
break
return newpath
return fpath
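# A minimal sketch of ``_shorten_path_from_middle`` (illustrative only; the
# path below is made up): whole path elements in the middle are collapsed into
# a single '...' until the result fits within ``max_len`` characters.
def _example_shorten_path():
    long_path = '/home/user/projects/study/derivatives/sub-01/ses-01/meg/raw.fif'
    return _shorten_path_from_middle(long_path, max_len=40)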
def centers_to_edges(*arrays):
"""Convert center points to edges.
Parameters
----------
*arrays : list of ndarray
Each input array should be 1D monotonically increasing,
and will be cast to float.
Returns
-------
arrays : list of ndarray
Given each input of shape (N,), the output will have shape (N+1,).
Examples
--------
>>> x = [0., 0.1, 0.2, 0.3]
>>> y = [20, 30, 40]
>>> centers_to_edges(x, y) # doctest: +SKIP
[array([-0.05, 0.05, 0.15, 0.25, 0.35]), array([15., 25., 35., 45.])]
"""
out = list()
for ai, arr in enumerate(arrays):
arr = np.asarray(arr, dtype=float)
_check_option(f'arrays[{ai}].ndim', arr.ndim, (1,))
if len(arr) > 1:
arr_diff = np.diff(arr) / 2.
else:
arr_diff = [abs(arr[0]) * 0.001] if arr[0] != 0 else [0.001]
out.append(np.concatenate([
[arr[0] - arr_diff[0]],
arr[:-1] + arr_diff,
[arr[-1] + arr_diff[-1]]]))
return out
|
bsd-3-clause
|
btabibian/scikit-learn
|
examples/neighbors/plot_regression.py
|
349
|
1402
|
"""
============================
Nearest Neighbors regression
============================
Demonstrate the resolution of a regression problem
using a k-Nearest Neighbor and the interpolation of the
target using both barycenter and constant weights.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause (C) INRIA
###############################################################################
# Generate sample data
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
np.random.seed(0)
X = np.sort(5 * np.random.rand(40, 1), axis=0)
T = np.linspace(0, 5, 500)[:, np.newaxis]
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 1 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
n_neighbors = 5
for i, weights in enumerate(['uniform', 'distance']):
knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
y_ = knn.fit(X, y).predict(T)
plt.subplot(2, 1, i + 1)
plt.scatter(X, y, c='k', label='data')
plt.plot(T, y_, c='g', label='prediction')
plt.axis('tight')
plt.legend()
plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors,
weights))
plt.show()
|
bsd-3-clause
|
asengupta/avishek.net
|
code/graphs/plane-v-containing-vector-u.py
|
1
|
1177
|
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
# a point on the plane
point = np.array([0, 0, 0])
normal = np.array([0, 0, 1])
# a plane is a*x+b*y+c*z+d=0
# [a,b,c] is the normal (here the z axis, so the plane is z=0). Thus, we
# have to calculate d and we're set
d = -point.dot(normal)
# create x,y
xx, yy = np.meshgrid(np.linspace(-2., 2., num=5), np.linspace(-2., 2., num=5))
# calculate corresponding z (all zeros for the plane z=0)
zz = (-normal[0] * xx - normal[1] * yy - d) * 1. / normal[2]
# plot the surface
plt3d = plt.subplot(projection='3d')
plt3d.plot_wireframe(xx, yy, zz)
plt3d.quiver(0, 0, 0, 1., 0, 0., colors = 'black', linewidths=2, arrow_length_ratio=0.2)
plt3d.text(1, 0, 0, "U=Vector $(1,1,0)$")
plt3d.text(0, -2, 3.5, "V=The Plane $z=0$")
# plt3d.text(5, 5, 5, "(5,5,5)")
plt3d.set_xticks(range(-2, 3))
plt3d.set_yticks(range(-2, 3))
plt3d.set_zticks(range(-2, 3))
plt3d.set_xlim([-2,2])
plt3d.set_ylim([-2,2])
plt3d.set_zlim([-2,2])
plt3d.set_xlabel('X axis')
plt3d.set_ylabel('Y axis')
plt3d.set_zlabel('Z axis')
plt.show()
|
gpl-2.0
|
bhargav/scikit-learn
|
sklearn/utils/tests/test_murmurhash.py
|
65
|
2838
|
# Author: Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.externals.six import b, u
from sklearn.utils.murmurhash import murmurhash3_32
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from nose.tools import assert_equal, assert_true
def test_mmhash3_int():
assert_equal(murmurhash3_32(3), 847579505)
assert_equal(murmurhash3_32(3, seed=0), 847579505)
assert_equal(murmurhash3_32(3, seed=42), -1823081949)
assert_equal(murmurhash3_32(3, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=False), -1823081949)
assert_equal(murmurhash3_32(3, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=True), 2471885347)
def test_mmhash3_int_array():
rng = np.random.RandomState(42)
keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32)
keys = keys.reshape((3, 2, 1))
for seed in [0, 42]:
expected = np.array([murmurhash3_32(int(k), seed)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed), expected)
for seed in [0, 42]:
expected = np.array([murmurhash3_32(k, seed, positive=True)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed, positive=True),
expected)
def test_mmhash3_bytes():
assert_equal(murmurhash3_32(b('foo'), 0), -156908512)
assert_equal(murmurhash3_32(b('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(b('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(b('foo'), 42, positive=True), 2972666014)
def test_mmhash3_unicode():
assert_equal(murmurhash3_32(u('foo'), 0), -156908512)
assert_equal(murmurhash3_32(u('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(u('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(u('foo'), 42, positive=True), 2972666014)
def test_no_collision_on_byte_range():
previous_hashes = set()
    for i in range(100):
        h = murmurhash3_32(' ' * i, 0)
        assert_true(h not in previous_hashes,
                    "Found collision on growing empty string")
        previous_hashes.add(h)
def test_uniform_distribution():
n_bins, n_samples = 10, 100000
bins = np.zeros(n_bins, dtype=np.float64)
for i in range(n_samples):
bins[murmurhash3_32(i, positive=True) % n_bins] += 1
means = bins / n_samples
expected = np.ones(n_bins) / n_bins
assert_array_almost_equal(means / expected, np.ones(n_bins), 2)
|
bsd-3-clause
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/ease/grade.py
|
3
|
7086
|
"""
Functions to score specified data using specified ML models
"""
import sys
import pickle
import os
import numpy
import logging
#Append sys to base path to import the following modules
base_path = os.path.dirname(__file__)
sys.path.append(base_path)
#Depend on base path to be imported
from essay_set import EssaySet
import predictor_extractor
import predictor_set
import util_functions
#Imports needed to unpickle grader data
import feature_extractor
import sklearn.ensemble
import math
log = logging.getLogger(__name__)
def grade(grader_data,submission):
"""
Grades a specified submission using specified models
grader_data - A dictionary:
{
'model' : trained model,
'extractor' : trained feature extractor,
'prompt' : prompt for the question,
'algorithm' : algorithm for the question,
}
submission - The student submission (string)
"""
#Initialize result dictionary
results = {'errors': [],'tests': [],'score': 0, 'feedback' : "", 'success' : False, 'confidence' : 0}
has_error=False
grader_set=EssaySet(essaytype="test")
feedback = {}
model, extractor = get_classifier_and_ext(grader_data)
#This is to preserve legacy functionality
if 'algorithm' not in grader_data:
grader_data['algorithm'] = util_functions.AlgorithmTypes.classification
try:
#Try to add essay to essay set object
grader_set.add_essay(str(submission),0)
grader_set.update_prompt(str(grader_data['prompt']))
except Exception:
error_message = "Essay could not be added to essay set:{0}".format(submission)
log.exception(error_message)
results['errors'].append(error_message)
has_error=True
#Try to extract features from submission and assign score via the model
try:
grader_feats=extractor.gen_feats(grader_set)
feedback=extractor.gen_feedback(grader_set,grader_feats)[0]
results['score']=int(model.predict(grader_feats)[0])
except Exception:
error_message = "Could not extract features and score essay."
log.exception(error_message)
results['errors'].append(error_message)
has_error=True
#Try to determine confidence level
try:
results['confidence'] = get_confidence_value(grader_data['algorithm'], model, grader_feats, results['score'], grader_data['score'])
except Exception:
#If there is an error getting confidence, it is not a show-stopper, so just log
log.exception("Problem generating confidence value")
if not has_error:
#If the essay is just a copy of the prompt, return a 0 as the score
if( 'too_similar_to_prompt' in feedback and feedback['too_similar_to_prompt']):
results['score']=0
results['correct']=False
results['success']=True
#Generate short form output--number of problem areas identified in feedback
#Add feedback to results if available
results['feedback'] = {}
if 'topicality' in feedback and 'prompt_overlap' in feedback:
results['feedback'].update({
'topicality' : feedback['topicality'],
'prompt-overlap' : feedback['prompt_overlap'],
})
results['feedback'].update(
{
'spelling' : feedback['spelling'],
'grammar' : feedback['grammar'],
'markup-text' : feedback['markup_text'],
}
)
else:
#If error, success is False.
results['success']=False
return results
def grade_generic(grader_data, numeric_features, textual_features):
"""
Grades a set of numeric and textual features using a generic model
grader_data -- dictionary containing:
{
'algorithm' - Type of algorithm to use to score
}
numeric_features - list of numeric features to predict on
textual_features - list of textual feature to predict on
"""
results = {'errors': [],'tests': [],'score': 0, 'success' : False, 'confidence' : 0}
has_error=False
#Try to find and load the model file
grader_set=predictor_set.PredictorSet(essaytype="test")
model, extractor = get_classifier_and_ext(grader_data)
#Try to add essays to essay set object
try:
grader_set.add_row(numeric_features, textual_features,0)
except Exception:
error_msg = "Row could not be added to predictor set:{0} {1}".format(numeric_features, textual_features)
log.exception(error_msg)
results['errors'].append(error_msg)
has_error=True
#Try to extract features from submission and assign score via the model
try:
grader_feats=extractor.gen_feats(grader_set)
results['score']=model.predict(grader_feats)[0]
except Exception:
error_msg = "Could not extract features and score essay."
log.exception(error_msg)
results['errors'].append(error_msg)
has_error=True
#Try to determine confidence level
try:
results['confidence'] = get_confidence_value(grader_data['algorithm'],model, grader_feats, results['score'])
except Exception:
#If there is an error getting confidence, it is not a show-stopper, so just log
log.exception("Problem generating confidence value")
if not has_error:
results['success'] = True
return results
def get_confidence_value(algorithm,model,grader_feats,score, scores):
"""
Determines a confidence in a certain score, given proper input parameters
algorithm- from util_functions.AlgorithmTypes
model - a trained model
grader_feats - a row of features used by the model for classification/regression
    score - The score assigned to the submission by a prior model
    scores - The possible scores for the question (used to find the column of
    the probability matrix that corresponds to the given score)
    """
min_score=min(numpy.asarray(scores))
max_score=max(numpy.asarray(scores))
if algorithm == util_functions.AlgorithmTypes.classification and hasattr(model, "predict_proba"):
#If classification, predict with probability, which gives you a matrix of confidences per score point
raw_confidence=model.predict_proba(grader_feats)[0,(float(score)-float(min_score))]
#TODO: Normalize confidence somehow here
confidence=raw_confidence
elif hasattr(model, "predict"):
raw_confidence = model.predict(grader_feats)[0]
confidence = max(float(raw_confidence) - math.floor(float(raw_confidence)), math.ceil(float(raw_confidence)) - float(raw_confidence))
else:
confidence = 0
return confidence
def get_classifier_and_ext(grader_data):
if 'classifier' in grader_data:
model = grader_data['classifier']
elif 'model' in grader_data:
model = grader_data['model']
else:
raise Exception("Cannot find a valid model.")
if 'feature_ext' in grader_data:
extractor = grader_data['feature_ext']
elif 'extractor' in grader_data:
extractor = grader_data['extractor']
else:
raise Exception("Cannot find the extractor")
return model, extractor
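#Illustrative sketch only (not called anywhere): get_classifier_and_ext accepts
#either the 'classifier'/'feature_ext' keys or the legacy 'model'/'extractor'
#keys. The objects below are dummy placeholders.
def _example_get_classifier_and_ext():
    dummy_model, dummy_extractor = object(), object()
    new_style = {'classifier': dummy_model, 'feature_ext': dummy_extractor}
    legacy = {'model': dummy_model, 'extractor': dummy_extractor}
    assert get_classifier_and_ext(new_style) == (dummy_model, dummy_extractor)
    assert get_classifier_and_ext(legacy) == (dummy_model, dummy_extractor)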
|
agpl-3.0
|
nhmc/xastropy
|
xastropy/spec/analysis.py
|
6
|
4784
|
"""
#;+
#; NAME:
#; analysis
#; Version 1.0
#;
#; PURPOSE:
#; Module for Analysis of Spectra
#; 07-Sep-2014 by JXP
#;-
#;------------------------------------------------------------------------------
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import xastropy
import numpy as np
import matplotlib.pyplot as plt
import pdb
from astropy import constants as const
import xastropy.atomic as xatom
from xastropy.xutils import xdebug as xdb
#class Spectral_Line(object):
#def pixminmax(spec, zabs, wrest, vmnx):
#def x_contifit(specfil, outfil=None, savfil=None, redshift=0., divmult=1, forest_divmult=1):
# Class for Ionic columns of a given line
class Spectral_Line(object):
"""Class for analysis of a given spectral line
Attributes:
wrest: float
Rest wavelength of the spectral feature
"""
# Initialize with wavelength
def __init__(self, wrest, clm_file=None):
self.wrest = wrest
self.atomic = {} # Atomic Data
self.analy = {} # Analysis inputs (from .clm file or AbsID)
self.measure = {} # Measured quantities (e.g. column, EW, centroid)
# Fill
self.fill()
# Fill Analy
def fill(self):
import xastropy.spec.abs_line as xspa
# Data
self.atomic = xspa.abs_line_data(self.wrest)
#
self.analy['VLIM'] = [0., 0.] # km/s
self.analy['FLG_ANLY'] = 1 # Analyze
self.analy['FLG_EYE'] = 0
self.analy['FLG_LIMIT'] = 0 # No limit
self.analy['DATFIL'] = ''
self.analy['IONNM'] = self.atomic['name']
# Output
def __repr__(self):
return ('[{:s}: wrest={:g}]'.format(
self.__class__.__name__, self.wrest))
#### ###############################
def pixminmax(*args):
''' Soon to be deprecated..
Use Spectrum1D.pix_minmax()
'''
xdb.set_trace()
#### ###############################
# Calls plotvel (Crighton)
# Adapted from N. Tejos scripts
#
def velplt(specfil):
''' Soon to be deprecated..
'''
# Imports
from plotspec import plotvel_util as pspv
reload(pspv)
import xastropy as xa
from subprocess import Popen
# Initialize
if 'f26_fil' not in locals():
f26_fil = 'tmp.f26'
command = ['touch',f26_fil]
print(Popen(command))
print('xa.spec.analysis.velplt: Generated a dummy f26 file -- ', f26_fil)
if 'transfil' not in locals():
path = xa.__path__
transfil = path[0]+'/spec/Data/initial_search.lines'
# Call
pspv.main([specfil, 'f26='+f26_fil, 'transitions='+transfil])
'''
#### ###############################
# Calls Barak routines to fit the continuum
# Stolen from N. Tejos by JXP
#
def x_contifit(specfil, outfil=None, savfil=None, redshift=0., divmult=1, forest_divmult=1):
import os
import barak.fitcont as bf
from barak.spec import read
from barak.io import saveobj, loadobj
import xastropy.spec.readwrite as xsr
reload(xsr)
reload(bf)
# Initialize
if savfil == None:
savfil = 'conti.sav'
if outfil == None:
outfil = 'conti.fits'
# Read spectrum + convert to Barak format
sp = xsr.readspec(specfil)
# Fit spline continuum:
if os.path.lexists(savfil): #'contfit_' + name + '.sav'):
option = raw_input('Adjust old continuum? (y)/n: ')
if option.lower() != 'n':
co_old, knots_old = loadobj(savfil) #'contfit_' + name + '.sav')
co, knots = bf.fitqsocont(sp.wa, sp.fl, sp.er, redshift,
oldco=co_old, knots=knots_old,
divmult=divmult,
forest_divmult=forest_divmult)
else:
co, knots = bf.fitqsocont(sp.wa, sp.fl, sp.er, redshift,
divmult=divmult,
forest_divmult=forest_divmult)
else:
co, knots = bf.fitqsocont(sp.wa, sp.fl, sp.er, redshift,
divmult=divmult,
forest_divmult=forest_divmult)
os.remove('_knots.sav')
# Save continuum:
saveobj(savfil, (co, knots), overwrite=1)
# Check continuum:
print('Plotting new continuum')
plt.clf()
plt.plot(sp.wa, sp.fl, drawstyle='steps-mid')
plt.plot(sp.wa, sp.co, color='r')
plt.show()
# Repeat?
confirm = raw_input('Keep continuum? (y)/n: ')
if confirm == 'y':
fits.writeto(outfil, sp, clobber=True)
else:
print('Writing to tmp.fits anyhow!')
fits.writeto('tmp.fits', sp, clobber=True)
#print name
## Output
# Data file with continuum
'''
|
bsd-3-clause
|
mlperf/training_results_v0.6
|
Fujitsu/benchmarks/resnet/implementations/mxnet/example/svrg_module/linear_regression/data_reader.py
|
8
|
2460
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import bz2
import os
import shutil
import mxnet as mx
import numpy as np
from sklearn.datasets import load_svmlight_file
# Download data file
# YearPredictionMSD dataset: https://archive.ics.uci.edu/ml/datasets/yearpredictionmsd
def get_year_prediction_data(dirname=None):
feature_dim = 90
if dirname is None:
dirname = os.path.join(os.path.dirname(__file__), 'data')
filename = 'YearPredictionMSD'
download_filename = os.path.join(dirname, "%s.bz2" % filename)
extracted_filename = os.path.join(dirname, filename)
if not os.path.isfile(download_filename):
print("Downloading data...")
mx.test_utils.download('https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/regression/%s.bz2' % filename, dirname=dirname)
if not os.path.isfile(extracted_filename):
print("Extracting data...")
with bz2.BZ2File(download_filename) as fr, open(extracted_filename,"wb") as fw:
shutil.copyfileobj(fr,fw)
print("Reading data from disk...")
train_features, train_labels = load_svmlight_file(extracted_filename, n_features=feature_dim, dtype=np.float32)
train_features = train_features.todense()
# normalize the data: subtract means and divide by standard deviations
label_mean = train_labels.mean()
label_std = np.sqrt(np.square(train_labels - label_mean).mean())
feature_means = train_features.mean(axis=0)
feature_stds = np.sqrt(np.square(train_features - feature_means).mean(axis=0))
train_features = (train_features - feature_means) / feature_stds
train_labels = (train_labels - label_mean) / label_std
return feature_dim, train_features, train_labels
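# Illustrative usage sketch (guarded so importing this module has no side
# effects): the first call downloads and extracts the dataset, which is large.
if __name__ == '__main__':
    feature_dim, features, labels = get_year_prediction_data()
    print("feature dim: %d, num samples: %d" % (feature_dim, features.shape[0]))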
|
apache-2.0
|
jaidevd/scikit-learn
|
sklearn/cluster/tests/test_birch.py
|
342
|
5603
|
"""
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
# Sanity check for the number of samples in leaves and roots
X, y = make_blobs(n_samples=10)
brc = Birch()
brc.fit(X)
n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_])
n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves()
for sc in leaf.subclusters_])
assert_equal(n_samples_leaves, X.shape[0])
assert_equal(n_samples_root, X.shape[0])
def test_partial_fit():
# Test that fit is equivalent to calling partial_fit multiple times
X, y = make_blobs(n_samples=100)
brc = Birch(n_clusters=3)
brc.fit(X)
brc_partial = Birch(n_clusters=None)
brc_partial.partial_fit(X[:50])
brc_partial.partial_fit(X[50:])
assert_array_equal(brc_partial.subcluster_centers_,
brc.subcluster_centers_)
# Test that same global labels are obtained after calling partial_fit
# with None
brc_partial.set_params(n_clusters=3)
brc_partial.partial_fit(None)
assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict():
# Test the predict method predicts the nearest centroid.
rng = np.random.RandomState(0)
X = generate_clustered_data(n_clusters=3, n_features=3,
n_samples_per_cluster=10)
# n_samples * n_samples_per_cluster
shuffle_indices = np.arange(30)
rng.shuffle(shuffle_indices)
X_shuffle = X[shuffle_indices, :]
brc = Birch(n_clusters=4, threshold=1.)
brc.fit(X_shuffle)
centroids = brc.subcluster_centers_
assert_array_equal(brc.labels_, brc.predict(X_shuffle))
nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids)
assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters():
# Test that n_clusters param works properly
X, y = make_blobs(n_samples=100, centers=10)
brc1 = Birch(n_clusters=10)
brc1.fit(X)
assert_greater(len(brc1.subcluster_centers_), 10)
assert_equal(len(np.unique(brc1.labels_)), 10)
# Test that n_clusters = Agglomerative Clustering gives
# the same results.
gc = AgglomerativeClustering(n_clusters=10)
brc2 = Birch(n_clusters=gc)
brc2.fit(X)
assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
assert_array_equal(brc1.labels_, brc2.labels_)
# Test that the wrong global clustering step raises an Error.
clf = ElasticNet()
brc3 = Birch(n_clusters=clf)
assert_raises(ValueError, brc3.fit, X)
# Test that a small number of clusters raises a warning.
brc4 = Birch(threshold=10000.)
assert_warns(UserWarning, brc4.fit, X)
def test_sparse_X():
# Test that sparse and dense data give same results
X, y = make_blobs(n_samples=100, centers=10)
brc = Birch(n_clusters=10)
brc.fit(X)
csr = sparse.csr_matrix(X)
brc_sparse = Birch(n_clusters=10)
brc_sparse.fit(csr)
assert_array_equal(brc.labels_, brc_sparse.labels_)
assert_array_equal(brc.subcluster_centers_,
brc_sparse.subcluster_centers_)
def check_branching_factor(node, branching_factor):
subclusters = node.subclusters_
assert_greater_equal(branching_factor, len(subclusters))
for cluster in subclusters:
if cluster.child_:
check_branching_factor(cluster.child_, branching_factor)
def test_branching_factor():
# Test that nodes have at max branching_factor number of subclusters
X, y = make_blobs()
branching_factor = 9
# Purposefully set a low threshold to maximize the subclusters.
brc = Birch(n_clusters=None, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
brc = Birch(n_clusters=3, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
# Raises error when branching_factor is set to one.
brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
assert_raises(ValueError, brc.fit, X)
def check_threshold(birch_instance, threshold):
"""Use the leaf linked list for traversal"""
current_leaf = birch_instance.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert_greater_equal(threshold, sc.radius)
current_leaf = current_leaf.next_leaf_
def test_threshold():
# Test that the leaf subclusters have a threshold lesser than radius
X, y = make_blobs(n_samples=80, centers=4)
brc = Birch(threshold=0.5, n_clusters=None)
brc.fit(X)
check_threshold(brc, 0.5)
brc = Birch(threshold=5.0, n_clusters=None)
brc.fit(X)
check_threshold(brc, 5.)
|
bsd-3-clause
|
petosegan/scikit-learn
|
sklearn/decomposition/tests/test_nmf.py
|
130
|
6059
|
import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
random_state = np.random.mtrand.RandomState(0)
@raises(ValueError)
def test_initialize_nn_input():
# Test NNDSVD behaviour on negative input
nmf._initialize_nmf(-np.ones((2, 2)), 2)
def test_initialize_nn_output():
# Test that NNDSVD does not return negative values
data = np.abs(random_state.randn(10, 10))
    for var in (None, 'a', 'ar'):
        W, H = nmf._initialize_nmf(data, 10, variant=var, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10)
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'a' and 'ar' differ from basic NNDSVD only where
# the basic version has zeros.
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, variant=None)
Wa, Ha = nmf._initialize_nmf(data, 10, variant='a')
War, Har = nmf._initialize_nmf(data, 10, variant='ar', random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@raises(ValueError)
def test_projgrad_nmf_fit_nn_input():
# Test model fit behaviour on negative input
A = -np.ones((2, 2))
m = nmf.ProjectedGradientNMF(n_components=2, init=None, random_state=0)
m.fit(A)
def test_projgrad_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = nmf.ProjectedGradientNMF(n_components=2, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_projgrad_nmf_fit_close():
# Test that the fit is not too far away
pnmf = nmf.ProjectedGradientNMF(5, init='nndsvda', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
# Test that NLS solver doesn't return negative values
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
# Test that the NLS results should be close
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
def test_projgrad_nmf_transform():
# Test that NMF.transform returns close values
# (transform uses scipy.optimize.nnls for now)
A = np.abs(random_state.randn(6, 5))
m = nmf.ProjectedGradientNMF(n_components=5, init='nndsvd', random_state=0)
transf = m.fit_transform(A)
assert_true(np.allclose(transf, m.transform(A), atol=1e-2, rtol=0))
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
A = np.abs(random_state.randn(30, 10))
nmf.ProjectedGradientNMF(n_components=15, sparseness='data',
random_state=0).fit(A)
def test_projgrad_nmf_sparseness():
# Test sparseness
# Test that sparsity constraints actually increase sparseness in the
# part where they are applied.
A = np.abs(random_state.randn(10, 10))
m = nmf.ProjectedGradientNMF(n_components=5, random_state=0).fit(A)
data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0).fit(A).data_sparseness_
comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
def test_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
T1 = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999).fit_transform(A)
A_sparse = csc_matrix(A)
pg_nmf = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999)
T2 = pg_nmf.fit_transform(A_sparse)
assert_array_almost_equal(pg_nmf.reconstruction_err_,
linalg.norm(A - np.dot(T2, pg_nmf.components_),
'fro'))
assert_array_almost_equal(T1, T2)
# same with sparseness
T2 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A_sparse)
T1 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A)
def test_sparse_transform():
# Test that transform works on sparse data. Issue #2124
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(5, 4))
A[A > 1.0] = 0
A = csc_matrix(A)
model = nmf.NMF(random_state=42)
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
# This solver seems pretty inconsistent
assert_array_almost_equal(A_fit_tr, A_tr, decimal=2)
|
bsd-3-clause
|
jonkrohn/study-group
|
neural-networks-and-deep-learning/src/conv.py
|
5
|
12662
|
"""conv.py
~~~~~~~~~~
Code for many of the experiments involving convolutional networks in
Chapter 6 of the book 'Neural Networks and Deep Learning', by Michael
Nielsen. The code essentially duplicates (and parallels) what is in
the text, so this is simply a convenience, and has not been commented
in detail. Consult the original text for more details.
"""
from collections import Counter
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import theano
import theano.tensor as T
import network3
from network3 import sigmoid, tanh, ReLU, Network
from network3 import ConvPoolLayer, FullyConnectedLayer, SoftmaxLayer
training_data, validation_data, test_data = network3.load_data_shared()
mini_batch_size = 10
def shallow(n=3, epochs=60):
nets = []
for j in range(n):
print "A shallow net with 100 hidden neurons"
net = Network([
FullyConnectedLayer(n_in=784, n_out=100),
SoftmaxLayer(n_in=100, n_out=10)], mini_batch_size)
net.SGD(
training_data, epochs, mini_batch_size, 0.1,
validation_data, test_data)
nets.append(net)
return nets
def basic_conv(n=3, epochs=60):
for j in range(n):
print "Conv + FC architecture"
net = Network([
ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
filter_shape=(20, 1, 5, 5),
poolsize=(2, 2)),
FullyConnectedLayer(n_in=20*12*12, n_out=100),
SoftmaxLayer(n_in=100, n_out=10)], mini_batch_size)
net.SGD(
training_data, epochs, mini_batch_size, 0.1, validation_data, test_data)
return net
def omit_FC():
for j in range(3):
print "Conv only, no FC"
net = Network([
ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
filter_shape=(20, 1, 5, 5),
poolsize=(2, 2)),
SoftmaxLayer(n_in=20*12*12, n_out=10)], mini_batch_size)
net.SGD(training_data, 60, mini_batch_size, 0.1, validation_data, test_data)
return net
def dbl_conv(activation_fn=sigmoid):
for j in range(3):
print "Conv + Conv + FC architecture"
net = Network([
ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
filter_shape=(20, 1, 5, 5),
poolsize=(2, 2),
activation_fn=activation_fn),
ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12),
filter_shape=(40, 20, 5, 5),
poolsize=(2, 2),
activation_fn=activation_fn),
FullyConnectedLayer(
n_in=40*4*4, n_out=100, activation_fn=activation_fn),
SoftmaxLayer(n_in=100, n_out=10)], mini_batch_size)
net.SGD(training_data, 60, mini_batch_size, 0.1, validation_data, test_data)
return net
# The following experiment was eventually omitted from the chapter,
# but I've left it in here, since it's an important negative result:
# basic l2 regularization didn't help much. The reason (I believe) is
# that using convolutional-pooling layers is already a pretty strong
# regularizer.
def regularized_dbl_conv():
for lmbda in [0.00001, 0.0001, 0.001, 0.01, 0.1, 1.0, 10.0, 100.0]:
for j in range(3):
print "Conv + Conv + FC num %s, with regularization %s" % (j, lmbda)
net = Network([
ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
filter_shape=(20, 1, 5, 5),
poolsize=(2, 2)),
ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12),
filter_shape=(40, 20, 5, 5),
poolsize=(2, 2)),
FullyConnectedLayer(n_in=40*4*4, n_out=100),
SoftmaxLayer(n_in=100, n_out=10)], mini_batch_size)
net.SGD(training_data, 60, mini_batch_size, 0.1, validation_data, test_data, lmbda=lmbda)
def dbl_conv_relu():
for lmbda in [0.0, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1.0, 10.0, 100.0]:
for j in range(3):
print "Conv + Conv + FC num %s, relu, with regularization %s" % (j, lmbda)
net = Network([
ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
filter_shape=(20, 1, 5, 5),
poolsize=(2, 2),
activation_fn=ReLU),
ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12),
filter_shape=(40, 20, 5, 5),
poolsize=(2, 2),
activation_fn=ReLU),
FullyConnectedLayer(n_in=40*4*4, n_out=100, activation_fn=ReLU),
SoftmaxLayer(n_in=100, n_out=10)], mini_batch_size)
net.SGD(training_data, 60, mini_batch_size, 0.03, validation_data, test_data, lmbda=lmbda)
#### Some subsequent functions may make use of the expanded MNIST
#### data. That can be generated by running expand_mnist.py.
def expanded_data(n=100):
"""n is the number of neurons in the fully-connected layer. We'll try
n=100, 300, and 1000.
"""
expanded_training_data, _, _ = network3.load_data_shared(
"../data/mnist_expanded.pkl.gz")
for j in range(3):
print "Training with expanded data, %s neurons in the FC layer, run num %s" % (n, j)
net = Network([
ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
filter_shape=(20, 1, 5, 5),
poolsize=(2, 2),
activation_fn=ReLU),
ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12),
filter_shape=(40, 20, 5, 5),
poolsize=(2, 2),
activation_fn=ReLU),
FullyConnectedLayer(n_in=40*4*4, n_out=n, activation_fn=ReLU),
SoftmaxLayer(n_in=n, n_out=10)], mini_batch_size)
net.SGD(expanded_training_data, 60, mini_batch_size, 0.03,
validation_data, test_data, lmbda=0.1)
return net
def expanded_data_double_fc(n=100):
"""n is the number of neurons in both fully-connected layers. We'll
try n=100, 300, and 1000.
"""
expanded_training_data, _, _ = network3.load_data_shared(
"../data/mnist_expanded.pkl.gz")
for j in range(3):
print "Training with expanded data, %s neurons in two FC layers, run num %s" % (n, j)
net = Network([
ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
filter_shape=(20, 1, 5, 5),
poolsize=(2, 2),
activation_fn=ReLU),
ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12),
filter_shape=(40, 20, 5, 5),
poolsize=(2, 2),
activation_fn=ReLU),
FullyConnectedLayer(n_in=40*4*4, n_out=n, activation_fn=ReLU),
FullyConnectedLayer(n_in=n, n_out=n, activation_fn=ReLU),
SoftmaxLayer(n_in=n, n_out=10)], mini_batch_size)
net.SGD(expanded_training_data, 60, mini_batch_size, 0.03,
validation_data, test_data, lmbda=0.1)
def double_fc_dropout(p0, p1, p2, repetitions):
expanded_training_data, _, _ = network3.load_data_shared(
"../data/mnist_expanded.pkl.gz")
nets = []
for j in range(repetitions):
print "\n\nTraining using a dropout network with parameters ",p0,p1,p2
print "Training with expanded data, run num %s" % j
net = Network([
ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
filter_shape=(20, 1, 5, 5),
poolsize=(2, 2),
activation_fn=ReLU),
ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12),
filter_shape=(40, 20, 5, 5),
poolsize=(2, 2),
activation_fn=ReLU),
FullyConnectedLayer(
n_in=40*4*4, n_out=1000, activation_fn=ReLU, p_dropout=p0),
FullyConnectedLayer(
n_in=1000, n_out=1000, activation_fn=ReLU, p_dropout=p1),
SoftmaxLayer(n_in=1000, n_out=10, p_dropout=p2)], mini_batch_size)
net.SGD(expanded_training_data, 40, mini_batch_size, 0.03,
validation_data, test_data)
nets.append(net)
return nets
def ensemble(nets):
"""Takes as input a list of nets, and then computes the accuracy on
the test data when classifications are computed by taking a vote
amongst the nets. Returns a tuple containing a list of indices
for test data which is erroneously classified, and a list of the
corresponding erroneous predictions.
Note that this is a quick-and-dirty kluge: it'd be more reusable
(and faster) to define a Theano function taking the vote. But
this works.
"""
test_x, test_y = test_data
for net in nets:
i = T.lscalar() # mini-batch index
net.test_mb_predictions = theano.function(
[i], net.layers[-1].y_out,
givens={
net.x:
test_x[i*net.mini_batch_size: (i+1)*net.mini_batch_size]
})
net.test_predictions = list(np.concatenate(
[net.test_mb_predictions(i) for i in xrange(1000)]))
all_test_predictions = zip(*[net.test_predictions for net in nets])
def plurality(p): return Counter(p).most_common(1)[0][0]
plurality_test_predictions = [plurality(p)
for p in all_test_predictions]
test_y_eval = test_y.eval()
error_locations = [j for j in xrange(10000)
if plurality_test_predictions[j] != test_y_eval[j]]
erroneous_predictions = [plurality(all_test_predictions[j])
for j in error_locations]
print "Accuracy is {:.2%}".format((1-len(error_locations)/10000.0))
return error_locations, erroneous_predictions
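# A hedged toy sketch (not part of the original code): this mirrors the
# plurality vote that ensemble() takes for each test image, using hand-made
# predictions from three hypothetical nets over three examples.
def _plurality_demo():
    votes_per_example = zip(*[[1, 7, 3], [1, 7, 2], [1, 2, 2]])
    # Counter(...).most_common(1)[0][0] picks the most frequent prediction.
    return [Counter(votes).most_common(1)[0][0] for votes in votes_per_example]
    # expected result: [1, 7, 2]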
def plot_errors(error_locations, erroneous_predictions=None):
test_x, test_y = test_data[0].eval(), test_data[1].eval()
fig = plt.figure()
error_images = [np.array(test_x[i]).reshape(28, -1) for i in error_locations]
n = min(40, len(error_locations))
for j in range(n):
ax = plt.subplot2grid((5, 8), (j/8, j % 8))
ax.matshow(error_images[j], cmap = matplotlib.cm.binary)
ax.text(24, 5, test_y[error_locations[j]])
if erroneous_predictions:
ax.text(24, 24, erroneous_predictions[j])
plt.xticks(np.array([]))
plt.yticks(np.array([]))
plt.tight_layout()
return plt
def plot_filters(net, layer, x, y):
"""Plot the filters for net after the (convolutional) layer number
layer. They are plotted in x by y format. So, for example, if we
    have 20 filters after layer 0, then we can call plot_filters(net, 0, 5, 4) to
get a 5 by 4 plot of all filters."""
filters = net.layers[layer].w.eval()
fig = plt.figure()
for j in range(len(filters)):
        ax = fig.add_subplot(y, x, j + 1)
ax.matshow(filters[j][0], cmap = matplotlib.cm.binary)
plt.xticks(np.array([]))
plt.yticks(np.array([]))
plt.tight_layout()
return plt
#### Helper method to run all experiments in the book
def run_experiments():
"""Run the experiments described in the book. Note that the later
experiments require access to the expanded training data, which
can be generated by running expand_mnist.py.
"""
shallow()
basic_conv()
omit_FC()
dbl_conv(activation_fn=sigmoid)
# omitted, but still interesting: regularized_dbl_conv()
dbl_conv_relu()
expanded_data(n=100)
expanded_data(n=300)
expanded_data(n=1000)
expanded_data_double_fc(n=100)
expanded_data_double_fc(n=300)
expanded_data_double_fc(n=1000)
nets = double_fc_dropout(0.5, 0.5, 0.5, 5)
# plot the erroneous digits in the ensemble of nets just trained
error_locations, erroneous_predictions = ensemble(nets)
plt = plot_errors(error_locations, erroneous_predictions)
plt.savefig("ensemble_errors.png")
# plot the filters learned by the first of the nets just trained
plt = plot_filters(nets[0], 0, 5, 4)
plt.savefig("net_full_layer_0.png")
plt = plot_filters(nets[0], 1, 8, 5)
plt.savefig("net_full_layer_1.png")
|
mit
|
brainstorm/bcbio-nextgen
|
bcbio/structural/validate.py
|
1
|
9523
|
"""Provide validation of structural variations against truth sets.
Tests overlaps of the combined ensemble structural variant BED against
a set of known regions. Requires any overlap between ensemble set and
known regions, and removes regions from analysis that overlap with
exclusion regions.
"""
import csv
import os
import toolz as tz
import numpy as np
import pandas as pd
import pybedtools
from bcbio.log import logger
from bcbio import utils
from bcbio.pipeline import datadict as dd
from bcbio.structural import convert
from bcbio.distributed.transaction import file_transaction
from bcbio.variation import ploidy
mpl = utils.LazyImport("matplotlib")
plt = utils.LazyImport("matplotlib.pyplot")
sns = utils.LazyImport("seaborn")
EVENT_SIZES = [(100, 450), (450, 2000), (2000, 4000), (4000, 20000), (20000, 60000),
(60000, int(1e6))]
def _stat_str(x, n):
if n > 0:
val = float(x) / float(n) * 100.0
return {"label": "%.1f%% (%s / %s)" % (val, x, n), "val": val}
else:
return {"label": "", "val": 0}
def cnv_to_event(name, data):
"""Convert a CNV to an event name.
"""
cur_ploidy = ploidy.get_ploidy([data])
if name.startswith("cnv"):
num = max([int(x) for x in name.split("_")[0].replace("cnv", "").split(";")])
if num < cur_ploidy:
return "DEL"
elif num > cur_ploidy:
return "DUP"
else:
return name
else:
return name
def _evaluate_one(caller, svtype, size_range, ensemble, truth, data):
"""Compare a ensemble results for a caller against a specific caller and SV type.
"""
def cnv_matches(name):
return cnv_to_event(name, data) == svtype
def is_breakend(name):
return name.startswith("BND")
def in_size_range(max_buffer=0):
def _work(feat):
minf, maxf = size_range
buffer = min(max_buffer, int(((maxf + minf) / 2.0) / 10.0))
size = feat.end - feat.start
return size >= max([0, minf - buffer]) and size < maxf + buffer
return _work
def is_caller_svtype(feat):
for name in feat.name.split(","):
if ((name.startswith(svtype) or cnv_matches(name) or is_breakend(name))
and (caller == "sv-ensemble" or name.endswith(caller))):
return True
return False
minf, maxf = size_range
efeats = pybedtools.BedTool(ensemble).filter(in_size_range(0)).filter(is_caller_svtype).saveas().sort().merge()
tfeats = pybedtools.BedTool(truth).filter(in_size_range(0)).sort().merge().saveas()
etotal = efeats.count()
ttotal = tfeats.count()
match = efeats.intersect(tfeats, u=True).sort().merge().saveas().count()
return {"sensitivity": _stat_str(match, ttotal),
"precision": _stat_str(match, etotal)}
def _evaluate_multi(calls, truth_svtypes, work_dir, data):
base = os.path.join(work_dir, "%s-sv-validate" % (dd.get_sample_name(data)))
out_file = base + ".csv"
df_file = base + "-df.csv"
if any((not utils.file_uptodate(out_file, x["vrn_file"])
or not utils.file_uptodate(df_file, x["vrn_file"])) for x in calls):
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
with open(df_file, "w") as df_out_handle:
writer = csv.writer(out_handle)
dfwriter = csv.writer(df_out_handle)
writer.writerow(["svtype", "size", "caller", "sensitivity", "precision"])
dfwriter.writerow(["svtype", "size", "caller", "metric", "value", "label"])
for svtype, truth in truth_svtypes.items():
for size in EVENT_SIZES:
str_size = "%s-%s" % size
for call in calls:
call_bed = convert.to_bed(call, dd.get_sample_name(data), work_dir, calls, data)
if utils.file_exists(call_bed):
evalout = _evaluate_one(call["variantcaller"], svtype, size, call_bed,
truth, data)
writer.writerow([svtype, str_size, call["variantcaller"],
evalout["sensitivity"]["label"], evalout["precision"]["label"]])
for metric in ["sensitivity", "precision"]:
dfwriter.writerow([svtype, str_size, call["variantcaller"], metric,
evalout[metric]["val"], evalout[metric]["label"]])
return out_file, df_file
def _plot_evaluation(df_csv):
if mpl is None or plt is None or sns is None:
not_found = ", ".join([x for x in ['mpl', 'plt', 'sns'] if eval(x) is None])
logger.info("No validation plot. Missing imports: %s" % not_found)
return None
mpl.use('Agg', force=True)
df = pd.read_csv(df_csv).fillna("0%")
out = {}
for event in df["svtype"].unique():
out[event] = _plot_evaluation_event(df_csv, event)
return out
def _plot_evaluation_event(df_csv, svtype):
"""Provide plot of evaluation metrics for an SV event, stratified by event size.
"""
titles = {"INV": "Inversions", "DEL": "Deletions", "DUP": "Duplications",
"INS": "Insertions"}
out_file = "%s-%s.png" % (os.path.splitext(df_csv)[0], svtype)
sns.set(style='white')
if not utils.file_uptodate(out_file, df_csv):
metrics = ["sensitivity", "precision"]
df = pd.read_csv(df_csv).fillna("0%")
df = df[(df["svtype"] == svtype)]
event_sizes = _find_events_to_include(df, EVENT_SIZES)
fig, axs = plt.subplots(len(event_sizes), len(metrics), tight_layout=True)
if len(event_sizes) == 1:
axs = [axs]
callers = sorted(df["caller"].unique())
if "sv-ensemble" in callers:
callers.remove("sv-ensemble")
callers.append("sv-ensemble")
for i, size in enumerate(event_sizes):
size_label = "%s to %sbp" % size
size = "%s-%s" % size
for j, metric in enumerate(metrics):
ax = axs[i][j]
ax.get_xaxis().set_ticks([])
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_xlim(0, 125.0)
if i == 0:
ax.set_title(metric, size=12, y=1.2)
vals, labels = _get_plot_val_labels(df, size, metric, callers)
ax.barh(np.arange(len(vals)), vals)
if j == 0:
ax.tick_params(axis='y', which='major', labelsize=8)
ax.locator_params(nbins=len(callers) + 2, axis="y", tight=True)
ax.set_yticklabels(callers, va="bottom")
ax.text(100, len(callers), size_label, fontsize=10)
else:
ax.get_yaxis().set_ticks([])
for ai, (val, label) in enumerate(zip(vals, labels)):
ax.annotate(label, (val + 0.75, ai + 0.35), va='center', size=7)
if svtype in titles:
fig.text(0.025, 0.95, titles[svtype], size=14)
fig.set_size_inches(7, len(event_sizes) + 1)
fig.savefig(out_file)
return out_file
def _find_events_to_include(df, event_sizes):
out = []
for size in event_sizes:
str_size = "%s-%s" % size
curdf = df[(df["size"] == str_size) & (df["metric"] == "sensitivity")]
for val in list(curdf["label"]):
if val != "0%":
out.append(size)
break
return out
def _get_plot_val_labels(df, size, metric, callers):
curdf = df[(df["size"] == size) & (df["metric"] == metric)]
vals, labels = [], []
for caller in callers:
row = curdf[curdf["caller"] == caller]
val = list(row["value"])[0]
if val == 0:
val = 0.1
vals.append(val)
labels.append(list(row["label"])[0])
return vals, labels
def evaluate(data):
"""Provide evaluations for multiple callers split by structural variant type.
"""
work_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "structural",
dd.get_sample_name(data), "validate"))
truth_sets = tz.get_in(["config", "algorithm", "svvalidate"], data)
if truth_sets and data.get("sv"):
val_summary, df_csv = _evaluate_multi(data["sv"], truth_sets, work_dir, data)
summary_plots = _plot_evaluation(df_csv)
data["sv-validate"] = {"csv": val_summary, "plot": summary_plots, "df": df_csv}
return data
if __name__ == "__main__":
#_, df_csv = _evaluate_multi(["lumpy", "delly", "wham", "sv-ensemble"],
# {"DEL": "synthetic_challenge_set3_tumor_20pctmasked_truth_sv_DEL.bed"},
# "syn3-tumor-ensemble-filter.bed", "sv_exclude.bed")
#_, df_csv = _evaluate_multi(["lumpy", "delly", "cn_mops", "sv-ensemble"],
# {"DEL": "NA12878.50X.ldgp.molpb_val.20140508.bed"},
# "NA12878-ensemble.bed", "LCR.bed.gz")
import sys
_plot_evaluation(sys.argv[1])
|
mit
|
BrandonLMorris/ml-examples
|
mnist/tensorflow/cnn.py
|
1
|
5683
|
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import argparse
import os
import matplotlib.pyplot as pyplot
EPOCHS = 5000
MINIBATCH_SIZE = 50
SAVE_PATH = 'mnist-cnn-model.cpkt'
def cnn(x):
"""Definition of our model for a deep convolutional neural network
This method serves as the main definition of our classifier model.
:param x: The input image tensor
    :returns: A tuple containing the output logits and the dropout
        keep-probability placeholder
"""
relu = tf.nn.relu # shorthand
keep_prob = tf.placeholder(tf.float32)
x_image = tf.reshape(x, [-1, 28, 28, 1])
# Layer 1 (convolutional)
    W_conv1 = weights([5, 5, 1, 32])  # Output: 28x28x32 ('SAME' padding), pooled to 14x14x32
b_conv1 = bias([32])
h_conv1 = relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool(h_conv1)
# Layer 2 (convolutional)
    W_conv2 = weights([5, 5, 32, 64])  # Output: 14x14x64 ('SAME' padding), pooled to 7x7x64
b_conv2 = bias([64])
h_conv2 = relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool(h_conv2)
# Layer 3 (fully connected)
W_fc1 = weights([7 * 7 * 64, 1024])
b_fc1 = bias([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# Apply dropout to prevent overfitting
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# Last layer (fully connected)
W_fc2 = weights([1024, 10])
b_fc2 = bias([10])
y_hat = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
return y_hat, keep_prob
def weights(shape):
"""Helper method; creates some randomly initialized weights"""
return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias(shape):
"""Helper method; creates some randomly initialized biases"""
return tf.Variable(tf.constant(0.1, shape=shape))
def conv2d(x, W):
"""Simplify our convolutional calls"""
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool(x):
"""Simplify our pooling calls"""
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
def main(_):
# Gather the data
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
# Describe our model
x = tf.placeholder(tf.float32, [None, 784])
y_true = tf.placeholder(tf.float32, [None, 10])
y_hat, keep_prob = cnn(x)
    # Measure error and optimize (describe the training procedure)
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=y_true, logits=y_hat))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_hat, 1), tf.argmax(y_true, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Actually run the training procedure (or load the pre-trained model)
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
if FLAGS.no_cache:
for i in range(EPOCHS):
# Train on minibatches of the data
batch = mnist.train.next_batch(MINIBATCH_SIZE)
# Mark our progress during training
if i % 100 == 0:
                    print('Epoch {}: Accuracy {:.1f}%'.format(
                        i, 100 * accuracy.eval(feed_dict={
x:batch[0],
y_true:batch[1],
keep_prob:1.0
})))
# Run the training step
train_step.run(feed_dict={
x:batch[0], y_true:batch[1], keep_prob:0.5})
saver.save(sess, SAVE_PATH)
else:
saver.restore(sess, SAVE_PATH)
        # Evaluate our accuracy
test_acc = accuracy.eval(feed_dict={
x:mnist.test.images, y_true:mnist.test.labels, keep_prob:1.0
})
print('Test accuracy is {:.2f}'.format(test_acc * 100))
# Try out some adversarial examples
img, label = mnist.train.next_batch(10)
ae = fgsm(x, y_true, y_hat, 0.1).eval(feed_dict={
x:img, y_true:label, keep_prob:1.0
})
ae_logits = y_hat.eval(feed_dict={x:ae, y_true:label, keep_prob:1.0})
# Print out some examples
for i in range(10):
pic = ae[i].reshape([28, 28])
pyplot.imshow(pic, cmap='gray')
pyplot.title('Classified as {}'.format(tf.argmax(ae_logits, 1).eval()[i]))
pyplot.show()
def fgsm(x, y_true, y_hat, epsilon=0.075):
"""Calculates the fast gradient sign method adversarial attack
Following the FGSM algorithm, determines the gradient of the cost function
wrt the input, then perturbs all the input in the direction that will cause
the greatest error, with small magnitude.
"""
loss = tf.nn.softmax_cross_entropy_with_logits(
labels=y_true, logits=y_hat)
grad, = tf.gradients(loss, x)
scaled_grad = epsilon * tf.sign(grad)
return tf.stop_gradient(x + scaled_grad)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str,
default='/tmp/tensorflow/mnist/input_data',
help='Directory for storing input data')
parser.add_argument('--no_cache', type=bool,
default=False,
help='Set if want to train model from scratch')
FLAGS, unparsed = parser.parse_known_args()
    # If we don't have a saved version of the model, we'll have to train it
if not os.path.exists(SAVE_PATH + '.index'):
FLAGS.no_cache = True
tf.app.run(main=main)
|
apache-2.0
|
Parallel-in-Time/pySDC
|
pySDC/playgrounds/deprecated/pmesh/playground_pmesh_comm.py
|
1
|
1873
|
from mpi4py import MPI
import matplotlib
matplotlib.use("TkAgg")
import numpy as np
import pfft
import time
import matplotlib.pyplot as plt
from numpy.fft import rfft2, irfft2
from pmesh.pm import ParticleMesh, RealField, ComplexField
def doublesine(i, v):
r = [ii * (Li / ni) for ii, ni, Li in zip(i, v.Nmesh, v.BoxSize)]
# xx, yy = np.meshgrid(r[0], r[1])
return np.sin(2*np.pi*r[0]) * np.sin(2*np.pi*r[1])
nvars = 128
nruns = 1
# t0 = time.time()
# pm = ParticleMesh(BoxSize=1.0, Nmesh=[nvars] * 2, dtype='f8', plan_method='measure', comm=None)
# u = pm.create(type='real')
# u = u.apply(doublesine, kind='index', out=Ellipsis)
#
# res = 0
# for i in range(nruns):
# tmp = u.preview()
# # print(type(u.value))
# res = max(res, np.linalg.norm(tmp))
# print(res)
# t1 = time.time()
#
# print(f'PMESH setup time: {t1 - t0:6.4f} sec.')
#
# exit()
comm = MPI.COMM_WORLD
world_rank = comm.Get_rank()
world_size = comm.Get_size()
# split world communicator to create space-communicators
color = int(world_rank / 2)
space_comm = comm.Split(color=color)
space_size = space_comm.Get_size()
space_rank = space_comm.Get_rank()
color = int(world_rank % 2)
time_comm = comm.Split(color=color)
time_size = time_comm.Get_size()
time_rank = time_comm.Get_rank()
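# With e.g. 4 MPI ranks, the integer-division color groups ranks {0, 1} and
# {2, 3} into space communicators, while the modulo color groups {0, 2} and
# {1, 3} into time communicators.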
print(world_rank, time_rank, space_rank)
t0 = time.time()
if time_rank == 0:
pm = ParticleMesh(BoxSize=1.0, Nmesh=[nvars] * 2, dtype='f8', plan_method='measure', comm=space_comm)
u = pm.create(type='real')
u = u.apply(doublesine, kind='index', out=Ellipsis)
time_comm.send(u.value, dest=1, tag=11)
else:
pm = ParticleMesh(BoxSize=1.0, Nmesh=[nvars] * 2, dtype='f8', plan_method='measure', comm=space_comm)
tmp = time_comm.recv(source=0, tag=11)
u = pm.create(type='real', value=tmp)
t1 = time.time()
print(f'PMESH setup time: {t1 - t0:6.4f} sec.')
exit()
|
bsd-2-clause
|
ajrichards/notebook
|
deep-learning/draw_complex_nn.py
|
2
|
3089
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
def draw_neural_net(ax, left, right, bottom, top, layer_sizes):
'''
    Draw a neural network cartoon using matplotlib.
:usage:
>>> fig = plt.figure(figsize=(12, 12))
>>> draw_neural_net(fig.gca(), .1, .9, .1, .9, [4, 7, 2])
:parameters:
- ax : matplotlib.axes.AxesSubplot
The axes on which to plot the cartoon (get e.g. by plt.gca())
- left : float
The center of the leftmost node(s) will be placed here
- right : float
The center of the rightmost node(s) will be placed here
- bottom : float
The center of the bottommost node(s) will be placed here
- top : float
The center of the topmost node(s) will be placed here
- layer_sizes : list of int
List of layer sizes, including input and output dimensionality
'''
n_layers = len(layer_sizes)
v_spacing = (top - bottom)/float(max(layer_sizes))
h_spacing = (right - left)/float(len(layer_sizes) - 1)
## create the nodes with patches
patch_keys = {}
for n, layer_size in enumerate(layer_sizes):
layer_top = v_spacing*(layer_size - 1)/2. + (top + bottom)/2.
for m in range(layer_size):
x = n*h_spacing + left
y = layer_top - m*v_spacing
if n == 0:
color = 'darkorange'
label = 'input'
elif n == len(layer_sizes)-1:
color = 'dodgerblue'
label = 'output'
else:
color = 'mediumpurple'
label = 'hidden'
p = mpatches.Circle((x, y), v_spacing/3.5, ec='k',fc=color)
patch_keys[label] = p
ax.add_patch(p)
## create the edges with annotations
for n, (layer_size_a, layer_size_b) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):
layer_top_a = v_spacing*(layer_size_a - 1)/2. + (top + bottom)/2.
layer_top_b = v_spacing*(layer_size_b - 1)/2. + (top + bottom)/2.
for m in range(layer_size_a):
for o in range(layer_size_b):
a = n*h_spacing + left
b = (n + 1)*h_spacing + left
c = layer_top_a - m*v_spacing
d = layer_top_b - o*v_spacing
ax.annotate('', xy=(b,d), xycoords='data',
xytext=(a,c), textcoords='data',
arrowprops=dict(facecolor='black',
arrowstyle='->',
shrinkA=18,
shrinkB=18,
)
)
ax.legend(patch_keys.values(), patch_keys.keys())
if __name__ == "__main__":
fig = plt.figure(figsize=(10, 10))
ax = fig.gca()
ax.axis('off')
draw_neural_net(ax, .1, .9, .1, .9, [4, 10, 7, 5,7, 2])
fig.savefig('nn-complex.png')
plt.show()
|
bsd-3-clause
|
mhue/scikit-learn
|
examples/ensemble/plot_adaboost_twoclass.py
|
347
|
3268
|
"""
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of confidence in
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
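# Hedged follow-up sketch (not part of the original example): as the docstring
# notes, a purer class-B subset can be built by keeping only samples whose
# decision score exceeds some threshold; the cut-off value here is illustrative.
score_cutoff = 0.5
purer_B = X[bdt.decision_function(X) > score_cutoff]
print("Samples kept for a purer class-B subset: %d" % len(purer_B))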
|
bsd-3-clause
|
spinningbytes/deep-mlsa
|
code/evaluation/senti_lang_eval_sentence_emb_output.py
|
1
|
1323
|
from keras.utils.np_utils import probas_to_classes
from sklearn.metrics import f1_score
import numpy as np
def evaluate(model, test_iterator, experiment_name, ofname):
    inputs = test_iterator.input_data
    outputs = test_iterator.output_data
    names = test_iterator.names
for i, o, n in zip(inputs, outputs, names):
ofile = open(ofname, 'wt')
y_test_senti = probas_to_classes(o)
model_output = model.predict(i)
sentence_embeddings = model_output[1]
y_pred = model_output[0]
y_pred_senti = y_pred
y_pred_senti_cls = probas_to_classes(y_pred_senti)
f1_score_senti = f1_score(y_test_senti, y_pred_senti_cls, average=None, pos_label=None, labels=[0, 1, 2])
output_line = '{}\n'.format(n)
output_line += 'Sentiment:\tF1 Score:{}\tF1 Score SEval:{}\tF1 Score Neg:{}\tF1 Score Neut:{}\tF1 Score Pos:{}\n'.format(
np.mean(f1_score_senti),
0.5 * (f1_score_senti[0] + f1_score_senti[2]),
f1_score_senti[0],
f1_score_senti[1],
f1_score_senti[2]
)
        for emb in sentence_embeddings:
            slist = [str(x) for x in emb.tolist()]
file_output_line = '{}\n'.format(' '.join(slist))
ofile.write(file_output_line)
return output_line
|
apache-2.0
|
rsivapr/scikit-learn
|
sklearn/mixture/gmm.py
|
5
|
27249
|
"""
Gaussian Mixture Models.
This implementation corresponds to the frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
# Bertrand Thirion <[email protected]>
import numpy as np
from ..base import BaseEstimator
from ..utils import check_random_state, deprecated
from ..utils.extmath import logsumexp, pinvh
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
    covar : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
from scipy import linalg
U, s, V = linalg.svd(covar)
sqrtS = np.diag(np.sqrt(s))
sqrt_covar = np.dot(U, np.dot(sqrtS, V))
rand = np.dot(sqrt_covar, rand)
return (rand.T + mean).T
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
    random_state: RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
thresh : float, optional
Convergence threshold.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
        Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
Attributes
----------
`weights_` : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
`means_` : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
`covars_` : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
`converged_` : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
    DPGMM : Infinite Gaussian mixture model, using the Dirichlet
        process, fit with a variational algorithm
    VBGMM : Finite Gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=0.01)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=0.01)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, thresh=1e-2, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc'):
self.n_components = n_components
self.covariance_type = covariance_type
self.thresh = thresh
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
        if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
        The shape depends on `covariance_type`::
            (`n_components`, `n_features`) if 'spherical',
            (`n_features`, `n_features`) if 'tied',
            (`n_components`, `n_features`) if 'diag',
            (`n_components`, `n_features`, `n_features`) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance"""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
@deprecated("GMM.eval was renamed to GMM.score_samples in 0.14 and will be"
" removed in 0.16.")
def eval(self, X):
return self.score_samples(X)
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
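        # Per-sample log-likelihood: log p(x) = logsumexp_k [log w_k + log N(x | mu_k, Sigma_k)];
        # the responsibilities are the normalized per-component posteriors.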
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type)
+ np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,)
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit(self, X):
"""Estimate model parameters with the expectation-maximization
algorithm.
        An initialization step is performed before entering the EM
        algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string '' when creating the
GMM object. Likewise, if you would like just to do an
initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
"""
## initialization step
X = np.asarray(X, dtype=np.float)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
for _ in range(self.n_init):
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
# EM algorithms
log_likelihood = []
# reset self.converged_ to False
self.converged_ = False
for i in range(self.n_iter):
# Expectation step
curr_log_likelihood, responsibilities = self.score_samples(X)
log_likelihood.append(curr_log_likelihood.sum())
# Check for convergence.
if i > 0 and abs(log_likelihood[-1] - log_likelihood[-2]) < \
self.thresh:
self.converged_ = True
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
# if the results are better, keep it
if self.n_iter:
if log_likelihood[-1] > max_log_prob:
max_log_prob = log_likelihood[-1]
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
# self.n_iter == 0 occurs when using GMM within HMM
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
""" Perform the Mstep of the EM algorithm and return the class weihgts.
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
#########################################################################
## some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means=0.0, covars=1.0):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means=0.0, covars=1.0):
"""Compute Gaussian log-density at X for a spherical model"""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model"""
from scipy import linalg
n_samples, n_dim = X.shape
icv = pinvh(covars)
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.log(linalg.det(covars) + 0.1)
+ np.sum(X * np.dot(X, icv), 1)[:, np.newaxis]
- 2 * np.dot(np.dot(X, icv), means.T)
+ np.sum(means * np.dot(means, icv), 1))
return lpr
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices.
"""
from scipy import linalg
if hasattr(linalg, 'solve_triangular'):
# only in scipy since 0.9
solve_triangular = linalg.solve_triangular
else:
# slower, but works
solve_triangular = linalg.solve
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component.
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template
"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Performing the covariance M step for spherical cases"""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
# Underflow Errors in doing post * X.T are not important
np.seterr(under='ignore')
avg_cv = np.dot(post * X.T, X) / (post.sum() + 10 * EPS)
mu = gmm.means_[c][np.newaxis]
cv[c] = (avg_cv - np.dot(mu.T, mu) + min_covar * np.eye(n_features))
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
    # Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian Distribution"
n_features = X.shape[1]
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
return (avg_X2 - avg_means2 + min_covar * np.eye(n_features)) / X.shape[0]
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
|
bsd-3-clause
|
aabadie/scikit-learn
|
sklearn/utils/tests/test_murmurhash.py
|
65
|
2838
|
# Author: Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.externals.six import b, u
from sklearn.utils.murmurhash import murmurhash3_32
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from nose.tools import assert_equal, assert_true
def test_mmhash3_int():
assert_equal(murmurhash3_32(3), 847579505)
assert_equal(murmurhash3_32(3, seed=0), 847579505)
assert_equal(murmurhash3_32(3, seed=42), -1823081949)
assert_equal(murmurhash3_32(3, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=False), -1823081949)
assert_equal(murmurhash3_32(3, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=True), 2471885347)
def test_mmhash3_int_array():
rng = np.random.RandomState(42)
keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32)
keys = keys.reshape((3, 2, 1))
for seed in [0, 42]:
expected = np.array([murmurhash3_32(int(k), seed)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed), expected)
for seed in [0, 42]:
expected = np.array([murmurhash3_32(k, seed, positive=True)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed, positive=True),
expected)
def test_mmhash3_bytes():
assert_equal(murmurhash3_32(b('foo'), 0), -156908512)
assert_equal(murmurhash3_32(b('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(b('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(b('foo'), 42, positive=True), 2972666014)
def test_mmhash3_unicode():
assert_equal(murmurhash3_32(u('foo'), 0), -156908512)
assert_equal(murmurhash3_32(u('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(u('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(u('foo'), 42, positive=True), 2972666014)
def test_no_collision_on_byte_range():
previous_hashes = set()
for i in range(100):
        h = murmurhash3_32(' ' * i, 0)
        assert_true(h not in previous_hashes,
                    "Found collision on growing string of spaces")
        previous_hashes.add(h)
def test_uniform_distribution():
n_bins, n_samples = 10, 100000
bins = np.zeros(n_bins, dtype=np.float64)
for i in range(n_samples):
bins[murmurhash3_32(i, positive=True) % n_bins] += 1
means = bins / n_samples
expected = np.ones(n_bins) / n_bins
assert_array_almost_equal(means / expected, np.ones(n_bins), 2)
|
bsd-3-clause
|
nmearl/pyqtgraph
|
pyqtgraph/widgets/MatplotlibWidget.py
|
30
|
1442
|
from ..Qt import QtGui, QtCore, USE_PYSIDE, USE_PYQT5
import matplotlib
if not USE_PYQT5:
if USE_PYSIDE:
matplotlib.rcParams['backend.qt4']='PySide'
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
else:
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
class MatplotlibWidget(QtGui.QWidget):
"""
Implements a Matplotlib figure inside a QWidget.
    Use getFigure() and draw() to interact with matplotlib.
Example::
mw = MatplotlibWidget()
subplot = mw.getFigure().add_subplot(111)
subplot.plot(x,y)
mw.draw()
"""
def __init__(self, size=(5.0, 4.0), dpi=100):
QtGui.QWidget.__init__(self)
self.fig = Figure(size, dpi=dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self)
self.toolbar = NavigationToolbar(self.canvas, self)
self.vbox = QtGui.QVBoxLayout()
self.vbox.addWidget(self.toolbar)
self.vbox.addWidget(self.canvas)
self.setLayout(self.vbox)
def getFigure(self):
return self.fig
def draw(self):
self.canvas.draw()
|
mit
|
vshtanko/scikit-learn
|
examples/linear_model/plot_lasso_model_selection.py
|
311
|
5431
|
"""
===================================================
Lasso model selection: Cross-Validation / AIC / BIC
===================================================
Use the Akaike information criterion (AIC), the Bayes Information
criterion (BIC) and cross-validation to select an optimal value
of the regularization parameter alpha of the :ref:`lasso` estimator.
Results obtained with LassoLarsIC are based on AIC/BIC criteria.
Information-criterion based model selection is very fast, but it
relies on a proper estimation of the degrees of freedom. The criteria are
derived for large samples (asymptotic results) and assume the model
is correct, i.e. that the data are actually generated by this model.
They also tend to break down when the problem is badly conditioned
(more features than samples).
For cross-validation, we use 20-fold with 2 algorithms to compute the
Lasso path: coordinate descent, as implemented by the LassoCV class, and
Lars (least angle regression) as implemented by the LassoLarsCV class.
Both algorithms give roughly the same results. They differ with regards
to their execution speed and sources of numerical errors.
Lars computes a path solution only for each kink in the path. As a
result, it is very efficient when there are only a few kinks, which is
the case if there are few features or samples. It is also able to
compute the full path without setting any meta-parameter. In contrast,
coordinate descent computes the path points on a pre-specified
grid (here we use the default). Thus it is more efficient if the number
of grid points is smaller than the number of kinks in the path. Such a
strategy can be interesting if the number of features is really large
and there are enough samples to select a large subset of them. In terms of
numerical errors, for heavily correlated variables, Lars will accumulate
more errors, while the coordinate descent algorithm will only sample the
path on a grid.
Note how the optimal value of alpha varies for each fold. This
illustrates why nested cross-validation is necessary when trying to
evaluate the performance of a method for which a parameter is chosen by
cross-validation: this choice of parameter may not be optimal for unseen
data.
"""
print(__doc__)
# Author: Olivier Grisel, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
rng = np.random.RandomState(42)
X = np.c_[X, rng.randn(X.shape[0], 14)] # add some bad features
# normalize data as done by Lars to allow for comparison
X /= np.sqrt(np.sum(X ** 2, axis=0))
##############################################################################
# LassoLarsIC: least angle regression with BIC/AIC criterion
model_bic = LassoLarsIC(criterion='bic')
t1 = time.time()
model_bic.fit(X, y)
t_bic = time.time() - t1
alpha_bic_ = model_bic.alpha_
model_aic = LassoLarsIC(criterion='aic')
model_aic.fit(X, y)
alpha_aic_ = model_aic.alpha_
def plot_ic_criterion(model, name, color):
alpha_ = model.alpha_
alphas_ = model.alphas_
criterion_ = model.criterion_
plt.plot(-np.log10(alphas_), criterion_, '--', color=color,
linewidth=3, label='%s criterion' % name)
plt.axvline(-np.log10(alpha_), color=color, linewidth=3,
label='alpha: %s estimate' % name)
plt.xlabel('-log(alpha)')
plt.ylabel('criterion')
plt.figure()
plot_ic_criterion(model_aic, 'AIC', 'b')
plot_ic_criterion(model_bic, 'BIC', 'r')
plt.legend()
plt.title('Information-criterion for model selection (training time %.3fs)'
% t_bic)
##############################################################################
# LassoCV: coordinate descent
# Compute paths
print("Computing regularization path using the coordinate descent lasso...")
t1 = time.time()
model = LassoCV(cv=20).fit(X, y)
t_lasso_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.alphas_)
plt.figure()
ymin, ymax = 2300, 3800
plt.plot(m_log_alphas, model.mse_path_, ':')
plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha: CV estimate')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: coordinate descent '
'(train time: %.2fs)' % t_lasso_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
##############################################################################
# LassoLarsCV: least angle regression
# Compute paths
print("Computing regularization path using the Lars lasso...")
t1 = time.time()
model = LassoLarsCV(cv=20).fit(X, y)
t_lasso_lars_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.cv_alphas_)
plt.figure()
plt.plot(m_log_alphas, model.cv_mse_path_, ':')
plt.plot(m_log_alphas, model.cv_mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha CV')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: Lars (train time: %.2fs)'
% t_lasso_lars_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
plt.show()
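# A minimal sketch of the nested cross-validation mentioned in the docstring
# above (illustrative only, not part of the original example): the
# alpha-selecting LassoCV is itself evaluated in an outer cross-validation loop.
#
#   from sklearn.cross_validation import cross_val_score
#   print(cross_val_score(LassoCV(cv=20), X, y, cv=5))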
|
bsd-3-clause
|
anntzer/scikit-learn
|
examples/preprocessing/plot_discretization_strategies.py
|
43
|
3052
|
# -*- coding: utf-8 -*-
"""
==========================================================
Demonstrating the different strategies of KBinsDiscretizer
==========================================================
This example presents the different strategies implemented in KBinsDiscretizer:
- 'uniform': The discretization is uniform in each feature, which means that
the bin widths are constant in each dimension.
- 'quantile': The discretization is done on the quantiled values, which means
that each bin has approximately the same number of samples.
- 'kmeans': The discretization is based on the centroids of a KMeans clustering
procedure.
The plot shows the regions where the discretized encoding is constant.
"""
# Author: Tom Dupré la Tour
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.datasets import make_blobs
print(__doc__)
strategies = ['uniform', 'quantile', 'kmeans']
n_samples = 200
centers_0 = np.array([[0, 0], [0, 5], [2, 4], [8, 8]])
centers_1 = np.array([[0, 0], [3, 1]])
# construct the datasets
random_state = 42
X_list = [
np.random.RandomState(random_state).uniform(-3, 3, size=(n_samples, 2)),
make_blobs(n_samples=[n_samples // 10, n_samples * 4 // 10,
n_samples // 10, n_samples * 4 // 10],
cluster_std=0.5, centers=centers_0,
random_state=random_state)[0],
make_blobs(n_samples=[n_samples // 5, n_samples * 4 // 5],
cluster_std=0.5, centers=centers_1,
random_state=random_state)[0],
]
figure = plt.figure(figsize=(14, 9))
i = 1
for ds_cnt, X in enumerate(X_list):
ax = plt.subplot(len(X_list), len(strategies) + 1, i)
ax.scatter(X[:, 0], X[:, 1], edgecolors='k')
if ds_cnt == 0:
ax.set_title("Input data", size=14)
xx, yy = np.meshgrid(
np.linspace(X[:, 0].min(), X[:, 0].max(), 300),
np.linspace(X[:, 1].min(), X[:, 1].max(), 300))
grid = np.c_[xx.ravel(), yy.ravel()]
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# transform the dataset with KBinsDiscretizer
for strategy in strategies:
enc = KBinsDiscretizer(n_bins=4, encode='ordinal', strategy=strategy)
enc.fit(X)
grid_encoded = enc.transform(grid)
ax = plt.subplot(len(X_list), len(strategies) + 1, i)
# horizontal stripes
horizontal = grid_encoded[:, 0].reshape(xx.shape)
ax.contourf(xx, yy, horizontal, alpha=.5)
# vertical stripes
vertical = grid_encoded[:, 1].reshape(xx.shape)
ax.contourf(xx, yy, vertical, alpha=.5)
ax.scatter(X[:, 0], X[:, 1], edgecolors='k')
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
if ds_cnt == 0:
ax.set_title("strategy='%s'" % (strategy, ), size=14)
i += 1
plt.tight_layout()
plt.show()
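# A minimal usage sketch (illustrative, not part of the original example): the
# 'ordinal' encoding returns, for each feature, the index of the bin that each
# value falls into.
#
#   enc = KBinsDiscretizer(n_bins=3, encode='ordinal', strategy='uniform')
#   X_toy = np.array([[0.0], [1.0], [2.0], [9.0]])
#   print(enc.fit_transform(X_toy))   # bin indices in {0, 1, 2}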
|
bsd-3-clause
|
pnedunuri/scikit-learn
|
examples/neighbors/plot_classification.py
|
287
|
1790
|
"""
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
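# Illustrative sketch (not part of the original example): classifying a single
# new sample with the last fitted classifier (weights='distance'); the two
# sepal measurements below are arbitrary.
#
#   print(iris.target_names[clf.predict([[5.0, 3.5]])])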
|
bsd-3-clause
|
wesm/statsmodels
|
scikits/statsmodels/examples/tsa/ex_dates.py
|
1
|
1945
|
import scikits.statsmodels.api as sm
import numpy as np
import pandas
# Getting started
# ---------------
data = sm.datasets.sunspots.load()
# Right now an annual date series must be datetimes at the end of the year.
# We can use scikits.timeseries and datetime to create this array.
import datetime
import scikits.timeseries as ts
dates = ts.date_array(start_date=1700, length=len(data.endog), freq='A')
# To make an array of datetime types, we need an integer array of ordinals
#.. from datetime import datetime
#.. dt_dates = dates.toordinal().astype(int)
#.. dt_dates = np.asarray([datetime.fromordinal(i) for i in dt_dates])
dt_dates = dates.tolist()
# Using Pandas
# ------------
# Make a pandas TimeSeries or DataFrame
endog = pandas.Series(data.endog, index=dt_dates)
# and instantiate the model
ar_model = sm.tsa.AR(endog, freq='A')
pandas_ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
# Let's do some out-of-sample prediction
pred = pandas_ar_res.predict(start='2005', end='2015')
print pred
# Using explicit dates
# --------------------
ar_model = sm.tsa.AR(data.endog, dates=dt_dates, freq='A')
ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
pred = ar_res.predict(start='2005', end='2015')
print pred
# This just returns a regular array, but since the model has date information
# attached, you can get the prediction dates in a roundabout way.
print ar_res._data.predict_dates
# This attribute only exists if predict has been called. It holds the dates
# associated with the last call to predict.
#..TODO: should this be attached to the results instance?
# Using scikits.timeseries
# ------------------------
ts_data = ts.time_series(data.endog, dates=dates)
ts_ar_model = sm.tsa.AR(ts_data, freq='A')
ts_ar_res = ts_ar_model.fit(maxlag=9)
# Using Larry
# -----------
import la
larr = la.larry(data.endog, [dt_dates])
la_ar_model = sm.tsa.AR(larr, freq='A')
la_ar_res = la_ar_model.fit(maxlag=9)
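# Illustrative alternative (hedged sketch): the same end-of-year datetime list
# can be built with the standard library only, avoiding scikits.timeseries.
#.. dt_dates_alt = [datetime.datetime(year, 12, 31)
#..                 for year in range(1700, 1700 + len(data.endog))]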
|
bsd-3-clause
|
ejeschke/ginga
|
ginga/examples/matplotlib/example2_mpl.py
|
3
|
10002
|
#! /usr/bin/env python
#
# example2_mpl.py -- Simple, configurable FITS viewer using a matplotlib
# QtAgg backend for Ginga and embedded in a Qt program.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""
Usage:
example2_mpl.py [fits file]
You need Qt4 with Python bindings (PyQt4 or PySide) installed to run this example.
"""
import sys
from matplotlib.figure import Figure
from ginga.qtw.QtHelp import QtGui, QtCore
from ginga.mplw.ImageViewCanvasMpl import ImageViewCanvas
from ginga.mplw.FigureCanvasQt import FigureCanvas
from ginga.misc import log
from ginga import colors
from ginga.canvas.CanvasObject import get_canvas_types
from ginga.util.loader import load_data
class FitsViewer(QtGui.QMainWindow):
def __init__(self, logger):
super(FitsViewer, self).__init__()
self.logger = logger
self.drawcolors = colors.get_colors()
self.dc = get_canvas_types()
fig = Figure()
w = FigureCanvas(fig)
fi = ImageViewCanvas(logger=logger)
fi.enable_autocuts('on')
fi.set_autocut_params('zscale')
fi.enable_autozoom('on')
fi.enable_draw(False)
fi.set_callback('drag-drop', self.drop_file_cb)
fi.set_callback('cursor-changed', self.cursor_cb)
fi.set_bg(0.2, 0.2, 0.2)
fi.ui_set_active(True)
self.fitsimage = fi
fi.set_figure(fig)
bd = fi.get_bindings()
bd.enable_all(True)
# canvas that we will draw on
canvas = self.dc.DrawingCanvas()
canvas.enable_draw(True)
canvas.enable_edit(True)
canvas.set_drawtype('rectangle', color='lightblue')
canvas.set_surface(fi)
self.canvas = canvas
# add canvas to view
fi.get_canvas().add(canvas)
canvas.ui_set_active(True)
canvas.register_for_cursor_drawing(fi)
canvas.add_callback('draw-event', self.draw_cb)
w.resize(512, 512)
vbox = QtGui.QVBoxLayout()
vbox.setContentsMargins(QtCore.QMargins(2, 2, 2, 2))
vbox.setSpacing(1)
vbox.addWidget(w, stretch=1)
self.readout = QtGui.QLabel("")
vbox.addWidget(self.readout, stretch=0,
alignment=QtCore.Qt.AlignCenter)
hbox = QtGui.QHBoxLayout()
hbox.setContentsMargins(QtCore.QMargins(4, 2, 4, 2))
wdrawtype = QtGui.QComboBox()
self.drawtypes = fi.get_drawtypes()
for name in self.drawtypes:
wdrawtype.addItem(name)
index = self.drawtypes.index('rectangle')
wdrawtype.setCurrentIndex(index)
wdrawtype.activated.connect(self.set_drawparams)
self.wdrawtype = wdrawtype
wdrawcolor = QtGui.QComboBox()
for name in self.drawcolors:
wdrawcolor.addItem(name)
index = self.drawcolors.index('lightblue')
wdrawcolor.setCurrentIndex(index)
wdrawcolor.activated.connect(self.set_drawparams)
self.wdrawcolor = wdrawcolor
wfill = QtGui.QCheckBox("Fill")
wfill.stateChanged.connect(self.set_drawparams)
self.wfill = wfill
walpha = QtGui.QDoubleSpinBox()
walpha.setRange(0.0, 1.0)
walpha.setSingleStep(0.1)
walpha.setValue(1.0)
walpha.valueChanged.connect(self.set_drawparams)
self.walpha = walpha
wclear = QtGui.QPushButton("Clear Canvas")
wclear.clicked.connect(self.clear_canvas)
wopen = QtGui.QPushButton("Open File")
wopen.clicked.connect(self.open_file)
wquit = QtGui.QPushButton("Quit")
wquit.clicked.connect(self.close)
hbox.addStretch(1)
for w in (wopen, wdrawtype, wdrawcolor, wfill,
QtGui.QLabel('Alpha:'), walpha, wclear, wquit):
hbox.addWidget(w, stretch=0)
hw = QtGui.QWidget()
hw.setLayout(hbox)
vbox.addWidget(hw, stretch=0)
mode = self.canvas.get_draw_mode()
hbox = QtGui.QHBoxLayout()
hbox.setContentsMargins(QtCore.QMargins(4, 2, 4, 2))
btn1 = QtGui.QRadioButton("Draw")
btn1.setChecked(mode == 'draw')
btn1.toggled.connect(lambda val: self.set_mode_cb('draw', val))
btn1.setToolTip("Choose this to draw on the canvas")
hbox.addWidget(btn1)
btn2 = QtGui.QRadioButton("Edit")
btn2.setChecked(mode == 'edit')
btn2.toggled.connect(lambda val: self.set_mode_cb('edit', val))
btn2.setToolTip("Choose this to edit things on the canvas")
hbox.addWidget(btn2)
btn3 = QtGui.QRadioButton("Pick")
btn3.setChecked(mode == 'pick')
btn3.toggled.connect(lambda val: self.set_mode_cb('pick', val))
btn3.setToolTip("Choose this to pick things on the canvas")
hbox.addWidget(btn3)
hbox.addWidget(QtGui.QLabel(''), stretch=1)
hw = QtGui.QWidget()
hw.setLayout(hbox)
vbox.addWidget(hw, stretch=0)
vw = QtGui.QWidget()
self.setCentralWidget(vw)
vw.setLayout(vbox)
def set_drawparams(self, kind):
index = self.wdrawtype.currentIndex()
kind = self.drawtypes[index]
index = self.wdrawcolor.currentIndex()
fill = (self.wfill.checkState() != 0)
alpha = self.walpha.value()
params = {'color': self.drawcolors[index],
'alpha': alpha,
}
if kind in ('circle', 'rectangle', 'polygon', 'triangle',
'righttriangle', 'ellipse', 'square', 'box'):
params['fill'] = fill
params['fillalpha'] = alpha
self.canvas.set_drawtype(kind, **params)
def clear_canvas(self):
self.canvas.delete_all_objects()
def load_file(self, filepath):
image = load_data(filepath, logger=self.logger)
self.fitsimage.set_image(image)
self.setWindowTitle(filepath)
def open_file(self):
res = QtGui.QFileDialog.getOpenFileName(self, "Open FITS file",
".", "FITS files (*.fits)")
if isinstance(res, tuple):
fileName = res[0]
else:
fileName = str(res)
if len(fileName) != 0:
self.load_file(fileName)
def drop_file_cb(self, viewer, paths):
fileName = paths[0]
self.load_file(fileName)
def cursor_cb(self, viewer, button, data_x, data_y):
"""This gets called when the data position relative to the cursor
changes.
"""
# Get the value under the data coordinates
try:
# We report the value across the pixel, even though the coords
# change halfway across the pixel
value = viewer.get_data(int(data_x + viewer.data_off),
int(data_y + viewer.data_off))
except Exception:
value = None
fits_x, fits_y = data_x + 1, data_y + 1
# Calculate WCS RA and Dec
try:
# NOTE: image function operates on DATA space coords
image = viewer.get_image()
if image is None:
# No image loaded
return
ra_txt, dec_txt = image.pixtoradec(fits_x, fits_y,
format='str', coords='fits')
except Exception as e:
self.logger.warning("Bad coordinate conversion: %s" % (
str(e)))
ra_txt = 'BAD WCS'
dec_txt = 'BAD WCS'
text = "RA: %s DEC: %s X: %.2f Y: %.2f Value: %s" % (
ra_txt, dec_txt, fits_x, fits_y, value)
self.readout.setText(text)
def set_mode_cb(self, mode, tf):
self.logger.info("canvas mode changed (%s) %s" % (mode, tf))
if not (tf is False):
self.canvas.set_draw_mode(mode)
return True
def draw_cb(self, canvas, tag):
obj = canvas.get_object_by_tag(tag)
obj.add_callback('pick-down', self.pick_cb, 'down')
obj.add_callback('pick-up', self.pick_cb, 'up')
obj.add_callback('pick-move', self.pick_cb, 'move')
obj.add_callback('pick-hover', self.pick_cb, 'hover')
obj.add_callback('pick-enter', self.pick_cb, 'enter')
obj.add_callback('pick-leave', self.pick_cb, 'leave')
obj.add_callback('pick-key', self.pick_cb, 'key')
obj.pickable = True
obj.add_callback('edited', self.edit_cb)
def pick_cb(self, obj, canvas, event, pt, ptype):
self.logger.info("pick event '%s' with obj %s at (%.2f, %.2f)" % (
ptype, obj.kind, pt[0], pt[1]))
return True
def edit_cb(self, obj):
self.logger.info("object %s has been edited" % (obj.kind))
return True
def main(options, args):
app = QtGui.QApplication(args)
logger = log.get_logger(name="example2", options=options)
w = FitsViewer(logger)
w.resize(524, 540)
w.show()
app.setActiveWindow(w)
w.raise_()
w.activateWindow()
if len(args) > 0:
w.load_file(args[0])
app.exec_()
if __name__ == "__main__":
# Parse command line options
from argparse import ArgumentParser
argprs = ArgumentParser()
argprs.add_argument("--debug", dest="debug", default=False,
action="store_true",
help="Enter the pdb debugger on main()")
argprs.add_argument("--profile", dest="profile", action="store_true",
default=False,
help="Run the profiler on main()")
log.addlogopts(argprs)
(options, args) = argprs.parse_known_args(sys.argv[1:])
# Are we debugging this?
if options.debug:
import pdb
pdb.run('main(options, args)')
# Are we profiling this?
elif options.profile:
import profile
print(("%s profile:" % sys.argv[0]))
profile.run('main(options, args)')
else:
main(options, args)
# END
|
bsd-3-clause
|
rvbelefonte/Rockfish2
|
rockfish2/database/database.py
|
1
|
11247
|
"""
Database tools
"""
import os
import pandas as pd
from pandas.io import sql as psql
#from sqlitedict import SqliteDict
from rockfish2 import logging
#XXX dev
#from logbook import Logger
#logging = Logger('dev-database')
from sqlite3 import OperationalError
try:
from pysqlite2 import dbapi2
OperationalError = dbapi2.OperationalError
db = dbapi2.Connection(':memory:')
db.execute('SELECT load_extension("libspatialite")')
except OperationalError:
msg = 'pysqlite2 found, but cannot load libspatialite extension.'
msg += ' Looking for pyspatialite instead.'
logging.warning(msg)
try:
from pyspatialite import dbapi2
except ImportError:
msg = 'Falling back to default sqlite3 library'
msg += ', spatialite tools will likely fail.'
logging.warning(msg)
from sqlite3 import dbapi2
def _process_exception(exception, message, warn=False):
if warn:
logging.warn(message)
return
if exception.__class__.__name__ == 'OperationalError':
raise DatabaseOperationalError(message)
elif exception.__class__.__name__ == 'IntegrityError':
raise DatabaseIntegrityError(message)
else:
raise DatabaseError(message)
class DatabaseError(Exception):
"""
Raised if there is a problem accessing the SQLite database
"""
pass
class DatabaseOperationalError(DatabaseError):
"""
Raised if there is a problem with SQL code
"""
pass
class DatabaseIntegrityError(DatabaseError):
"""
Raised if executing SQL causes an IntegrityError
"""
pass
class Connection(dbapi2.Connection):
def __init__(self, database=':memory:', spatial=False,
params_table=None):
if os.path.isfile(database):
logging.info('Connecting to existing database: {:}',
database)
else:
logging.info('Creating new database: {:}', database)
dbapi2.Connection.__init__(self, database)
self.row_factory = dbapi2.Row
if params_table is not None:
if database == ':memory:':
self._PARAMS = {}
else:
# XXX overriding persistent parameter table for bugfix
self._PARAMS = {}
#XXX
#logging.debug("Mapping parameters to database table '{:}'",
# params_table)
#self._PARAMS = SqliteDict(os.path.abspath(database),
# tablename=params_table, autocommit=True)
self.__dict__[params_table] = self._PARAMS
if spatial:
self._init_spatial()
#def __setattr__(self, name, value):
# """
# Called when an attribute assignment is attempted. This is called
# instead of the normal mechanism (i.e. store the value in the instance
# dictionary). name is the attribute name, value is the value to be
# assigned to it.
# """
# if hasattr(self, '_PARAMS') and (name.lower() in self._PARAMS):
# # update value in params table
# self._PARAMS[name.lower()] = value
# else:
# # just set a normal attribute
# self.__dict__[name] = value
#def __getattr__(self, name):
# """
# This method is only called if the attribute is not found in the usual
# places (i.e. not an instance attribute or not found in the class tree
# for self).
# """
# if name.lower() in self._PARAMS:
# return self._PARAMS[name.lower()]
# else:
# msg = "'{:}' object has no attribute '{:}'"\
# .format(self.__class__.__name__, name)
# raise AttributeError(msg)
def _init_spatial(self):
"""
Setup spatial database
"""
self.enable_load_extension(True)
logging.debug('Loading libspatialite')
try:
self.execute('SELECT load_extension("libspatialite")')
except DatabaseOperationalError:
pass
if 'spatial_ref_sys' not in self.tables:
logging.debug("Creating table 'spatial_ref_sys'")
self.execute('SELECT InitSpatialMetadata()')
def _get_pragma(self, table):
"""
Return the SQLite PRAGMA information.
Parameters
----------
table: str
Name of table to get PRAGMA for.
Returns
-------
pragma
Result of pragma query
"""
sql = 'PRAGMA table_info({:})'.format(table)
return self.execute(sql).fetchall()
def _create_table(self, table, fields, if_not_exists=True):
"""
Shortcut function for creating new tables.
Parameters
----------
table: str
Name of the table to create.
fields: array_like
Fields to include in the new table. Must be a list of
(name, sql type, default value, is not null, is primary) tuples.
"""
_fields = []
_primary_fields = []
for f in fields:
#Build: name type [NOT NULL] [DEFAULT default_value]
_fields.append('{:} {:}'.format(f[0], f[1]))
if f[3] is True:
# require NOT NULL
_fields[-1] += ' NOT NULL'
if f[4] is True:
# add to list of primary keys
_primary_fields.append(f[0])
if f[2] is not None:
# include default value
_fields[-1] += ' DEFAULT %s' % f[2]
sql = "CREATE TABLE"
if if_not_exists:
sql += " IF NOT EXISTS"
sql += " '{:}'".format(table)
sql += ' ('
sql += ', '.join(_fields)
if len(_primary_fields) > 0:
sql += ', PRIMARY KEY (' + ', '.join(_primary_fields) + ') '
sql += ');'
self.execute(sql)
def _get_fields(self, table):
"""
Return a list of fields for a table.
Parameters
----------
table: str
Name of table to get fields for.
"""
return [str(row[1]) for row in self._get_pragma(table)]
def _get_primary_fields(self, table):
"""
Return a list of primary fields for a table.
Parameters
----------
table: str
Name of table to get primary fields for.
"""
primary_fields = []
for row in self._get_pragma(table):
if row[5] != 0:
primary_fields.append((row[5], row[1]))
primary_fields.sort()
return [f[1] for f in primary_fields]
def _get_required_fields(self, table):
"""
Return a list of fields that have NOT NULL set.
Parameters
----------
table: str
Name of table to get required fields for.
"""
required_fields = []
for row in self._get_pragma(table):
if row[3] == 1:
required_fields.append(str(row[1]))
return required_fields
def _get_types(self, table):
"""
Return dictionary of data types for fields in a table
Parameters
----------
table: str
Name of table to get field data types for
Returns
-------
type_dict: dict
Dictionary of SQL data types indexed by field names
"""
type_dict = {}
for row in self._get_pragma(table):
type_dict[row[1]] = row[2]
return type_dict
def _get_tables(self):
"""
Returns a list of tables in the database.
"""
sql = "SELECT name FROM sqlite_master WHERE type='table'"
return [d[0] for d in self.execute(sql)]
tables = property(_get_tables)
def _get_views(self):
"""
Returns a list of views in the database.
"""
sql = "SELECT name FROM sqlite_master WHERE type='view'"
return [d[0] for d in self.execute(sql)]
views = property(_get_views)
def execute(self, *args, **kwargs):
"""
Executes a SQL statement.
"""
warn = kwargs.pop('warn_only', False)
logging.debug('Executing SQL:\n{:}', *args)
try:
return dbapi2.Connection.execute(self, *args)
except Exception as e:
msg = "execute() failed with '{:}: {:}'"\
.format(e.__class__.__name__, e.message)
msg += ' while executing: {:}'.format(*args)
_process_exception(e, msg, warn=warn)
def executemany(self, *args, **kwargs):
"""
Executes a SQL statement.
"""
warn = kwargs.pop('warn_only', False)
logging.debug('Executing SQL:\n{:}', args[0])
try:
return dbapi2.Connection.executemany(self, *args)
except Exception as e:
msg = "executemany() failed with '{:}: {:}'"\
.format(e.__class__.__name__, e.message)
msg += ' while executing: {:}'.format(args[0])
_process_exception(e, msg, warn=warn)
def read_sql(self, sql, **kwargs):
"""
Executes a SQL statement and returns a :class:`pandas.DataFrame`
Parameters
----------
sql: str
SQL to execute on the database
**kwargs:
Optional parameters for :meth:`pandas.io.sql.read_sql`
Returns
-------
data: :class:`pandas.DataFrame`
Data from the table
"""
return psql.read_sql(sql, self, **kwargs)
def read_table(self, table):
"""
Reads all rows from a table and returns a
:class:`pandas.DataFrame`
Parameters
----------
table: str
Table name to read data from
Returns
-------
data: :class:`pandas.DataFrame`
Data from the table
"""
sql = 'SELECT * FROM {:}'.format(table)
dat = self.execute(sql).fetchall()
if len(dat) > 0:
return pd.DataFrame(dat, columns=self._get_fields(table))
else:
return psql.read_sql(sql, self)
def count(self, table, **kwargs):
"""
Get the number of rows in a table.
Parameters
----------
table: str
Name of table to get count from.
**kwargs
Keyword arguments for WHERE statements in the query
"""
sql = 'SELECT COUNT(*) FROM {:}'.format(table)
if len(kwargs) > 0:
values = ['{:}="{:}"'.format(k, kwargs[k]) for k in kwargs]
sql += ' WHERE ' + ' and '.join(values)
return self.execute(sql).fetchall()[0][0]
def insert(self, table, **kwargs):
"""
Adds an entry to a table.
Parameters
----------
table: str
Name of table to add data to.
**kwargs
field=value arguments for data to add to the table
"""
sql = 'INSERT INTO %s (%s)' \
% (table, ', '.join([k for k in kwargs]))
sql += ' VALUES (%s)' % ', '.join(['?' for k in kwargs])
data = tuple([kwargs[k] for k in kwargs])
self.execute(sql, data)
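# Hedged usage sketch (illustrative only; the table and field names below are
# hypothetical, not part of this module):
#
#   db = Connection(':memory:')
#   db._create_table('picks', [('id', 'INTEGER', None, True, True),
#                              ('name', 'TEXT', "'unknown'", False, False)])
#   db.insert('picks', id=1, name='P')
#   print(db.count('picks'))          # -> 1
#   print(db.read_table('picks'))     # -> pandas.DataFrame with one row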
|
gpl-2.0
|
petosegan/scikit-learn
|
sklearn/svm/tests/test_bounds.py
|
280
|
2541
|
import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
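# Hedged usage sketch (illustrative only): l1_min_c can also be queried
# directly; the exact value depends on the toy data defined above.
#
#   print(l1_min_c(dense_X, Y1, 'squared_hinge'))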
|
bsd-3-clause
|
GuessWhoSamFoo/pandas
|
pandas/tests/reshape/merge/test_merge_ordered.py
|
2
|
3703
|
from numpy import nan
import pytest
import pandas as pd
from pandas import DataFrame, merge_ordered
from pandas.util.testing import assert_frame_equal
class TestMergeOrdered(object):
def setup_method(self, method):
self.left = DataFrame({'key': ['a', 'c', 'e'],
'lvalue': [1, 2., 3]})
self.right = DataFrame({'key': ['b', 'c', 'd', 'f'],
'rvalue': [1, 2, 3., 4]})
def test_basic(self):
result = merge_ordered(self.left, self.right, on='key')
expected = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'f'],
'lvalue': [1, nan, 2, nan, 3, nan],
'rvalue': [nan, 1, 2, 3, nan, 4]})
assert_frame_equal(result, expected)
def test_ffill(self):
result = merge_ordered(
self.left, self.right, on='key', fill_method='ffill')
expected = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'f'],
'lvalue': [1., 1, 2, 2, 3, 3.],
'rvalue': [nan, 1, 2, 3, 3, 4]})
assert_frame_equal(result, expected)
def test_multigroup(self):
left = pd.concat([self.left, self.left], ignore_index=True)
left['group'] = ['a'] * 3 + ['b'] * 3
result = merge_ordered(left, self.right, on='key', left_by='group',
fill_method='ffill')
expected = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'f'] * 2,
'lvalue': [1., 1, 2, 2, 3, 3.] * 2,
'rvalue': [nan, 1, 2, 3, 3, 4] * 2})
expected['group'] = ['a'] * 6 + ['b'] * 6
assert_frame_equal(result, expected.loc[:, result.columns])
result2 = merge_ordered(self.right, left, on='key', right_by='group',
fill_method='ffill')
assert_frame_equal(result, result2.loc[:, result.columns])
result = merge_ordered(left, self.right, on='key', left_by='group')
assert result['group'].notna().all()
def test_merge_type(self):
class NotADataFrame(DataFrame):
@property
def _constructor(self):
return NotADataFrame
nad = NotADataFrame(self.left)
result = nad.merge(self.right, on='key')
assert isinstance(result, NotADataFrame)
def test_empty_sequence_concat(self):
# GH 9157
empty_pat = "[Nn]o objects"
none_pat = "objects.*None"
test_cases = [
((), empty_pat),
([], empty_pat),
({}, empty_pat),
([None], none_pat),
([None, None], none_pat)
]
for df_seq, pattern in test_cases:
with pytest.raises(ValueError, match=pattern):
pd.concat(df_seq)
pd.concat([pd.DataFrame()])
pd.concat([None, pd.DataFrame()])
pd.concat([pd.DataFrame(), None])
def test_doc_example(self):
left = DataFrame({'group': list('aaabbb'),
'key': ['a', 'c', 'e', 'a', 'c', 'e'],
'lvalue': [1, 2, 3] * 2,
})
right = DataFrame({'key': ['b', 'c', 'd'],
'rvalue': [1, 2, 3]})
result = merge_ordered(left, right, fill_method='ffill',
left_by='group')
expected = DataFrame({'group': list('aaaaabbbbb'),
'key': ['a', 'b', 'c', 'd', 'e'] * 2,
'lvalue': [1, 1, 2, 2, 3] * 2,
'rvalue': [nan, 1, 2, 3, 3] * 2})
assert_frame_equal(result, expected)
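# Hedged usage sketch (illustrative only): the same ordered merge outside the
# test harness, forward-filling the left values along the ordered key.
#
#   left = DataFrame({'key': ['a', 'c', 'e'], 'lvalue': [1, 2., 3]})
#   right = DataFrame({'key': ['b', 'c', 'd', 'f'], 'rvalue': [1, 2, 3., 4]})
#   print(merge_ordered(left, right, on='key', fill_method='ffill'))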
|
bsd-3-clause
|
shoyer/xarray
|
xarray/core/resample_cftime.py
|
2
|
13939
|
"""Resampling for CFTimeIndex. Does not support non-integer freq."""
# The mechanisms for resampling CFTimeIndex was copied and adapted from
# the source code defined in pandas.core.resample
#
# For reference, here is a copy of the pandas copyright notice:
#
# BSD 3-Clause License
#
# Copyright (c) 2008-2012, AQR Capital Management, LLC, Lambda Foundry, Inc.
# and PyData Development Team
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import datetime
import numpy as np
import pandas as pd
from ..coding.cftime_offsets import (
CFTIME_TICKS,
Day,
MonthEnd,
QuarterEnd,
YearEnd,
cftime_range,
normalize_date,
to_offset,
)
from ..coding.cftimeindex import CFTimeIndex
class CFTimeGrouper:
"""This is a simple container for the grouping parameters that implements a
single method, the only one required for resampling in xarray. It cannot
be used in a call to groupby like a pandas.Grouper object can."""
def __init__(self, freq, closed=None, label=None, base=0, loffset=None):
self.freq = to_offset(freq)
self.closed = closed
self.label = label
self.base = base
self.loffset = loffset
if isinstance(self.freq, (MonthEnd, QuarterEnd, YearEnd)):
if self.closed is None:
self.closed = "right"
if self.label is None:
self.label = "right"
else:
if self.closed is None:
self.closed = "left"
if self.label is None:
self.label = "left"
def first_items(self, index):
"""Meant to reproduce the results of the following
grouper = pandas.Grouper(...)
first_items = pd.Series(np.arange(len(index)),
index).groupby(grouper).first()
with index being a CFTimeIndex instead of a DatetimeIndex.
"""
datetime_bins, labels = _get_time_bins(
index, self.freq, self.closed, self.label, self.base
)
if self.loffset is not None:
if isinstance(self.loffset, datetime.timedelta):
labels = labels + self.loffset
else:
labels = labels + to_offset(self.loffset)
# check binner fits data
if index[0] < datetime_bins[0]:
raise ValueError("Value falls before first bin")
if index[-1] > datetime_bins[-1]:
raise ValueError("Value falls after last bin")
integer_bins = np.searchsorted(index, datetime_bins, side=self.closed)[:-1]
first_items = pd.Series(integer_bins, labels)
# Mask duplicate values with NaNs, preserving the last values
non_duplicate = ~first_items.duplicated("last")
return first_items.where(non_duplicate)
def _get_time_bins(index, freq, closed, label, base):
"""Obtain the bins and their respective labels for resampling operations.
Parameters
----------
index : CFTimeIndex
Index object to be resampled (e.g., CFTimeIndex named 'time').
freq : xarray.coding.cftime_offsets.BaseCFTimeOffset
The offset object representing target conversion a.k.a. resampling
frequency (e.g., 'MS', '2D', 'H', or '3T' with
coding.cftime_offsets.to_offset() applied to it).
closed : 'left' or 'right', optional
Which side of bin interval is closed.
The default is 'left' for all frequency offsets except for 'M' and 'A',
which have a default of 'right'.
label : 'left' or 'right', optional
Which bin edge label to label bucket with.
The default is 'left' for all frequency offsets except for 'M' and 'A',
which have a default of 'right'.
base : int, optional
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '5min' frequency, base could
range from 0 through 4. Defaults to 0.
Returns
-------
datetime_bins : CFTimeIndex
Defines the edge of resampling bins by which original index values will
be grouped into.
labels : CFTimeIndex
Define what the user actually sees the bins labeled as.
"""
if not isinstance(index, CFTimeIndex):
raise TypeError(
"index must be a CFTimeIndex, but got "
"an instance of %r" % type(index).__name__
)
if len(index) == 0:
datetime_bins = labels = CFTimeIndex(data=[], name=index.name)
return datetime_bins, labels
first, last = _get_range_edges(
index.min(), index.max(), freq, closed=closed, base=base
)
datetime_bins = labels = cftime_range(
freq=freq, start=first, end=last, name=index.name
)
datetime_bins, labels = _adjust_bin_edges(
datetime_bins, freq, closed, index, labels
)
if label == "right":
labels = labels[1:]
else:
labels = labels[:-1]
# TODO: when CFTimeIndex supports missing values, if the reference index
# contains missing values, insert the appropriate NaN value at the
# beginning of the datetime_bins and labels indexes.
return datetime_bins, labels
def _adjust_bin_edges(datetime_bins, offset, closed, index, labels):
"""This is required for determining the bin edges resampling with
daily frequencies greater than one day, month end, and year end
frequencies.
Consider the following example. Let's say you want to downsample the
time series with the following coordinates to month end frequency:
CFTimeIndex([2000-01-01 12:00:00, 2000-01-31 12:00:00,
2000-02-01 12:00:00], dtype='object')
Without this adjustment, _get_time_bins with month-end frequency will
return the following index for the bin edges (default closed='right' and
label='right' in this case):
CFTimeIndex([1999-12-31 00:00:00, 2000-01-31 00:00:00,
2000-02-29 00:00:00], dtype='object')
If 2000-01-31 is used as a bound for a bin, the value at
2000-01-31T12:00:00 (noon on January 31st) will not be included in the
month of January. To account for this, pandas adds one day minus one
microsecond to the bin edges generated by cftime_range, so that the value
at noon on January 31st does fall in the January bin. This results in
an index with bin edges like the following:
CFTimeIndex([1999-12-31 23:59:59, 2000-01-31 23:59:59,
2000-02-29 23:59:59], dtype='object')
The labels are still:
CFTimeIndex([2000-01-31 00:00:00, 2000-02-29 00:00:00], dtype='object')
This is also required for daily frequencies longer than one day and
year-end frequencies.
"""
is_super_daily = isinstance(offset, (MonthEnd, QuarterEnd, YearEnd)) or (
isinstance(offset, Day) and offset.n > 1
)
if is_super_daily:
if closed == "right":
datetime_bins = datetime_bins + datetime.timedelta(days=1, microseconds=-1)
if datetime_bins[-2] > index.max():
datetime_bins = datetime_bins[:-1]
labels = labels[:-1]
return datetime_bins, labels
def _get_range_edges(first, last, offset, closed="left", base=0):
""" Get the correct starting and ending datetimes for the resampled
CFTimeIndex range.
Parameters
----------
first : cftime.datetime
Uncorrected starting datetime object for resampled CFTimeIndex range.
Usually the min of the original CFTimeIndex.
last : cftime.datetime
Uncorrected ending datetime object for resampled CFTimeIndex range.
Usually the max of the original CFTimeIndex.
offset : xarray.coding.cftime_offsets.BaseCFTimeOffset
The offset object representing target conversion a.k.a. resampling
frequency. Contains information on offset type (e.g. Day or 'D') and
offset magnitude (e.g., n = 3).
closed : 'left' or 'right', optional
Which side of bin interval is closed. Defaults to 'left'.
base : int, optional
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '5min' frequency, base could
range from 0 through 4. Defaults to 0.
Returns
-------
first : cftime.datetime
Corrected starting datetime object for resampled CFTimeIndex range.
last : cftime.datetime
Corrected ending datetime object for resampled CFTimeIndex range.
"""
if isinstance(offset, CFTIME_TICKS):
first, last = _adjust_dates_anchored(
first, last, offset, closed=closed, base=base
)
return first, last
else:
first = normalize_date(first)
last = normalize_date(last)
if closed == "left":
first = offset.rollback(first)
else:
first = first - offset
last = last + offset
return first, last
def _adjust_dates_anchored(first, last, offset, closed="right", base=0):
""" First and last offsets should be calculated from the start day to fix
an error caused by resampling across multiple days when a one day period is
not a multiple of the frequency.
See https://github.com/pandas-dev/pandas/issues/8683
Parameters
----------
first : cftime.datetime
A datetime object representing the start of a CFTimeIndex range.
last : cftime.datetime
A datetime object representing the end of a CFTimeIndex range.
offset : xarray.coding.cftime_offsets.BaseCFTimeOffset
The offset object representing target conversion a.k.a. resampling
frequency. Contains information on offset type (e.g. Day or 'D') and
offset magnitude (e.g., n = 3).
closed : 'left' or 'right', optional
Which side of bin interval is closed. Defaults to 'right'.
base : int, optional
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '5min' frequency, base could
range from 0 through 4. Defaults to 0.
Returns
-------
fresult : cftime.datetime
A datetime object representing the start of a date range that has been
adjusted to fix resampling errors.
lresult : cftime.datetime
A datetime object representing the end of a date range that has been
adjusted to fix resampling errors.
"""
base = base % offset.n
start_day = normalize_date(first)
base_td = type(offset)(n=base).as_timedelta()
start_day += base_td
foffset = exact_cftime_datetime_difference(start_day, first) % offset.as_timedelta()
loffset = exact_cftime_datetime_difference(start_day, last) % offset.as_timedelta()
if closed == "right":
if foffset.total_seconds() > 0:
fresult = first - foffset
else:
fresult = first - offset.as_timedelta()
if loffset.total_seconds() > 0:
lresult = last + (offset.as_timedelta() - loffset)
else:
lresult = last
else:
if foffset.total_seconds() > 0:
fresult = first - foffset
else:
fresult = first
if loffset.total_seconds() > 0:
lresult = last + (offset.as_timedelta() - loffset)
else:
lresult = last + offset.as_timedelta()
return fresult, lresult
def exact_cftime_datetime_difference(a, b):
"""Exact computation of b - a
Assumes:
a = a_0 + a_m
b = b_0 + b_m
Here a_0, and b_0 represent the input dates rounded
down to the nearest second, and a_m, and b_m represent
the remaining microseconds associated with date a and
date b.
We can then express the value of b - a as:
b - a = (b_0 + b_m) - (a_0 + a_m) = b_0 - a_0 + b_m - a_m
By construction, we know that b_0 - a_0 must be a round number
of seconds. Therefore we can take the result of b_0 - a_0 using
ordinary cftime.datetime arithmetic and round to the nearest
second. b_m - a_m is the remainder, in microseconds, and we
can simply add this to the rounded timedelta.
Parameters
----------
a : cftime.datetime
Input datetime
b : cftime.datetime
Input datetime
Returns
-------
datetime.timedelta
"""
seconds = b.replace(microsecond=0) - a.replace(microsecond=0)
seconds = int(round(seconds.total_seconds()))
microseconds = b.microsecond - a.microsecond
return datetime.timedelta(seconds=seconds, microseconds=microseconds)
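# Worked illustration of the decomposition described above (hedged sketch;
# assumes the optional cftime package is installed):
#
#   import cftime
#   a = cftime.DatetimeNoLeap(2000, 1, 1, 0, 0, 0, 2)  # 2 microseconds past midnight
#   b = cftime.DatetimeNoLeap(2000, 1, 1, 0, 0, 1, 5)  # 1 second, 5 microseconds
#   exact_cftime_datetime_difference(a, b)  # -> timedelta(seconds=1, microseconds=3)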
|
apache-2.0
|
Hezi-Resheff/location-based-behav
|
loc-vs-acc/location/tables.py
|
1
|
4881
|
import pandas as pd
import numpy as np
import os
from location import trajectory_processor
from settings import DATA_ROOT
def compare_behav_types(data_file, min_sampels=2000, r=1, hard_max=3):
path = os.path.join(DATA_ROOT, data_file)
animal_data = pd.DataFrame.from_csv(path, parse_dates=["stamp"])
animal_data.behav = animal_data.behav.replace("\\N", -1).apply(int) # Clean animal behav and add the unknown==-1 style
animals = animal_data["bird_id"].unique()
out = {}
outn = {} # normalized
for animal in animals:
data = animal_data.loc[animal_data.bird_id == animal].copy()
print(animal)
if len(data) < min_sampels:
continue
data = trajectory_processor(data, stamp=False).compute_first_passage(r, hard_max=hard_max).clean_day_end().cluster("FPT_{}".format(r), k=3)
pivot = pd.pivot_table(data, values=["bird_id"], index=["behav"], columns=["cluster"], aggfunc=pd.DataFrame.count)
pivotn = pivot.apply(lambda col: col/col.sum()*100, axis=0) # normalized per column (cluster)
out[animal] = pivot
outn[animal] = pivotn
print(pivot, pivotn)
panel = pd.Panel.from_dict(out)
paneln = pd.Panel.from_dict(outn)
return panel, paneln
def marginals_etc(data_file, min_sampels=2000, r=1, hard_max=3):
path = os.path.join(DATA_ROOT, data_file)
animal_data = pd.DataFrame.from_csv(path, parse_dates=["stamp"])
animal_data.behav = animal_data.behav.replace("\\N", -1).apply(int) # Clean animal behav and add the unknown==-1 style
animal_data.ODBA = animal_data.ODBA.replace("\\N", np.NaN).apply(float)
animals = animal_data["bird_id"].unique()
time = {}
distance_cluster = {}
distance_behav = {}
odba_cluster = {}
odba_behav = {}
for animal in animals:
data = animal_data.loc[animal_data.bird_id == animal].copy()
print(animal)
if len(data) < min_sampels:
continue
data = trajectory_processor(data, stamp=False).compute_steps().compute_first_passage(r, hard_max=hard_max).clean_day_end().cluster("FPT_{}".format(r), k=3)
time[animal] = data["time"].groupby(data["cluster"]).sum()
distance_cluster[animal] = data["dist"].groupby(data["cluster"]).mean()
distance_behav[animal] = data["dist"].groupby(data["behav"]).mean()
odba_cluster[animal] = data["ODBA"].groupby(data["cluster"]).mean()
odba_behav[animal] = data["ODBA"].groupby(data["behav"]).mean()
print([d[animal] for d in [time, distance_cluster, distance_behav, odba_cluster, odba_behav]])
return time, distance_cluster, distance_behav, odba_cluster, odba_behav
def data_with_fpt_mode(data_file, min_sampels=2000, r=1, hard_max=3):
""" Add the FPT behavioral mode to the entire data """
path = os.path.join(DATA_ROOT, data_file)
animal_data = pd.DataFrame.from_csv(path, parse_dates=["stamp"])
animal_data.behav = animal_data.behav.replace("\\N", -1).apply(int) # Clean animal behav and add the unknown==-1 style
animal_data.ODBA = animal_data.ODBA.replace("\\N", np.NaN).apply(float)
animals = animal_data["bird_id"].unique()
def animaliter():
for animal in animals:
data = animal_data.loc[animal_data.bird_id == animal].copy()
print(animal)
if len(data) < min_sampels:
continue
yield data
frames = [trajectory_processor(data, stamp=False).compute_steps().compute_first_passage(r, hard_max=hard_max).clean_day_end().cluster("FPT_{}".format(r), k=3)
for data in animaliter()]
return pd.concat(frames).reset_index(drop=True)
if __name__ == "__main__":
data_file = "Storks_Africa__10_to_12_2012__with_behav__ALL.csv"
opt = "add-fpt-modes"
if opt == "compare-behav":
# Compare behav types
p, pn = compare_behav_types(data_file)
p.to_pickle(os.path.join(DATA_ROOT, "out", "compare_behav_types__panel(r=1-max=3h).pkl"))
pn.to_pickle(os.path.join(DATA_ROOT, "out", "compare_behav_types__panel__normalized(r=1-max=3h).pkl"))
elif opt == "marginals":
# Marginals
time, distance_cluster, distance_behav, odba_cluster, odba_behav = marginals_etc(data_file)
# save
for p_list in ('time', 'distance_cluster', 'distance_behav', 'odba_cluster', 'odba_behav'):
pd.DataFrame(eval(p_list)).to_csv(os.path.join(DATA_ROOT, "out", "marginals", "{}.csv".format(p_list)))
elif opt == "add-fpt-modes":
data_with_fpt_mode(data_file).to_csv(os.path.join(DATA_ROOT, "Storks_Africa__10_to_12_2012__with_behav__ALL__FPT.csv"))
else:
print("Nothing to do. Good night :)")
|
mit
|
Maplenormandy/list-62x
|
python/linearBrightnessTest.py
|
1
|
4712
|
import cv2
import math
import pandas as pd
import numpy as np
import time, sys, os, shutil
import yaml
from multiprocessing import Process, Queue
from Queue import Empty
import imageFeatures as imf
"""
This script collects image frames and exposure/gain data from two cameras.
"""
def currentTimestamp():
return pd.Timestamp(time.time()*1000000000)
def imageSaver(foldername, q):
while True:
toSave = None
try:
toSave = q.get(True, 1)
except Empty:
pass
if toSave != None:
if toSave == False:
print "Done"
break
name, frame = toSave
cv2.imwrite(foldername + '/' + name, frame, [cv2.IMWRITE_PNG_COMPRESSION, 9])
print "Wrote", foldername + '/' + name
frames = pd.Series([], dtype=int, name='Frame')
data = pd.DataFrame(index=frames)
params = {}
def setParam(name, x):
params[name] = x
print 'Run name:',
shortname = raw_input()
cv2.namedWindow('frame')
# Change 0 to the index that works
cap0 = cv2.VideoCapture(0)
cap1 = cv2.VideoCapture(1)
# Create the output directory and copy over stuff
for i in range(100):
foldername = 'data/' + shortname + '_' + str(i)
if not os.path.exists(foldername):
os.makedirs(foldername)
break
def setCap0Exposure(x):
cap0.set(15,x)
def setCap1Exposure(x):
cap1.set(15,x)
def setCap0Gain(x):
cap0.set(14,x)
def setCap1Gain(x):
cap1.set(14,x)
def setCap0Auto(x):
cap0.set(21,x)
def setCap1Auto(x):
cap1.set(21,x)
# Helper variables
t = 0
i = 0
# This is for waiting for the autoexposure to settle
autoSettle = 0
if cap0.isOpened() and cap1.isOpened():
q = Queue()
p = Process(target=imageSaver, args=(foldername, q,))
p.start()
# Turn off white balance
cap0.set(17, -4)
cap0.set(26, -4)
cap1.set(17, -4)
cap1.set(26, -4)
while True:
if t < len(data) and i == 0:
cap0.set(15, data.loc[t, 'Shutter 0'])
cap0.set(14, data.loc[t, 'Gain 0'])
cap1.set(15, data.loc[t, 'Shutter 1'])
cap1.set(14, data.loc[t, 'Gain 1'])
if abs(data.loc[t, 'Shutter 0']+2.0) < 0.1:
autoSettle = 24
else:
autoSettle = 0
i += 1
if t >= len(data) or i-autoSettle > 6:
ret0, frame0 = cap0.read()
ret1, frame1 = cap1.read()
if ret0 and ret1:
frame0 = cv2.cvtColor(frame0, cv2.COLOR_BAYER_BG2BGR)
frame1 = cv2.cvtColor(frame1, cv2.COLOR_BAYER_BG2BGR)
disp = np.concatenate((frame0, frame1), axis=1)
if t < len(data):
data.loc[t, 'Timestamp'] = currentTimestamp()
data.loc[t, 'Shutter 0'] = cap0.get(15)
data.loc[t, 'Gain 0'] = cap0.get(14)
imgname0 = shortname + '_0_{:0>4d}.png'.format(t)
data.loc[t, 'Image File 0'] = imgname0
gray0 = cv2.cvtColor(frame0, cv2.COLOR_BGR2GRAY)
data.loc[t, 'Mean Lum 0'] = imf.meanLuminance(gray0)
data.loc[t, 'Contrast 0'] = imf.contrast(gray0)
q.put((imgname0, frame0))
data.loc[t, 'Shutter 1'] = cap1.get(15)
data.loc[t, 'Gain 1'] = cap1.get(14)
imgname1 = shortname + '_1_{:0>4d}.png'.format(t)
data.loc[t, 'Image File 1'] = imgname1
gray1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
data.loc[t, 'Mean Lum 1'] = imf.meanLuminance(gray1)
data.loc[t, 'Contrast 1'] = imf.contrast(gray1)
q.put((imgname1, frame1))
t += 1
cv2.imshow('frame', disp)
i = 0
else:
cap0.grab()
cap1.grab()
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
break
elif key == ord('w') and t >= len(data):
t = len(data)
shutterRange = np.linspace(1.0, 531.0, 24)
gainRange = np.linspace(16.0, 64.0, 8)
cap0.set(15, 1.0)
cap0.set(14, 16.0)
cap1.set(15, 1.0)
cap1.set(14, 16.0)
k = 0
for s in shutterRange:
for g in gainRange:
data.loc[t+k, 'Shutter 0'] = s
data.loc[t+k, 'Shutter 1'] = s
data.loc[t+k, 'Gain 0'] = g
data.loc[t+k, 'Gain 1'] = g
k += 1
i = 0
q.put(False)
q.close()
p.join()
if len(data) > 0:
data.to_csv(foldername + '/' + shortname + '_rawdata.csv')
|
mit
|
yask123/scikit-learn
|
examples/decomposition/plot_pca_vs_lda.py
|
68
|
1807
|
"""
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the first 2 principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LinearDiscriminantAnalysis(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained by each component
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('PCA of IRIS dataset')
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('LDA of IRIS dataset')
plt.show()
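# Illustrative sketch (not part of the original example): projecting a single
# new measurement (values are arbitrary) with both fitted estimators.
#
#   print(pca.transform([[5.1, 3.5, 1.4, 0.2]]))
#   print(lda.transform([[5.1, 3.5, 1.4, 0.2]]))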
|
bsd-3-clause
|
Akshay0724/scikit-learn
|
examples/text/document_classification_20newsgroups.py
|
29
|
10822
|
"""
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the accuracy, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
def is_interactive():
return not hasattr(sys.modules['__main__'], '__file__')
# work-around for Jupyter notebook and IPython console
argv = [] if is_interactive() else sys.argv[1:]
(opts, args) = op.parse_args(argv)
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
print('data loaded')
# order of labels in `target_names` can be different from `categories`
target_names = data_train.target_names
def size_mb(docs):
return sum(len(s.encode('utf-8')) for s in docs) / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = vectorizer.get_feature_names()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
if feature_names:
# keep selected feature names
feature_names = [feature_names[i] for i
in ch2.get_support(indices=True)]
print("done in %fs" % (time() - t0))
print()
if feature_names:
feature_names = np.asarray(feature_names)
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if opts.print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, label in enumerate(target_names):
top10 = np.argsort(clf.coef_[i])[-10:]
print(trim("%s: %s" % (label, " ".join(feature_names[top10]))))
print()
if opts.print_report:
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=target_names))
if opts.print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(clf).split('(')[0]
return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(penalty=penalty, dual=False,
tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
('feature_selection', SelectFromModel(LinearSVC(penalty="l1", dual=False,
tol=1e-3))),
('classification', LinearSVC(penalty="l2"))])))
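# As a hedged illustration of the C / sparsity trade-off noted above, a sparser
# variant of the same pipeline could shrink C on the L1 selector (C=0.1 is an
# arbitrary example value, not part of the original benchmark, so the call is
# left commented out to keep the reported results unchanged):
# results.append(benchmark(Pipeline([
#     ('feature_selection', SelectFromModel(LinearSVC(penalty="l1", dual=False,
#                                                     tol=1e-3, C=0.1))),
#     ('classification', LinearSVC(penalty="l2"))])))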
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='navy')
plt.barh(indices + .3, training_time, .2, label="training time",
color='c')
plt.barh(indices + .6, test_time, .2, label="test time", color='darkorange')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
|
bsd-3-clause
|
matpalm/malmomo
|
event_log.py
|
1
|
3193
|
#!/usr/bin/env python
import gzip
import matplotlib.pyplot as plt
import model_pb2
import numpy as np
import StringIO
import struct
import util
class EventLog(object):
def __init__(self, path):
self.log_file = open(path, "ab")
self.episode_entry = None
def add_episode(self, episode):
for event in episode.event:
util.ensure_render_is_png_encoded(event.render)
buff = episode.SerializeToString()
buff_len = struct.pack('=l', len(buff))
self.log_file.write(buff_len)
self.log_file.write(buff)
self.log_file.flush()
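# On-disk framing used above: each record is a 4-byte signed length prefix in
# native byte order (struct format '=l') followed by that many bytes of the
# serialized Episode protobuf; EventLogReader below reads the same framing back.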
class EventLogReader(object):
def __init__(self, path):
if path.endswith(".gz"):
self.log_file = gzip.open(path, "rb")
else:
self.log_file = open(path, "rb")
def entries(self):
episode = model_pb2.Episode()
while True:
buff_len_bytes = self.log_file.read(4)
if len(buff_len_bytes) == 0: return
buff_len = struct.unpack('=l', buff_len_bytes)[0]
buff = self.log_file.read(buff_len)
episode.ParseFromString(buff)
yield episode
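# Minimal usage sketch (the filename is illustrative; any log written by
# EventLog above, optionally gzipped, would be passed instead):
# for episode in EventLogReader("episodes.log").entries():
#     print len(episode.event), "events"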
if __name__ == "__main__":
import argparse, os, sys, Image, ImageDraw
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--file', type=str, default=None)
parser.add_argument('--echo', action='store_true', help="write event to stdout")
parser.add_argument('--episodes', type=str, default=None,
help="if set only process these specific episodes (comma separated list)")
parser.add_argument('--nth', type=int, default=None,
help="if set emit every nth episode")
parser.add_argument('--img-output-dir', type=str, default=None,
help="if set output all renders to this DIR/e_NUM/s_NUM.png")
opts = parser.parse_args()
episode_whitelist = None
if opts.episodes is not None:
episode_whitelist = set(map(int, opts.episodes.split(",")))
if opts.img_output_dir is not None:
util.make_dir(opts.img_output_dir)
total_num_read_episodes = 0
total_num_read_events = 0
elr = EventLogReader(opts.file)
for episode_id, episode in enumerate(elr.entries()):
if opts.nth is not None and episode_id % opts.nth != 0:
continue
if episode_whitelist is not None:
if episode_id not in episode_whitelist:
continue
episode_whitelist.discard(episode_id)
if opts.echo:
print "-----", episode_id
print episode
total_num_read_episodes += 1
total_num_read_events += len(episode.event)
if opts.img_output_dir is not None:
dir = "%s/ep_%05d" % (opts.img_output_dir, episode_id)
util.make_dir(dir)
print "writing to", dir
for event_id, event in enumerate(episode.event):
assert event.render.is_png_encoded, "only expect serialised pngs"
img = Image.open(StringIO.StringIO(event.render.bytes))
# img = img.resize((200, 200))
filename = "%s/e%04d.png" % (dir, event_id)
img.save(filename)
if episode_whitelist is not None and len(episode_whitelist) == 0:
break
print >>sys.stderr, "read", total_num_read_episodes, "episodes for a total of", total_num_read_events, "events"
|
mit
|
lthurlow/Network-Grapher
|
proj/external/matplotlib-1.2.1/build/lib.linux-i686-2.7/matplotlib/_cm.py
|
2
|
90051
|
"""
Nothing here but dictionaries for generating LinearSegmentedColormaps,
and a dictionary of these dictionaries.
"""
from __future__ import print_function, division
import numpy as np
_binary_data = {
'red' : ((0., 1., 1.), (1., 0., 0.)),
'green': ((0., 1., 1.), (1., 0., 0.)),
'blue' : ((0., 1., 1.), (1., 0., 0.))
}
_autumn_data = {'red': ((0., 1.0, 1.0),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(1.0, 0., 0.))}
_bone_data = {'red': ((0., 0., 0.),(0.746032, 0.652778, 0.652778),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.365079, 0.319444, 0.319444),
(0.746032, 0.777778, 0.777778),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.365079, 0.444444, 0.444444),(1.0, 1.0, 1.0))}
_cool_data = {'red': ((0., 0., 0.), (1.0, 1.0, 1.0)),
'green': ((0., 1., 1.), (1.0, 0., 0.)),
'blue': ((0., 1., 1.), (1.0, 1., 1.))}
_copper_data = {'red': ((0., 0., 0.),(0.809524, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(1.0, 0.7812, 0.7812)),
'blue': ((0., 0., 0.),(1.0, 0.4975, 0.4975))}
_flag_data = {
'red': lambda x: 0.75 * np.sin((x * 31.5 + 0.25) * np.pi) + 0.5,
'green': lambda x: np.sin(x * 31.5 * np.pi),
'blue': lambda x: 0.75 * np.sin((x * 31.5 - 0.25) * np.pi) + 0.5,
}
_prism_data = {
'red': lambda x: 0.75 * np.sin((x * 20.9 + 0.25) * np.pi) + 0.67,
'green': lambda x: 0.75 * np.sin((x * 20.9 - 0.25) * np.pi) + 0.33,
'blue': lambda x: -1.1 * np.sin((x * 20.9) * np.pi),
}
def cubehelix(gamma=1.0, s=0.5, r=-1.5, h=1.0):
"""Return custom data dictionary of (r,g,b) conversion functions, which
can be used with :func:`register_cmap`, for the cubehelix color scheme.
Unlike most other color schemes cubehelix was designed by D.A. Green to
be monotonically increasing in terms of perceived brightness.
Also, when printed on a black and white postscript printer, the scheme
results in a greyscale with monotonically increasing brightness.
This color scheme is named cubehelix because the r,g,b values produced
can be visualised as a squashed helix around the diagonal in the
r,g,b color cube.
For a unit color cube (i.e. 3-D coordinates for r,g,b each in the
range 0 to 1) the color scheme starts at (r,g,b) = (0,0,0), i.e. black,
and finishes at (r,g,b) = (1,1,1), i.e. white. For some fraction *x*,
between 0 and 1, the color is the corresponding grey value at that
fraction along the black to white diagonal (x,x,x) plus a color
element. This color element is calculated in a plane of constant
perceived intensity and controlled by the following parameters.
Optional keyword arguments:
========= =======================================================
Keyword Description
========= =======================================================
gamma gamma factor to emphasise either low intensity values
(gamma < 1), or high intensity values (gamma > 1);
defaults to 1.0.
s the start color; defaults to 0.5 (i.e. purple).
r the number of r,g,b rotations in color that are made
from the start to the end of the color scheme; defaults
to -1.5 (i.e. -> B -> G -> R -> B).
h the hue parameter which controls how saturated the
colors are. If this parameter is zero then the color
scheme is purely a greyscale; defaults to 1.0.
========= =======================================================
"""
def get_color_function(p0, p1):
def color(x):
# Apply gamma factor to emphasise low or high intensity values
xg = x**gamma
# Calculate amplitude and angle of deviation from the black
# to white diagonal in the plane of constant
# perceived intensity.
a = h * xg * (1 - xg) / 2
phi = 2 * np.pi * (s / 3 + r * x)
return xg + a * (p0 * np.cos(phi) + p1 * np.sin(phi))
return color
return {
'red': get_color_function(-0.14861, 1.78277),
'green': get_color_function(-0.29227, -0.90649),
'blue': get_color_function(1.97294, 0.0),
}
_cubehelix_data = cubehelix()
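# An alternative parameterisation built from the same helper; the values are
# illustrative only (not a palette shipped by matplotlib) and simply exercise
# the knobs documented above: gamma < 1 emphasises low intensities and r > 0
# rotates the helix the other way.
_cubehelix_alt_data = cubehelix(gamma=0.8, s=2.0, r=1.0, h=1.5)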
_bwr_data = ((0.0, 0.0, 1.0), (1.0, 1.0, 1.0), (1.0, 0.0, 0.0))
_brg_data = ((0.0, 0.0, 1.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0))
# Gnuplot palette functions
gfunc = {
0: lambda x: 0,
1: lambda x: 0.5,
2: lambda x: 1,
3: lambda x: x,
4: lambda x: x**2,
5: lambda x: x**3,
6: lambda x: x**4,
7: lambda x: np.sqrt(x),
8: lambda x: np.sqrt(np.sqrt(x)),
9: lambda x: np.sin(x * np.pi / 2),
10: lambda x: np.cos(x * np.pi / 2),
11: lambda x: np.abs(x - 0.5),
12: lambda x: (2 * x - 1)**2,
13: lambda x: np.sin(x * np.pi),
14: lambda x: np.abs(np.cos(x * np.pi)),
15: lambda x: np.sin(x * 2 * np.pi),
16: lambda x: np.cos(x * 2 * np.pi),
17: lambda x: np.abs(np.sin(x * 2 * np.pi)),
18: lambda x: np.abs(np.cos(x * 2 * np.pi)),
19: lambda x: np.abs(np.sin(x * 4 * np.pi)),
20: lambda x: np.abs(np.cos(x * 4 * np.pi)),
21: lambda x: 3 * x,
22: lambda x: 3 * x - 1,
23: lambda x: 3 * x - 2,
24: lambda x: np.abs(3 * x - 1),
25: lambda x: np.abs(3 * x - 2),
26: lambda x: (3 * x - 1) / 2,
27: lambda x: (3 * x - 2) / 2,
28: lambda x: np.abs((3 * x - 1) / 2),
29: lambda x: np.abs((3 * x - 2) / 2),
30: lambda x: x / 0.32 - 0.78125,
31: lambda x: 2 * x - 0.84,
32: lambda x: gfunc32(x),
33: lambda x: np.abs(2 * x - 0.5),
34: lambda x: 2 * x,
35: lambda x: 2 * x - 0.5,
36: lambda x: 2 * x - 1.
}
def gfunc32(x):
ret = np.zeros(len(x))
m = (x < 0.25)
ret[m] = 4 * x[m]
m = (x >= 0.25) & (x < 0.92)
ret[m] = -2 * x[m] + 1.84
m = (x >= 0.92)
ret[m] = x[m] / 0.08 - 11.5
return ret
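# Further gnuplot-style palettes can be assembled from the formulae above just
# like the dictionaries below; the combination here is purely illustrative and
# not a colormap shipped by matplotlib.
_gnuplot_example_data = {
    'red': gfunc[3],     # x (linear ramp)
    'green': gfunc[11],  # |x - 0.5|
    'blue': gfunc[16],   # cos(2*pi*x); values outside [0, 1] are clipped when the LUT is built
}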
_gnuplot_data = {
'red': gfunc[7],
'green': gfunc[5],
'blue': gfunc[15],
}
_gnuplot2_data = {
'red': gfunc[30],
'green': gfunc[31],
'blue': gfunc[32],
}
_ocean_data = {
'red': gfunc[23],
'green': gfunc[28],
'blue': gfunc[3],
}
_afmhot_data = {
'red': gfunc[34],
'green': gfunc[35],
'blue': gfunc[36],
}
_rainbow_data = {
'red': gfunc[33],
'green': gfunc[13],
'blue': gfunc[10],
}
_seismic_data = (
(0.0, 0.0, 0.3), (0.0, 0.0, 1.0),
(1.0, 1.0, 1.0), (1.0, 0.0, 0.0),
(0.5, 0.0, 0.0))
_terrain_data = (
(0.00, (0.2, 0.2, 0.6)),
(0.15, (0.0, 0.6, 1.0)),
(0.25, (0.0, 0.8, 0.4)),
(0.50, (1.0, 1.0, 0.6)),
(0.75, (0.5, 0.36, 0.33)),
(1.00, (1.0, 1.0, 1.0)))
_gray_data = {'red': ((0., 0, 0), (1., 1, 1)),
'green': ((0., 0, 0), (1., 1, 1)),
'blue': ((0., 0, 0), (1., 1, 1))}
_hot_data = {'red': ((0., 0.0416, 0.0416),(0.365079, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.365079, 0.000000, 0.000000),
(0.746032, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.746032, 0.000000, 0.000000),(1.0, 1.0, 1.0))}
_hsv_data = {'red': ((0., 1., 1.),(0.158730, 1.000000, 1.000000),
(0.174603, 0.968750, 0.968750),(0.333333, 0.031250, 0.031250),
(0.349206, 0.000000, 0.000000),(0.666667, 0.000000, 0.000000),
(0.682540, 0.031250, 0.031250),(0.841270, 0.968750, 0.968750),
(0.857143, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.158730, 0.937500, 0.937500),
(0.174603, 1.000000, 1.000000),(0.507937, 1.000000, 1.000000),
(0.666667, 0.062500, 0.062500),(0.682540, 0.000000, 0.000000),
(1.0, 0., 0.)),
'blue': ((0., 0., 0.),(0.333333, 0.000000, 0.000000),
(0.349206, 0.062500, 0.062500),(0.507937, 1.000000, 1.000000),
(0.841270, 1.000000, 1.000000),(0.857143, 0.937500, 0.937500),
(1.0, 0.09375, 0.09375))}
_jet_data = {'red': ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89,1, 1),
(1, 0.5, 0.5)),
'green': ((0., 0, 0), (0.125,0, 0), (0.375,1, 1), (0.64,1, 1),
(0.91,0,0), (1, 0, 0)),
'blue': ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1), (0.65,0, 0),
(1, 0, 0))}
_pink_data = {'red': ((0., 0.1178, 0.1178),(0.015873, 0.195857, 0.195857),
(0.031746, 0.250661, 0.250661),(0.047619, 0.295468, 0.295468),
(0.063492, 0.334324, 0.334324),(0.079365, 0.369112, 0.369112),
(0.095238, 0.400892, 0.400892),(0.111111, 0.430331, 0.430331),
(0.126984, 0.457882, 0.457882),(0.142857, 0.483867, 0.483867),
(0.158730, 0.508525, 0.508525),(0.174603, 0.532042, 0.532042),
(0.190476, 0.554563, 0.554563),(0.206349, 0.576204, 0.576204),
(0.222222, 0.597061, 0.597061),(0.238095, 0.617213, 0.617213),
(0.253968, 0.636729, 0.636729),(0.269841, 0.655663, 0.655663),
(0.285714, 0.674066, 0.674066),(0.301587, 0.691980, 0.691980),
(0.317460, 0.709441, 0.709441),(0.333333, 0.726483, 0.726483),
(0.349206, 0.743134, 0.743134),(0.365079, 0.759421, 0.759421),
(0.380952, 0.766356, 0.766356),(0.396825, 0.773229, 0.773229),
(0.412698, 0.780042, 0.780042),(0.428571, 0.786796, 0.786796),
(0.444444, 0.793492, 0.793492),(0.460317, 0.800132, 0.800132),
(0.476190, 0.806718, 0.806718),(0.492063, 0.813250, 0.813250),
(0.507937, 0.819730, 0.819730),(0.523810, 0.826160, 0.826160),
(0.539683, 0.832539, 0.832539),(0.555556, 0.838870, 0.838870),
(0.571429, 0.845154, 0.845154),(0.587302, 0.851392, 0.851392),
(0.603175, 0.857584, 0.857584),(0.619048, 0.863731, 0.863731),
(0.634921, 0.869835, 0.869835),(0.650794, 0.875897, 0.875897),
(0.666667, 0.881917, 0.881917),(0.682540, 0.887896, 0.887896),
(0.698413, 0.893835, 0.893835),(0.714286, 0.899735, 0.899735),
(0.730159, 0.905597, 0.905597),(0.746032, 0.911421, 0.911421),
(0.761905, 0.917208, 0.917208),(0.777778, 0.922958, 0.922958),
(0.793651, 0.928673, 0.928673),(0.809524, 0.934353, 0.934353),
(0.825397, 0.939999, 0.939999),(0.841270, 0.945611, 0.945611),
(0.857143, 0.951190, 0.951190),(0.873016, 0.956736, 0.956736),
(0.888889, 0.962250, 0.962250),(0.904762, 0.967733, 0.967733),
(0.920635, 0.973185, 0.973185),(0.936508, 0.978607, 0.978607),
(0.952381, 0.983999, 0.983999),(0.968254, 0.989361, 0.989361),
(0.984127, 0.994695, 0.994695),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.015873, 0.102869, 0.102869),
(0.031746, 0.145479, 0.145479),(0.047619, 0.178174, 0.178174),
(0.063492, 0.205738, 0.205738),(0.079365, 0.230022, 0.230022),
(0.095238, 0.251976, 0.251976),(0.111111, 0.272166, 0.272166),
(0.126984, 0.290957, 0.290957),(0.142857, 0.308607, 0.308607),
(0.158730, 0.325300, 0.325300),(0.174603, 0.341178, 0.341178),
(0.190476, 0.356348, 0.356348),(0.206349, 0.370899, 0.370899),
(0.222222, 0.384900, 0.384900),(0.238095, 0.398410, 0.398410),
(0.253968, 0.411476, 0.411476),(0.269841, 0.424139, 0.424139),
(0.285714, 0.436436, 0.436436),(0.301587, 0.448395, 0.448395),
(0.317460, 0.460044, 0.460044),(0.333333, 0.471405, 0.471405),
(0.349206, 0.482498, 0.482498),(0.365079, 0.493342, 0.493342),
(0.380952, 0.517549, 0.517549),(0.396825, 0.540674, 0.540674),
(0.412698, 0.562849, 0.562849),(0.428571, 0.584183, 0.584183),
(0.444444, 0.604765, 0.604765),(0.460317, 0.624669, 0.624669),
(0.476190, 0.643958, 0.643958),(0.492063, 0.662687, 0.662687),
(0.507937, 0.680900, 0.680900),(0.523810, 0.698638, 0.698638),
(0.539683, 0.715937, 0.715937),(0.555556, 0.732828, 0.732828),
(0.571429, 0.749338, 0.749338),(0.587302, 0.765493, 0.765493),
(0.603175, 0.781313, 0.781313),(0.619048, 0.796819, 0.796819),
(0.634921, 0.812029, 0.812029),(0.650794, 0.826960, 0.826960),
(0.666667, 0.841625, 0.841625),(0.682540, 0.856040, 0.856040),
(0.698413, 0.870216, 0.870216),(0.714286, 0.884164, 0.884164),
(0.730159, 0.897896, 0.897896),(0.746032, 0.911421, 0.911421),
(0.761905, 0.917208, 0.917208),(0.777778, 0.922958, 0.922958),
(0.793651, 0.928673, 0.928673),(0.809524, 0.934353, 0.934353),
(0.825397, 0.939999, 0.939999),(0.841270, 0.945611, 0.945611),
(0.857143, 0.951190, 0.951190),(0.873016, 0.956736, 0.956736),
(0.888889, 0.962250, 0.962250),(0.904762, 0.967733, 0.967733),
(0.920635, 0.973185, 0.973185),(0.936508, 0.978607, 0.978607),
(0.952381, 0.983999, 0.983999),(0.968254, 0.989361, 0.989361),
(0.984127, 0.994695, 0.994695),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.015873, 0.102869, 0.102869),
(0.031746, 0.145479, 0.145479),(0.047619, 0.178174, 0.178174),
(0.063492, 0.205738, 0.205738),(0.079365, 0.230022, 0.230022),
(0.095238, 0.251976, 0.251976),(0.111111, 0.272166, 0.272166),
(0.126984, 0.290957, 0.290957),(0.142857, 0.308607, 0.308607),
(0.158730, 0.325300, 0.325300),(0.174603, 0.341178, 0.341178),
(0.190476, 0.356348, 0.356348),(0.206349, 0.370899, 0.370899),
(0.222222, 0.384900, 0.384900),(0.238095, 0.398410, 0.398410),
(0.253968, 0.411476, 0.411476),(0.269841, 0.424139, 0.424139),
(0.285714, 0.436436, 0.436436),(0.301587, 0.448395, 0.448395),
(0.317460, 0.460044, 0.460044),(0.333333, 0.471405, 0.471405),
(0.349206, 0.482498, 0.482498),(0.365079, 0.493342, 0.493342),
(0.380952, 0.503953, 0.503953),(0.396825, 0.514344, 0.514344),
(0.412698, 0.524531, 0.524531),(0.428571, 0.534522, 0.534522),
(0.444444, 0.544331, 0.544331),(0.460317, 0.553966, 0.553966),
(0.476190, 0.563436, 0.563436),(0.492063, 0.572750, 0.572750),
(0.507937, 0.581914, 0.581914),(0.523810, 0.590937, 0.590937),
(0.539683, 0.599824, 0.599824),(0.555556, 0.608581, 0.608581),
(0.571429, 0.617213, 0.617213),(0.587302, 0.625727, 0.625727),
(0.603175, 0.634126, 0.634126),(0.619048, 0.642416, 0.642416),
(0.634921, 0.650600, 0.650600),(0.650794, 0.658682, 0.658682),
(0.666667, 0.666667, 0.666667),(0.682540, 0.674556, 0.674556),
(0.698413, 0.682355, 0.682355),(0.714286, 0.690066, 0.690066),
(0.730159, 0.697691, 0.697691),(0.746032, 0.705234, 0.705234),
(0.761905, 0.727166, 0.727166),(0.777778, 0.748455, 0.748455),
(0.793651, 0.769156, 0.769156),(0.809524, 0.789314, 0.789314),
(0.825397, 0.808969, 0.808969),(0.841270, 0.828159, 0.828159),
(0.857143, 0.846913, 0.846913),(0.873016, 0.865261, 0.865261),
(0.888889, 0.883229, 0.883229),(0.904762, 0.900837, 0.900837),
(0.920635, 0.918109, 0.918109),(0.936508, 0.935061, 0.935061),
(0.952381, 0.951711, 0.951711),(0.968254, 0.968075, 0.968075),
(0.984127, 0.984167, 0.984167),(1.0, 1.0, 1.0))}
_spring_data = {'red': ((0., 1., 1.),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'blue': ((0., 1., 1.),(1.0, 0.0, 0.0))}
_summer_data = {'red': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'green': ((0., 0.5, 0.5),(1.0, 1.0, 1.0)),
'blue': ((0., 0.4, 0.4),(1.0, 0.4, 0.4))}
_winter_data = {'red': ((0., 0., 0.),(1.0, 0.0, 0.0)),
'green': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'blue': ((0., 1., 1.),(1.0, 0.5, 0.5))}
_spectral_data = {'red': [(0.0, 0.0, 0.0), (0.05, 0.4667, 0.4667),
(0.10, 0.5333, 0.5333), (0.15, 0.0, 0.0),
(0.20, 0.0, 0.0), (0.25, 0.0, 0.0),
(0.30, 0.0, 0.0), (0.35, 0.0, 0.0),
(0.40, 0.0, 0.0), (0.45, 0.0, 0.0),
(0.50, 0.0, 0.0), (0.55, 0.0, 0.0),
(0.60, 0.0, 0.0), (0.65, 0.7333, 0.7333),
(0.70, 0.9333, 0.9333), (0.75, 1.0, 1.0),
(0.80, 1.0, 1.0), (0.85, 1.0, 1.0),
(0.90, 0.8667, 0.8667), (0.95, 0.80, 0.80),
(1.0, 0.80, 0.80)],
'green': [(0.0, 0.0, 0.0), (0.05, 0.0, 0.0),
(0.10, 0.0, 0.0), (0.15, 0.0, 0.0),
(0.20, 0.0, 0.0), (0.25, 0.4667, 0.4667),
(0.30, 0.6000, 0.6000), (0.35, 0.6667, 0.6667),
(0.40, 0.6667, 0.6667), (0.45, 0.6000, 0.6000),
(0.50, 0.7333, 0.7333), (0.55, 0.8667, 0.8667),
(0.60, 1.0, 1.0), (0.65, 1.0, 1.0),
(0.70, 0.9333, 0.9333), (0.75, 0.8000, 0.8000),
(0.80, 0.6000, 0.6000), (0.85, 0.0, 0.0),
(0.90, 0.0, 0.0), (0.95, 0.0, 0.0),
(1.0, 0.80, 0.80)],
'blue': [(0.0, 0.0, 0.0), (0.05, 0.5333, 0.5333),
(0.10, 0.6000, 0.6000), (0.15, 0.6667, 0.6667),
(0.20, 0.8667, 0.8667), (0.25, 0.8667, 0.8667),
(0.30, 0.8667, 0.8667), (0.35, 0.6667, 0.6667),
(0.40, 0.5333, 0.5333), (0.45, 0.0, 0.0),
(0.5, 0.0, 0.0), (0.55, 0.0, 0.0),
(0.60, 0.0, 0.0), (0.65, 0.0, 0.0),
(0.70, 0.0, 0.0), (0.75, 0.0, 0.0),
(0.80, 0.0, 0.0), (0.85, 0.0, 0.0),
(0.90, 0.0, 0.0), (0.95, 0.0, 0.0),
(1.0, 0.80, 0.80)]}
# 34 colormaps based on color specifications and designs
# developed by Cynthia Brewer (http://colorbrewer.org).
# The ColorBrewer palettes have been included under the terms
# of an Apache-style license (for details, see the file
# LICENSE_COLORBREWER in the license directory of the matplotlib
# source distribution).
_Accent_data = {'blue': [(0.0, 0.49803921580314636,
0.49803921580314636), (0.14285714285714285, 0.83137255907058716,
0.83137255907058716), (0.2857142857142857, 0.52549022436141968,
0.52549022436141968), (0.42857142857142855, 0.60000002384185791,
0.60000002384185791), (0.5714285714285714, 0.69019609689712524,
0.69019609689712524), (0.7142857142857143, 0.49803921580314636,
0.49803921580314636), (0.8571428571428571, 0.090196080505847931,
0.090196080505847931), (1.0, 0.40000000596046448,
0.40000000596046448)],
'green': [(0.0, 0.78823530673980713, 0.78823530673980713),
(0.14285714285714285, 0.68235296010971069, 0.68235296010971069),
(0.2857142857142857, 0.75294119119644165, 0.75294119119644165),
(0.42857142857142855, 1.0, 1.0), (0.5714285714285714,
0.42352941632270813, 0.42352941632270813), (0.7142857142857143,
0.0078431377187371254, 0.0078431377187371254),
(0.8571428571428571, 0.35686275362968445, 0.35686275362968445),
(1.0, 0.40000000596046448, 0.40000000596046448)],
'red': [(0.0, 0.49803921580314636, 0.49803921580314636),
(0.14285714285714285, 0.7450980544090271, 0.7450980544090271),
(0.2857142857142857, 0.99215686321258545, 0.99215686321258545),
(0.42857142857142855, 1.0, 1.0), (0.5714285714285714,
0.21960784494876862, 0.21960784494876862), (0.7142857142857143,
0.94117647409439087, 0.94117647409439087), (0.8571428571428571,
0.74901962280273438, 0.74901962280273438), (1.0,
0.40000000596046448, 0.40000000596046448)]}
_Blues_data = {'blue': [(0.0, 1.0, 1.0), (0.125, 0.9686274528503418,
0.9686274528503418), (0.25, 0.93725490570068359, 0.93725490570068359),
(0.375, 0.88235294818878174, 0.88235294818878174), (0.5,
0.83921569585800171, 0.83921569585800171), (0.625, 0.7764706015586853,
0.7764706015586853), (0.75, 0.70980393886566162, 0.70980393886566162),
(0.875, 0.61176472902297974, 0.61176472902297974), (1.0,
0.41960784792900085, 0.41960784792900085)],
'green': [(0.0, 0.9843137264251709, 0.9843137264251709), (0.125,
0.92156863212585449, 0.92156863212585449), (0.25,
0.85882353782653809, 0.85882353782653809), (0.375,
0.7921568751335144, 0.7921568751335144), (0.5,
0.68235296010971069, 0.68235296010971069), (0.625,
0.57254904508590698, 0.57254904508590698), (0.75,
0.44313725829124451, 0.44313725829124451), (0.875,
0.31764706969261169, 0.31764706969261169), (1.0,
0.18823529779911041, 0.18823529779911041)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87058824300765991, 0.87058824300765991), (0.25,
0.7764706015586853, 0.7764706015586853), (0.375,
0.61960786581039429, 0.61960786581039429), (0.5,
0.41960784792900085, 0.41960784792900085), (0.625,
0.25882354378700256, 0.25882354378700256), (0.75,
0.12941177189350128, 0.12941177189350128), (0.875,
0.031372550874948502, 0.031372550874948502), (1.0,
0.031372550874948502, 0.031372550874948502)]}
_BrBG_data = {'blue': [(0.0, 0.019607843831181526,
0.019607843831181526), (0.10000000000000001, 0.039215687662363052,
0.039215687662363052), (0.20000000000000001, 0.17647059261798859,
0.17647059261798859), (0.29999999999999999, 0.49019607901573181,
0.49019607901573181), (0.40000000000000002, 0.76470589637756348,
0.76470589637756348), (0.5, 0.96078431606292725, 0.96078431606292725),
(0.59999999999999998, 0.89803922176361084, 0.89803922176361084),
(0.69999999999999996, 0.75686275959014893, 0.75686275959014893),
(0.80000000000000004, 0.56078433990478516, 0.56078433990478516),
(0.90000000000000002, 0.36862745881080627, 0.36862745881080627), (1.0,
0.18823529779911041, 0.18823529779911041)],
'green': [(0.0, 0.18823529779911041, 0.18823529779911041),
(0.10000000000000001, 0.31764706969261169, 0.31764706969261169),
(0.20000000000000001, 0.5058823823928833, 0.5058823823928833),
(0.29999999999999999, 0.7607843279838562, 0.7607843279838562),
(0.40000000000000002, 0.90980392694473267, 0.90980392694473267),
(0.5, 0.96078431606292725, 0.96078431606292725),
(0.59999999999999998, 0.91764706373214722, 0.91764706373214722),
(0.69999999999999996, 0.80392158031463623, 0.80392158031463623),
(0.80000000000000004, 0.59215688705444336, 0.59215688705444336),
(0.90000000000000002, 0.40000000596046448, 0.40000000596046448),
(1.0, 0.23529411852359772, 0.23529411852359772)],
'red': [(0.0, 0.32941177487373352, 0.32941177487373352),
(0.10000000000000001, 0.54901963472366333, 0.54901963472366333),
(0.20000000000000001, 0.74901962280273438, 0.74901962280273438),
(0.29999999999999999, 0.87450981140136719, 0.87450981140136719),
(0.40000000000000002, 0.96470588445663452, 0.96470588445663452),
(0.5, 0.96078431606292725, 0.96078431606292725),
(0.59999999999999998, 0.78039216995239258, 0.78039216995239258),
(0.69999999999999996, 0.50196081399917603, 0.50196081399917603),
(0.80000000000000004, 0.20784313976764679, 0.20784313976764679),
(0.90000000000000002, 0.0039215688593685627,
0.0039215688593685627), (1.0, 0.0, 0.0)]}
_BuGn_data = {'blue': [(0.0, 0.99215686321258545,
0.99215686321258545), (0.125, 0.97647058963775635,
0.97647058963775635), (0.25, 0.90196079015731812,
0.90196079015731812), (0.375, 0.78823530673980713,
0.78823530673980713), (0.5, 0.64313727617263794, 0.64313727617263794),
(0.625, 0.46274510025978088, 0.46274510025978088), (0.75,
0.27058824896812439, 0.27058824896812439), (0.875,
0.17254902422428131, 0.17254902422428131), (1.0, 0.10588235408067703,
0.10588235408067703)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.96078431606292725, 0.96078431606292725), (0.25,
0.92549020051956177, 0.92549020051956177), (0.375,
0.84705883264541626, 0.84705883264541626), (0.5,
0.7607843279838562, 0.7607843279838562), (0.625,
0.68235296010971069, 0.68235296010971069), (0.75,
0.54509806632995605, 0.54509806632995605), (0.875,
0.42745098471641541, 0.42745098471641541), (1.0,
0.26666668057441711, 0.26666668057441711)], 'red': [(0.0,
0.9686274528503418, 0.9686274528503418), (0.125,
0.89803922176361084, 0.89803922176361084), (0.25,
0.80000001192092896, 0.80000001192092896), (0.375,
0.60000002384185791, 0.60000002384185791), (0.5,
0.40000000596046448, 0.40000000596046448), (0.625,
0.25490197539329529, 0.25490197539329529), (0.75,
0.13725490868091583, 0.13725490868091583), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)]}
_BuPu_data = {'blue': [(0.0, 0.99215686321258545,
0.99215686321258545), (0.125, 0.95686274766921997,
0.95686274766921997), (0.25, 0.90196079015731812,
0.90196079015731812), (0.375, 0.85490196943283081,
0.85490196943283081), (0.5, 0.7764706015586853, 0.7764706015586853),
(0.625, 0.69411766529083252, 0.69411766529083252), (0.75,
0.61568629741668701, 0.61568629741668701), (0.875,
0.48627451062202454, 0.48627451062202454), (1.0, 0.29411765933036804,
0.29411765933036804)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.92549020051956177, 0.92549020051956177), (0.25,
0.82745099067687988, 0.82745099067687988), (0.375,
0.73725491762161255, 0.73725491762161255), (0.5,
0.58823531866073608, 0.58823531866073608), (0.625,
0.41960784792900085, 0.41960784792900085), (0.75,
0.25490197539329529, 0.25490197539329529), (0.875,
0.058823529630899429, 0.058823529630899429), (1.0, 0.0, 0.0)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.74901962280273438, 0.74901962280273438), (0.375,
0.61960786581039429, 0.61960786581039429), (0.5,
0.54901963472366333, 0.54901963472366333), (0.625,
0.54901963472366333, 0.54901963472366333), (0.75,
0.53333336114883423, 0.53333336114883423), (0.875,
0.5058823823928833, 0.5058823823928833), (1.0,
0.30196079611778259, 0.30196079611778259)]}
_Dark2_data = {'blue': [(0.0, 0.46666666865348816,
0.46666666865348816), (0.14285714285714285, 0.0078431377187371254,
0.0078431377187371254), (0.2857142857142857, 0.70196080207824707,
0.70196080207824707), (0.42857142857142855, 0.54117649793624878,
0.54117649793624878), (0.5714285714285714, 0.11764705926179886,
0.11764705926179886), (0.7142857142857143, 0.0078431377187371254,
0.0078431377187371254), (0.8571428571428571, 0.11372549086809158,
0.11372549086809158), (1.0, 0.40000000596046448,
0.40000000596046448)],
'green': [(0.0, 0.61960786581039429, 0.61960786581039429),
(0.14285714285714285, 0.37254902720451355, 0.37254902720451355),
(0.2857142857142857, 0.43921568989753723, 0.43921568989753723),
(0.42857142857142855, 0.16078431904315948, 0.16078431904315948),
(0.5714285714285714, 0.65098041296005249, 0.65098041296005249),
(0.7142857142857143, 0.67058825492858887, 0.67058825492858887),
(0.8571428571428571, 0.46274510025978088, 0.46274510025978088),
(1.0, 0.40000000596046448, 0.40000000596046448)],
'red': [(0.0, 0.10588235408067703, 0.10588235408067703),
(0.14285714285714285, 0.85098040103912354, 0.85098040103912354),
(0.2857142857142857, 0.45882353186607361, 0.45882353186607361),
(0.42857142857142855, 0.90588235855102539, 0.90588235855102539),
(0.5714285714285714, 0.40000000596046448, 0.40000000596046448),
(0.7142857142857143, 0.90196079015731812, 0.90196079015731812),
(0.8571428571428571, 0.65098041296005249, 0.65098041296005249),
(1.0, 0.40000000596046448, 0.40000000596046448)]}
_GnBu_data = {'blue': [(0.0, 0.94117647409439087,
0.94117647409439087), (0.125, 0.85882353782653809,
0.85882353782653809), (0.25, 0.77254903316497803,
0.77254903316497803), (0.375, 0.70980393886566162,
0.70980393886566162), (0.5, 0.76862746477127075, 0.76862746477127075),
(0.625, 0.82745099067687988, 0.82745099067687988), (0.75,
0.7450980544090271, 0.7450980544090271), (0.875, 0.67450982332229614,
0.67450982332229614), (1.0, 0.5058823823928833, 0.5058823823928833)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.9529411792755127, 0.9529411792755127), (0.25,
0.92156863212585449, 0.92156863212585449), (0.375,
0.86666667461395264, 0.86666667461395264), (0.5,
0.80000001192092896, 0.80000001192092896), (0.625,
0.70196080207824707, 0.70196080207824707), (0.75,
0.54901963472366333, 0.54901963472366333), (0.875,
0.40784314274787903, 0.40784314274787903), (1.0,
0.25098040699958801, 0.25098040699958801)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.80000001192092896, 0.80000001192092896), (0.375,
0.65882354974746704, 0.65882354974746704), (0.5,
0.48235294222831726, 0.48235294222831726), (0.625,
0.30588236451148987, 0.30588236451148987), (0.75,
0.16862745583057404, 0.16862745583057404), (0.875,
0.031372550874948502, 0.031372550874948502), (1.0,
0.031372550874948502, 0.031372550874948502)]}
_Greens_data = {'blue': [(0.0, 0.96078431606292725,
0.96078431606292725), (0.125, 0.87843137979507446,
0.87843137979507446), (0.25, 0.75294119119644165,
0.75294119119644165), (0.375, 0.60784316062927246,
0.60784316062927246), (0.5, 0.46274510025978088, 0.46274510025978088),
(0.625, 0.364705890417099, 0.364705890417099), (0.75,
0.27058824896812439, 0.27058824896812439), (0.875,
0.17254902422428131, 0.17254902422428131), (1.0, 0.10588235408067703,
0.10588235408067703)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.96078431606292725, 0.96078431606292725), (0.25,
0.91372549533843994, 0.91372549533843994), (0.375,
0.85098040103912354, 0.85098040103912354), (0.5,
0.76862746477127075, 0.76862746477127075), (0.625,
0.67058825492858887, 0.67058825492858887), (0.75,
0.54509806632995605, 0.54509806632995605), (0.875,
0.42745098471641541, 0.42745098471641541), (1.0,
0.26666668057441711, 0.26666668057441711)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.89803922176361084, 0.89803922176361084), (0.25,
0.78039216995239258, 0.78039216995239258), (0.375,
0.63137257099151611, 0.63137257099151611), (0.5,
0.45490196347236633, 0.45490196347236633), (0.625,
0.25490197539329529, 0.25490197539329529), (0.75,
0.13725490868091583, 0.13725490868091583), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)]}
_Greys_data = {'blue': [(0.0, 1.0, 1.0), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.74117648601531982,
0.74117648601531982), (0.5, 0.58823531866073608, 0.58823531866073608),
(0.625, 0.45098039507865906, 0.45098039507865906), (0.75,
0.32156863808631897, 0.32156863808631897), (0.875,
0.14509804546833038, 0.14509804546833038), (1.0, 0.0, 0.0)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.74117648601531982,
0.74117648601531982), (0.5, 0.58823531866073608,
0.58823531866073608), (0.625, 0.45098039507865906,
0.45098039507865906), (0.75, 0.32156863808631897,
0.32156863808631897), (0.875, 0.14509804546833038,
0.14509804546833038), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.74117648601531982,
0.74117648601531982), (0.5, 0.58823531866073608,
0.58823531866073608), (0.625, 0.45098039507865906,
0.45098039507865906), (0.75, 0.32156863808631897,
0.32156863808631897), (0.875, 0.14509804546833038,
0.14509804546833038), (1.0, 0.0, 0.0)]}
_Oranges_data = {'blue': [(0.0, 0.92156863212585449,
0.92156863212585449), (0.125, 0.80784314870834351,
0.80784314870834351), (0.25, 0.63529413938522339,
0.63529413938522339), (0.375, 0.41960784792900085,
0.41960784792900085), (0.5, 0.23529411852359772, 0.23529411852359772),
(0.625, 0.074509806931018829, 0.074509806931018829), (0.75,
0.0039215688593685627, 0.0039215688593685627), (0.875,
0.011764706112444401, 0.011764706112444401), (1.0,
0.015686275437474251, 0.015686275437474251)],
'green': [(0.0, 0.96078431606292725, 0.96078431606292725), (0.125,
0.90196079015731812, 0.90196079015731812), (0.25,
0.81568628549575806, 0.81568628549575806), (0.375,
0.68235296010971069, 0.68235296010971069), (0.5,
0.55294120311737061, 0.55294120311737061), (0.625,
0.4117647111415863, 0.4117647111415863), (0.75,
0.28235295414924622, 0.28235295414924622), (0.875,
0.21176470816135406, 0.21176470816135406), (1.0,
0.15294118225574493, 0.15294118225574493)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99607843160629272,
0.99607843160629272), (0.25, 0.99215686321258545,
0.99215686321258545), (0.375, 0.99215686321258545,
0.99215686321258545), (0.5, 0.99215686321258545,
0.99215686321258545), (0.625, 0.94509804248809814,
0.94509804248809814), (0.75, 0.85098040103912354,
0.85098040103912354), (0.875, 0.65098041296005249,
0.65098041296005249), (1.0, 0.49803921580314636,
0.49803921580314636)]}
_OrRd_data = {'blue': [(0.0, 0.92549020051956177,
0.92549020051956177), (0.125, 0.78431373834609985,
0.78431373834609985), (0.25, 0.61960786581039429,
0.61960786581039429), (0.375, 0.51764708757400513,
0.51764708757400513), (0.5, 0.3490196168422699, 0.3490196168422699),
(0.625, 0.28235295414924622, 0.28235295414924622), (0.75,
0.12156862765550613, 0.12156862765550613), (0.875, 0.0, 0.0), (1.0,
0.0, 0.0)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.90980392694473267, 0.90980392694473267), (0.25,
0.83137255907058716, 0.83137255907058716), (0.375,
0.73333334922790527, 0.73333334922790527), (0.5,
0.55294120311737061, 0.55294120311737061), (0.625,
0.3960784375667572, 0.3960784375667572), (0.75,
0.18823529779911041, 0.18823529779911041), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99607843160629272,
0.99607843160629272), (0.25, 0.99215686321258545,
0.99215686321258545), (0.375, 0.99215686321258545,
0.99215686321258545), (0.5, 0.98823529481887817,
0.98823529481887817), (0.625, 0.93725490570068359,
0.93725490570068359), (0.75, 0.84313726425170898,
0.84313726425170898), (0.875, 0.70196080207824707,
0.70196080207824707), (1.0, 0.49803921580314636,
0.49803921580314636)]}
_Paired_data = {'blue': [(0.0, 0.89019608497619629,
0.89019608497619629), (0.090909090909090912, 0.70588237047195435,
0.70588237047195435), (0.18181818181818182, 0.54117649793624878,
0.54117649793624878), (0.27272727272727271, 0.17254902422428131,
0.17254902422428131), (0.36363636363636365, 0.60000002384185791,
0.60000002384185791), (0.45454545454545453, 0.10980392247438431,
0.10980392247438431), (0.54545454545454541, 0.43529412150382996,
0.43529412150382996), (0.63636363636363635, 0.0, 0.0),
(0.72727272727272729, 0.83921569585800171, 0.83921569585800171),
(0.81818181818181823, 0.60392159223556519, 0.60392159223556519),
(0.90909090909090906, 0.60000002384185791, 0.60000002384185791), (1.0,
0.15686275064945221, 0.15686275064945221)],
'green': [(0.0, 0.80784314870834351, 0.80784314870834351),
(0.090909090909090912, 0.47058823704719543, 0.47058823704719543),
(0.18181818181818182, 0.87450981140136719, 0.87450981140136719),
(0.27272727272727271, 0.62745100259780884, 0.62745100259780884),
(0.36363636363636365, 0.60392159223556519, 0.60392159223556519),
(0.45454545454545453, 0.10196078568696976, 0.10196078568696976),
(0.54545454545454541, 0.74901962280273438, 0.74901962280273438),
(0.63636363636363635, 0.49803921580314636, 0.49803921580314636),
(0.72727272727272729, 0.69803923368453979, 0.69803923368453979),
(0.81818181818181823, 0.23921568691730499, 0.23921568691730499),
(0.90909090909090906, 1.0, 1.0), (1.0, 0.3490196168422699,
0.3490196168422699)],
'red': [(0.0, 0.65098041296005249, 0.65098041296005249),
(0.090909090909090912, 0.12156862765550613, 0.12156862765550613),
(0.18181818181818182, 0.69803923368453979, 0.69803923368453979),
(0.27272727272727271, 0.20000000298023224, 0.20000000298023224),
(0.36363636363636365, 0.9843137264251709, 0.9843137264251709),
(0.45454545454545453, 0.89019608497619629, 0.89019608497619629),
(0.54545454545454541, 0.99215686321258545, 0.99215686321258545),
(0.63636363636363635, 1.0, 1.0), (0.72727272727272729,
0.7921568751335144, 0.7921568751335144), (0.81818181818181823,
0.41568627953529358, 0.41568627953529358), (0.90909090909090906,
1.0, 1.0), (1.0, 0.69411766529083252, 0.69411766529083252)]}
_Pastel1_data = {'blue': [(0.0, 0.68235296010971069,
0.68235296010971069), (0.125, 0.89019608497619629,
0.89019608497619629), (0.25, 0.77254903316497803,
0.77254903316497803), (0.375, 0.89411765336990356,
0.89411765336990356), (0.5, 0.65098041296005249, 0.65098041296005249),
(0.625, 0.80000001192092896, 0.80000001192092896), (0.75,
0.74117648601531982, 0.74117648601531982), (0.875,
0.92549020051956177, 0.92549020051956177), (1.0, 0.94901961088180542,
0.94901961088180542)],
'green': [(0.0, 0.70588237047195435, 0.70588237047195435), (0.125,
0.80392158031463623, 0.80392158031463623), (0.25,
0.92156863212585449, 0.92156863212585449), (0.375,
0.79607844352722168, 0.79607844352722168), (0.5,
0.85098040103912354, 0.85098040103912354), (0.625, 1.0, 1.0),
(0.75, 0.84705883264541626, 0.84705883264541626), (0.875,
0.85490196943283081, 0.85490196943283081), (1.0,
0.94901961088180542, 0.94901961088180542)],
'red': [(0.0, 0.9843137264251709, 0.9843137264251709), (0.125,
0.70196080207824707, 0.70196080207824707), (0.25,
0.80000001192092896, 0.80000001192092896), (0.375,
0.87058824300765991, 0.87058824300765991), (0.5,
0.99607843160629272, 0.99607843160629272), (0.625, 1.0, 1.0),
(0.75, 0.89803922176361084, 0.89803922176361084), (0.875,
0.99215686321258545, 0.99215686321258545), (1.0,
0.94901961088180542, 0.94901961088180542)]}
_Pastel2_data = {'blue': [(0.0, 0.80392158031463623,
0.80392158031463623), (0.14285714285714285, 0.67450982332229614,
0.67450982332229614), (0.2857142857142857, 0.90980392694473267,
0.90980392694473267), (0.42857142857142855, 0.89411765336990356,
0.89411765336990356), (0.5714285714285714, 0.78823530673980713,
0.78823530673980713), (0.7142857142857143, 0.68235296010971069,
0.68235296010971069), (0.8571428571428571, 0.80000001192092896,
0.80000001192092896), (1.0, 0.80000001192092896,
0.80000001192092896)],
'green': [(0.0, 0.88627451658248901, 0.88627451658248901),
(0.14285714285714285, 0.80392158031463623, 0.80392158031463623),
(0.2857142857142857, 0.83529412746429443, 0.83529412746429443),
(0.42857142857142855, 0.7921568751335144, 0.7921568751335144),
(0.5714285714285714, 0.96078431606292725, 0.96078431606292725),
(0.7142857142857143, 0.94901961088180542, 0.94901961088180542),
(0.8571428571428571, 0.88627451658248901, 0.88627451658248901),
(1.0, 0.80000001192092896, 0.80000001192092896)],
'red': [(0.0, 0.70196080207824707, 0.70196080207824707),
(0.14285714285714285, 0.99215686321258545, 0.99215686321258545),
(0.2857142857142857, 0.79607844352722168, 0.79607844352722168),
(0.42857142857142855, 0.95686274766921997, 0.95686274766921997),
(0.5714285714285714, 0.90196079015731812, 0.90196079015731812),
(0.7142857142857143, 1.0, 1.0), (0.8571428571428571,
0.94509804248809814, 0.94509804248809814), (1.0,
0.80000001192092896, 0.80000001192092896)]}
_PiYG_data = {'blue': [(0.0, 0.32156863808631897,
0.32156863808631897), (0.10000000000000001, 0.49019607901573181,
0.49019607901573181), (0.20000000000000001, 0.68235296010971069,
0.68235296010971069), (0.29999999999999999, 0.85490196943283081,
0.85490196943283081), (0.40000000000000002, 0.93725490570068359,
0.93725490570068359), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.81568628549575806, 0.81568628549575806),
(0.69999999999999996, 0.52549022436141968, 0.52549022436141968),
(0.80000000000000004, 0.25490197539329529, 0.25490197539329529),
(0.90000000000000002, 0.12941177189350128, 0.12941177189350128), (1.0,
0.098039217293262482, 0.098039217293262482)],
'green': [(0.0, 0.0039215688593685627, 0.0039215688593685627),
(0.10000000000000001, 0.10588235408067703, 0.10588235408067703),
(0.20000000000000001, 0.46666666865348816, 0.46666666865348816),
(0.29999999999999999, 0.7137255072593689, 0.7137255072593689),
(0.40000000000000002, 0.87843137979507446, 0.87843137979507446),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.96078431606292725, 0.96078431606292725),
(0.69999999999999996, 0.88235294818878174, 0.88235294818878174),
(0.80000000000000004, 0.73725491762161255, 0.73725491762161255),
(0.90000000000000002, 0.57254904508590698, 0.57254904508590698),
(1.0, 0.39215686917304993, 0.39215686917304993)],
'red': [(0.0, 0.55686277151107788, 0.55686277151107788),
(0.10000000000000001, 0.77254903316497803, 0.77254903316497803),
(0.20000000000000001, 0.87058824300765991, 0.87058824300765991),
(0.29999999999999999, 0.94509804248809814, 0.94509804248809814),
(0.40000000000000002, 0.99215686321258545, 0.99215686321258545),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.90196079015731812, 0.90196079015731812),
(0.69999999999999996, 0.72156864404678345, 0.72156864404678345),
(0.80000000000000004, 0.49803921580314636, 0.49803921580314636),
(0.90000000000000002, 0.30196079611778259, 0.30196079611778259),
(1.0, 0.15294118225574493, 0.15294118225574493)]}
_PRGn_data = {'blue': [(0.0, 0.29411765933036804,
0.29411765933036804), (0.10000000000000001, 0.51372551918029785,
0.51372551918029785), (0.20000000000000001, 0.67058825492858887,
0.67058825492858887), (0.29999999999999999, 0.81176471710205078,
0.81176471710205078), (0.40000000000000002, 0.90980392694473267,
0.90980392694473267), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.82745099067687988, 0.82745099067687988),
(0.69999999999999996, 0.62745100259780884, 0.62745100259780884),
(0.80000000000000004, 0.3803921639919281, 0.3803921639919281),
(0.90000000000000002, 0.21568627655506134, 0.21568627655506134), (1.0,
0.10588235408067703, 0.10588235408067703)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.16470588743686676, 0.16470588743686676), (0.20000000000000001,
0.43921568989753723, 0.43921568989753723), (0.29999999999999999,
0.64705884456634521, 0.64705884456634521), (0.40000000000000002,
0.83137255907058716, 0.83137255907058716), (0.5,
0.9686274528503418, 0.9686274528503418), (0.59999999999999998,
0.94117647409439087, 0.94117647409439087), (0.69999999999999996,
0.85882353782653809, 0.85882353782653809), (0.80000000000000004,
0.68235296010971069, 0.68235296010971069), (0.90000000000000002,
0.47058823704719543, 0.47058823704719543), (1.0,
0.26666668057441711, 0.26666668057441711)],
'red': [(0.0, 0.25098040699958801, 0.25098040699958801),
(0.10000000000000001, 0.46274510025978088, 0.46274510025978088),
(0.20000000000000001, 0.60000002384185791, 0.60000002384185791),
(0.29999999999999999, 0.7607843279838562, 0.7607843279838562),
(0.40000000000000002, 0.90588235855102539, 0.90588235855102539),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.85098040103912354, 0.85098040103912354),
(0.69999999999999996, 0.65098041296005249, 0.65098041296005249),
(0.80000000000000004, 0.35294118523597717, 0.35294118523597717),
(0.90000000000000002, 0.10588235408067703, 0.10588235408067703),
(1.0, 0.0, 0.0)]}
_PuBu_data = {'blue': [(0.0, 0.9843137264251709, 0.9843137264251709),
(0.125, 0.94901961088180542, 0.94901961088180542), (0.25,
0.90196079015731812, 0.90196079015731812), (0.375,
0.85882353782653809, 0.85882353782653809), (0.5, 0.81176471710205078,
0.81176471710205078), (0.625, 0.75294119119644165,
0.75294119119644165), (0.75, 0.69019609689712524,
0.69019609689712524), (0.875, 0.55294120311737061,
0.55294120311737061), (1.0, 0.34509804844856262,
0.34509804844856262)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.90588235855102539, 0.90588235855102539), (0.25,
0.81960785388946533, 0.81960785388946533), (0.375,
0.74117648601531982, 0.74117648601531982), (0.5,
0.66274511814117432, 0.66274511814117432), (0.625,
0.56470590829849243, 0.56470590829849243), (0.75,
0.43921568989753723, 0.43921568989753723), (0.875,
0.35294118523597717, 0.35294118523597717), (1.0,
0.21960784494876862, 0.21960784494876862)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.92549020051956177,
0.92549020051956177), (0.25, 0.81568628549575806,
0.81568628549575806), (0.375, 0.65098041296005249,
0.65098041296005249), (0.5, 0.45490196347236633,
0.45490196347236633), (0.625, 0.21176470816135406,
0.21176470816135406), (0.75, 0.019607843831181526,
0.019607843831181526), (0.875, 0.015686275437474251,
0.015686275437474251), (1.0, 0.0078431377187371254,
0.0078431377187371254)]}
_PuBuGn_data = {'blue': [(0.0, 0.9843137264251709,
0.9843137264251709), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.90196079015731812,
0.90196079015731812), (0.375, 0.85882353782653809,
0.85882353782653809), (0.5, 0.81176471710205078, 0.81176471710205078),
(0.625, 0.75294119119644165, 0.75294119119644165), (0.75,
0.54117649793624878, 0.54117649793624878), (0.875, 0.3490196168422699,
0.3490196168422699), (1.0, 0.21176470816135406, 0.21176470816135406)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.88627451658248901, 0.88627451658248901), (0.25,
0.81960785388946533, 0.81960785388946533), (0.375,
0.74117648601531982, 0.74117648601531982), (0.5,
0.66274511814117432, 0.66274511814117432), (0.625,
0.56470590829849243, 0.56470590829849243), (0.75,
0.5058823823928833, 0.5058823823928833), (0.875,
0.42352941632270813, 0.42352941632270813), (1.0,
0.27450981736183167, 0.27450981736183167)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.92549020051956177,
0.92549020051956177), (0.25, 0.81568628549575806,
0.81568628549575806), (0.375, 0.65098041296005249,
0.65098041296005249), (0.5, 0.40392157435417175,
0.40392157435417175), (0.625, 0.21176470816135406,
0.21176470816135406), (0.75, 0.0078431377187371254,
0.0078431377187371254), (0.875, 0.0039215688593685627,
0.0039215688593685627), (1.0, 0.0039215688593685627,
0.0039215688593685627)]}
_PuOr_data = {'blue': [(0.0, 0.031372550874948502,
0.031372550874948502), (0.10000000000000001, 0.023529412224888802,
0.023529412224888802), (0.20000000000000001, 0.078431375324726105,
0.078431375324726105), (0.29999999999999999, 0.38823530077934265,
0.38823530077934265), (0.40000000000000002, 0.7137255072593689,
0.7137255072593689), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.92156863212585449, 0.92156863212585449),
(0.69999999999999996, 0.82352942228317261, 0.82352942228317261),
(0.80000000000000004, 0.67450982332229614, 0.67450982332229614),
(0.90000000000000002, 0.53333336114883423, 0.53333336114883423), (1.0,
0.29411765933036804, 0.29411765933036804)],
'green': [(0.0, 0.23137255012989044, 0.23137255012989044),
(0.10000000000000001, 0.34509804844856262, 0.34509804844856262),
(0.20000000000000001, 0.50980395078659058, 0.50980395078659058),
(0.29999999999999999, 0.72156864404678345, 0.72156864404678345),
(0.40000000000000002, 0.87843137979507446, 0.87843137979507446),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.85490196943283081, 0.85490196943283081),
(0.69999999999999996, 0.67058825492858887, 0.67058825492858887),
(0.80000000000000004, 0.45098039507865906, 0.45098039507865906),
(0.90000000000000002, 0.15294118225574493, 0.15294118225574493),
(1.0, 0.0, 0.0)],
'red': [(0.0, 0.49803921580314636, 0.49803921580314636),
(0.10000000000000001, 0.70196080207824707, 0.70196080207824707),
(0.20000000000000001, 0.87843137979507446, 0.87843137979507446),
(0.29999999999999999, 0.99215686321258545, 0.99215686321258545),
(0.40000000000000002, 0.99607843160629272, 0.99607843160629272),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.84705883264541626, 0.84705883264541626),
(0.69999999999999996, 0.69803923368453979, 0.69803923368453979),
(0.80000000000000004, 0.50196081399917603, 0.50196081399917603),
(0.90000000000000002, 0.32941177487373352, 0.32941177487373352),
(1.0, 0.17647059261798859, 0.17647059261798859)]}
_PuRd_data = {'blue': [(0.0, 0.97647058963775635,
0.97647058963775635), (0.125, 0.93725490570068359,
0.93725490570068359), (0.25, 0.85490196943283081,
0.85490196943283081), (0.375, 0.78039216995239258,
0.78039216995239258), (0.5, 0.69019609689712524, 0.69019609689712524),
(0.625, 0.54117649793624878, 0.54117649793624878), (0.75,
0.33725491166114807, 0.33725491166114807), (0.875,
0.26274511218070984, 0.26274511218070984), (1.0, 0.12156862765550613,
0.12156862765550613)],
'green': [(0.0, 0.95686274766921997, 0.95686274766921997), (0.125,
0.88235294818878174, 0.88235294818878174), (0.25,
0.72549021244049072, 0.72549021244049072), (0.375,
0.58039218187332153, 0.58039218187332153), (0.5,
0.3960784375667572, 0.3960784375667572), (0.625,
0.16078431904315948, 0.16078431904315948), (0.75,
0.070588238537311554, 0.070588238537311554), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.90588235855102539, 0.90588235855102539), (0.25,
0.83137255907058716, 0.83137255907058716), (0.375,
0.78823530673980713, 0.78823530673980713), (0.5,
0.87450981140136719, 0.87450981140136719), (0.625,
0.90588235855102539, 0.90588235855102539), (0.75,
0.80784314870834351, 0.80784314870834351), (0.875,
0.59607845544815063, 0.59607845544815063), (1.0,
0.40392157435417175, 0.40392157435417175)]}
_Purples_data = {'blue': [(0.0, 0.99215686321258545,
0.99215686321258545), (0.125, 0.96078431606292725,
0.96078431606292725), (0.25, 0.92156863212585449,
0.92156863212585449), (0.375, 0.86274510622024536,
0.86274510622024536), (0.5, 0.78431373834609985, 0.78431373834609985),
(0.625, 0.729411780834198, 0.729411780834198), (0.75,
0.63921570777893066, 0.63921570777893066), (0.875,
0.56078433990478516, 0.56078433990478516), (1.0, 0.49019607901573181,
0.49019607901573181)],
'green': [(0.0, 0.9843137264251709, 0.9843137264251709), (0.125,
0.92941176891326904, 0.92941176891326904), (0.25,
0.85490196943283081, 0.85490196943283081), (0.375,
0.74117648601531982, 0.74117648601531982), (0.5,
0.60392159223556519, 0.60392159223556519), (0.625,
0.49019607901573181, 0.49019607901573181), (0.75,
0.31764706969261169, 0.31764706969261169), (0.875,
0.15294118225574493, 0.15294118225574493), (1.0, 0.0, 0.0)],
'red': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.93725490570068359, 0.93725490570068359), (0.25,
0.85490196943283081, 0.85490196943283081), (0.375,
0.73725491762161255, 0.73725491762161255), (0.5,
0.61960786581039429, 0.61960786581039429), (0.625,
0.50196081399917603, 0.50196081399917603), (0.75,
0.41568627953529358, 0.41568627953529358), (0.875,
0.32941177487373352, 0.32941177487373352), (1.0,
0.24705882370471954, 0.24705882370471954)]}
_RdBu_data = {'blue': [(0.0, 0.12156862765550613,
0.12156862765550613), (0.10000000000000001, 0.16862745583057404,
0.16862745583057404), (0.20000000000000001, 0.30196079611778259,
0.30196079611778259), (0.29999999999999999, 0.50980395078659058,
0.50980395078659058), (0.40000000000000002, 0.78039216995239258,
0.78039216995239258), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.94117647409439087, 0.94117647409439087),
(0.69999999999999996, 0.87058824300765991, 0.87058824300765991),
(0.80000000000000004, 0.76470589637756348, 0.76470589637756348),
(0.90000000000000002, 0.67450982332229614, 0.67450982332229614), (1.0,
0.3803921639919281, 0.3803921639919281)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.094117648899555206, 0.094117648899555206), (0.20000000000000001,
0.37647059559822083, 0.37647059559822083), (0.29999999999999999,
0.64705884456634521, 0.64705884456634521), (0.40000000000000002,
0.85882353782653809, 0.85882353782653809), (0.5,
0.9686274528503418, 0.9686274528503418), (0.59999999999999998,
0.89803922176361084, 0.89803922176361084), (0.69999999999999996,
0.77254903316497803, 0.77254903316497803), (0.80000000000000004,
0.57647061347961426, 0.57647061347961426), (0.90000000000000002,
0.40000000596046448, 0.40000000596046448), (1.0,
0.18823529779911041, 0.18823529779911041)],
'red': [(0.0, 0.40392157435417175, 0.40392157435417175),
(0.10000000000000001, 0.69803923368453979, 0.69803923368453979),
(0.20000000000000001, 0.83921569585800171, 0.83921569585800171),
(0.29999999999999999, 0.95686274766921997, 0.95686274766921997),
(0.40000000000000002, 0.99215686321258545, 0.99215686321258545),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.81960785388946533, 0.81960785388946533),
(0.69999999999999996, 0.57254904508590698, 0.57254904508590698),
(0.80000000000000004, 0.26274511218070984, 0.26274511218070984),
(0.90000000000000002, 0.12941177189350128, 0.12941177189350128),
(1.0, 0.019607843831181526, 0.019607843831181526)]}
_RdGy_data = {'blue': [(0.0, 0.12156862765550613,
0.12156862765550613), (0.10000000000000001, 0.16862745583057404,
0.16862745583057404), (0.20000000000000001, 0.30196079611778259,
0.30196079611778259), (0.29999999999999999, 0.50980395078659058,
0.50980395078659058), (0.40000000000000002, 0.78039216995239258,
0.78039216995239258), (0.5, 1.0, 1.0), (0.59999999999999998,
0.87843137979507446, 0.87843137979507446), (0.69999999999999996,
0.729411780834198, 0.729411780834198), (0.80000000000000004,
0.52941179275512695, 0.52941179275512695), (0.90000000000000002,
0.30196079611778259, 0.30196079611778259), (1.0, 0.10196078568696976,
0.10196078568696976)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.094117648899555206, 0.094117648899555206), (0.20000000000000001,
0.37647059559822083, 0.37647059559822083), (0.29999999999999999,
0.64705884456634521, 0.64705884456634521), (0.40000000000000002,
0.85882353782653809, 0.85882353782653809), (0.5, 1.0, 1.0),
(0.59999999999999998, 0.87843137979507446, 0.87843137979507446),
(0.69999999999999996, 0.729411780834198, 0.729411780834198),
(0.80000000000000004, 0.52941179275512695, 0.52941179275512695),
(0.90000000000000002, 0.30196079611778259, 0.30196079611778259),
(1.0, 0.10196078568696976, 0.10196078568696976)],
'red': [(0.0, 0.40392157435417175, 0.40392157435417175),
(0.10000000000000001, 0.69803923368453979, 0.69803923368453979),
(0.20000000000000001, 0.83921569585800171, 0.83921569585800171),
(0.29999999999999999, 0.95686274766921997, 0.95686274766921997),
(0.40000000000000002, 0.99215686321258545, 0.99215686321258545),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.87843137979507446,
0.87843137979507446), (0.69999999999999996, 0.729411780834198,
0.729411780834198), (0.80000000000000004, 0.52941179275512695,
0.52941179275512695), (0.90000000000000002, 0.30196079611778259,
0.30196079611778259), (1.0, 0.10196078568696976,
0.10196078568696976)]}
_RdPu_data = {'blue': [(0.0, 0.9529411792755127, 0.9529411792755127),
(0.125, 0.86666667461395264, 0.86666667461395264), (0.25,
0.75294119119644165, 0.75294119119644165), (0.375,
0.70980393886566162, 0.70980393886566162), (0.5, 0.63137257099151611,
0.63137257099151611), (0.625, 0.59215688705444336,
0.59215688705444336), (0.75, 0.49411764740943909,
0.49411764740943909), (0.875, 0.46666666865348816,
0.46666666865348816), (1.0, 0.41568627953529358,
0.41568627953529358)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.77254903316497803, 0.77254903316497803), (0.375,
0.62352943420410156, 0.62352943420410156), (0.5,
0.40784314274787903, 0.40784314274787903), (0.625,
0.20392157137393951, 0.20392157137393951), (0.75,
0.0039215688593685627, 0.0039215688593685627), (0.875,
0.0039215688593685627, 0.0039215688593685627), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99215686321258545,
0.99215686321258545), (0.25, 0.98823529481887817,
0.98823529481887817), (0.375, 0.98039215803146362,
0.98039215803146362), (0.5, 0.9686274528503418,
0.9686274528503418), (0.625, 0.86666667461395264,
0.86666667461395264), (0.75, 0.68235296010971069,
0.68235296010971069), (0.875, 0.47843137383460999,
0.47843137383460999), (1.0, 0.28627452254295349,
0.28627452254295349)]}
_RdYlBu_data = {'blue': [(0.0, 0.14901961386203766,
0.14901961386203766), (0.10000000149011612,
0.15294118225574493, 0.15294118225574493),
(0.20000000298023224, 0.26274511218070984,
0.26274511218070984), (0.30000001192092896,
0.3803921639919281, 0.3803921639919281),
(0.40000000596046448, 0.56470590829849243,
0.56470590829849243), (0.5, 0.74901962280273438,
0.74901962280273438), (0.60000002384185791,
0.97254902124404907, 0.97254902124404907),
(0.69999998807907104, 0.91372549533843994,
0.91372549533843994), (0.80000001192092896,
0.81960785388946533, 0.81960785388946533),
(0.89999997615814209, 0.70588237047195435,
0.70588237047195435), (1.0, 0.58431375026702881,
0.58431375026702881)], 'green': [(0.0, 0.0, 0.0),
(0.10000000149011612, 0.18823529779911041,
0.18823529779911041), (0.20000000298023224,
0.42745098471641541, 0.42745098471641541),
(0.30000001192092896, 0.68235296010971069,
0.68235296010971069), (0.40000000596046448,
0.87843137979507446, 0.87843137979507446), (0.5, 1.0,
1.0), (0.60000002384185791, 0.9529411792755127,
0.9529411792755127), (0.69999998807907104,
0.85098040103912354, 0.85098040103912354),
(0.80000001192092896, 0.67843139171600342,
0.67843139171600342), (0.89999997615814209,
0.45882353186607361, 0.45882353186607361), (1.0,
0.21176470816135406, 0.21176470816135406)], 'red':
[(0.0, 0.64705884456634521, 0.64705884456634521),
(0.10000000149011612, 0.84313726425170898,
0.84313726425170898), (0.20000000298023224,
0.95686274766921997, 0.95686274766921997),
(0.30000001192092896, 0.99215686321258545,
0.99215686321258545), (0.40000000596046448,
0.99607843160629272, 0.99607843160629272), (0.5, 1.0,
1.0), (0.60000002384185791, 0.87843137979507446,
0.87843137979507446), (0.69999998807907104,
0.67058825492858887, 0.67058825492858887),
(0.80000001192092896, 0.45490196347236633,
0.45490196347236633), (0.89999997615814209,
0.27058824896812439, 0.27058824896812439), (1.0,
0.19215686619281769, 0.19215686619281769)]}
_RdYlGn_data = {'blue': [(0.0, 0.14901961386203766,
0.14901961386203766), (0.10000000000000001, 0.15294118225574493,
0.15294118225574493), (0.20000000000000001, 0.26274511218070984,
0.26274511218070984), (0.29999999999999999, 0.3803921639919281,
0.3803921639919281), (0.40000000000000002, 0.54509806632995605,
0.54509806632995605), (0.5, 0.74901962280273438, 0.74901962280273438),
(0.59999999999999998, 0.54509806632995605, 0.54509806632995605),
(0.69999999999999996, 0.41568627953529358, 0.41568627953529358),
(0.80000000000000004, 0.38823530077934265, 0.38823530077934265),
(0.90000000000000002, 0.31372550129890442, 0.31372550129890442), (1.0,
0.21568627655506134, 0.21568627655506134)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.18823529779911041, 0.18823529779911041), (0.20000000000000001,
0.42745098471641541, 0.42745098471641541), (0.29999999999999999,
0.68235296010971069, 0.68235296010971069), (0.40000000000000002,
0.87843137979507446, 0.87843137979507446), (0.5, 1.0, 1.0),
(0.59999999999999998, 0.93725490570068359, 0.93725490570068359),
(0.69999999999999996, 0.85098040103912354, 0.85098040103912354),
(0.80000000000000004, 0.74117648601531982, 0.74117648601531982),
(0.90000000000000002, 0.59607845544815063, 0.59607845544815063),
(1.0, 0.40784314274787903, 0.40784314274787903)],
'red': [(0.0, 0.64705884456634521, 0.64705884456634521),
(0.10000000000000001, 0.84313726425170898, 0.84313726425170898),
(0.20000000000000001, 0.95686274766921997, 0.95686274766921997),
(0.29999999999999999, 0.99215686321258545, 0.99215686321258545),
(0.40000000000000002, 0.99607843160629272, 0.99607843160629272),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.85098040103912354,
0.85098040103912354), (0.69999999999999996, 0.65098041296005249,
0.65098041296005249), (0.80000000000000004, 0.40000000596046448,
0.40000000596046448), (0.90000000000000002, 0.10196078568696976,
0.10196078568696976), (1.0, 0.0, 0.0)]}
_Reds_data = {'blue': [(0.0, 0.94117647409439087,
0.94117647409439087), (0.125, 0.82352942228317261,
0.82352942228317261), (0.25, 0.63137257099151611,
0.63137257099151611), (0.375, 0.44705882668495178,
0.44705882668495178), (0.5, 0.29019609093666077, 0.29019609093666077),
(0.625, 0.17254902422428131, 0.17254902422428131), (0.75,
0.11372549086809158, 0.11372549086809158), (0.875,
0.08235294371843338, 0.08235294371843338), (1.0, 0.050980392843484879,
0.050980392843484879)],
'green': [(0.0, 0.96078431606292725, 0.96078431606292725), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.73333334922790527, 0.73333334922790527), (0.375,
0.57254904508590698, 0.57254904508590698), (0.5,
0.41568627953529358, 0.41568627953529358), (0.625,
0.23137255012989044, 0.23137255012989044), (0.75,
0.094117648899555206, 0.094117648899555206), (0.875,
0.058823529630899429, 0.058823529630899429), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99607843160629272,
0.99607843160629272), (0.25, 0.98823529481887817,
0.98823529481887817), (0.375, 0.98823529481887817,
0.98823529481887817), (0.5, 0.9843137264251709,
0.9843137264251709), (0.625, 0.93725490570068359,
0.93725490570068359), (0.75, 0.79607844352722168,
0.79607844352722168), (0.875, 0.64705884456634521,
0.64705884456634521), (1.0, 0.40392157435417175,
0.40392157435417175)]}
_Set1_data = {'blue': [(0.0, 0.10980392247438431,
0.10980392247438431), (0.125, 0.72156864404678345,
0.72156864404678345), (0.25, 0.29019609093666077,
0.29019609093666077), (0.375, 0.63921570777893066,
0.63921570777893066), (0.5, 0.0, 0.0), (0.625, 0.20000000298023224,
0.20000000298023224), (0.75, 0.15686275064945221,
0.15686275064945221), (0.875, 0.74901962280273438,
0.74901962280273438), (1.0, 0.60000002384185791,
0.60000002384185791)],
'green': [(0.0, 0.10196078568696976, 0.10196078568696976), (0.125,
0.49411764740943909, 0.49411764740943909), (0.25,
0.68627452850341797, 0.68627452850341797), (0.375,
0.30588236451148987, 0.30588236451148987), (0.5,
0.49803921580314636, 0.49803921580314636), (0.625, 1.0, 1.0),
(0.75, 0.33725491166114807, 0.33725491166114807), (0.875,
0.5058823823928833, 0.5058823823928833), (1.0,
0.60000002384185791, 0.60000002384185791)],
'red': [(0.0, 0.89411765336990356, 0.89411765336990356), (0.125,
0.21568627655506134, 0.21568627655506134), (0.25,
0.30196079611778259, 0.30196079611778259), (0.375,
0.59607845544815063, 0.59607845544815063), (0.5, 1.0, 1.0),
(0.625, 1.0, 1.0), (0.75, 0.65098041296005249,
0.65098041296005249), (0.875, 0.9686274528503418,
0.9686274528503418), (1.0, 0.60000002384185791,
0.60000002384185791)]}
_Set2_data = {'blue': [(0.0, 0.64705884456634521,
0.64705884456634521), (0.14285714285714285, 0.38431373238563538,
0.38431373238563538), (0.2857142857142857, 0.79607844352722168,
0.79607844352722168), (0.42857142857142855, 0.76470589637756348,
0.76470589637756348), (0.5714285714285714, 0.32941177487373352,
0.32941177487373352), (0.7142857142857143, 0.18431372940540314,
0.18431372940540314), (0.8571428571428571, 0.58039218187332153,
0.58039218187332153), (1.0, 0.70196080207824707,
0.70196080207824707)],
'green': [(0.0, 0.7607843279838562, 0.7607843279838562),
(0.14285714285714285, 0.55294120311737061, 0.55294120311737061),
(0.2857142857142857, 0.62745100259780884, 0.62745100259780884),
(0.42857142857142855, 0.54117649793624878, 0.54117649793624878),
(0.5714285714285714, 0.84705883264541626, 0.84705883264541626),
(0.7142857142857143, 0.85098040103912354, 0.85098040103912354),
(0.8571428571428571, 0.76862746477127075, 0.76862746477127075),
(1.0, 0.70196080207824707, 0.70196080207824707)],
'red': [(0.0, 0.40000000596046448, 0.40000000596046448),
(0.14285714285714285, 0.98823529481887817, 0.98823529481887817),
(0.2857142857142857, 0.55294120311737061, 0.55294120311737061),
(0.42857142857142855, 0.90588235855102539, 0.90588235855102539),
(0.5714285714285714, 0.65098041296005249, 0.65098041296005249),
(0.7142857142857143, 1.0, 1.0), (0.8571428571428571,
0.89803922176361084, 0.89803922176361084), (1.0,
0.70196080207824707, 0.70196080207824707)]}
_Set3_data = {'blue': [(0.0, 0.78039216995239258,
0.78039216995239258), (0.090909090909090912, 0.70196080207824707,
0.70196080207824707), (0.18181818181818182, 0.85490196943283081,
0.85490196943283081), (0.27272727272727271, 0.44705882668495178,
0.44705882668495178), (0.36363636363636365, 0.82745099067687988,
0.82745099067687988), (0.45454545454545453, 0.38431373238563538,
0.38431373238563538), (0.54545454545454541, 0.4117647111415863,
0.4117647111415863), (0.63636363636363635, 0.89803922176361084,
0.89803922176361084), (0.72727272727272729, 0.85098040103912354,
0.85098040103912354), (0.81818181818181823, 0.74117648601531982,
0.74117648601531982), (0.90909090909090906, 0.77254903316497803,
0.77254903316497803), (1.0, 0.43529412150382996,
0.43529412150382996)],
'green': [(0.0, 0.82745099067687988, 0.82745099067687988),
(0.090909090909090912, 1.0, 1.0), (0.18181818181818182,
0.729411780834198, 0.729411780834198), (0.27272727272727271,
0.50196081399917603, 0.50196081399917603), (0.36363636363636365,
0.69411766529083252, 0.69411766529083252), (0.45454545454545453,
0.70588237047195435, 0.70588237047195435), (0.54545454545454541,
0.87058824300765991, 0.87058824300765991), (0.63636363636363635,
0.80392158031463623, 0.80392158031463623), (0.72727272727272729,
0.85098040103912354, 0.85098040103912354), (0.81818181818181823,
0.50196081399917603, 0.50196081399917603), (0.90909090909090906,
0.92156863212585449, 0.92156863212585449), (1.0,
0.92941176891326904, 0.92941176891326904)],
'red': [(0.0, 0.55294120311737061, 0.55294120311737061),
(0.090909090909090912, 1.0, 1.0), (0.18181818181818182,
0.7450980544090271, 0.7450980544090271), (0.27272727272727271,
0.9843137264251709, 0.9843137264251709), (0.36363636363636365,
0.50196081399917603, 0.50196081399917603), (0.45454545454545453,
0.99215686321258545, 0.99215686321258545), (0.54545454545454541,
0.70196080207824707, 0.70196080207824707), (0.63636363636363635,
0.98823529481887817, 0.98823529481887817), (0.72727272727272729,
0.85098040103912354, 0.85098040103912354), (0.81818181818181823,
0.73725491762161255, 0.73725491762161255), (0.90909090909090906,
0.80000001192092896, 0.80000001192092896), (1.0, 1.0, 1.0)]}
_Spectral_data = {'blue': [(0.0, 0.25882354378700256,
0.25882354378700256), (0.10000000000000001, 0.30980393290519714,
0.30980393290519714), (0.20000000000000001, 0.26274511218070984,
0.26274511218070984), (0.29999999999999999, 0.3803921639919281,
0.3803921639919281), (0.40000000000000002, 0.54509806632995605,
0.54509806632995605), (0.5, 0.74901962280273438, 0.74901962280273438),
(0.59999999999999998, 0.59607845544815063, 0.59607845544815063),
(0.69999999999999996, 0.64313727617263794, 0.64313727617263794),
(0.80000000000000004, 0.64705884456634521, 0.64705884456634521),
(0.90000000000000002, 0.74117648601531982, 0.74117648601531982), (1.0,
0.63529413938522339, 0.63529413938522339)],
'green': [(0.0, 0.0039215688593685627, 0.0039215688593685627),
(0.10000000000000001, 0.24313725531101227, 0.24313725531101227),
(0.20000000000000001, 0.42745098471641541, 0.42745098471641541),
(0.29999999999999999, 0.68235296010971069, 0.68235296010971069),
(0.40000000000000002, 0.87843137979507446, 0.87843137979507446),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.96078431606292725,
0.96078431606292725), (0.69999999999999996, 0.86666667461395264,
0.86666667461395264), (0.80000000000000004, 0.7607843279838562,
0.7607843279838562), (0.90000000000000002, 0.53333336114883423,
0.53333336114883423), (1.0, 0.30980393290519714,
0.30980393290519714)],
'red': [(0.0, 0.61960786581039429, 0.61960786581039429),
(0.10000000000000001, 0.83529412746429443, 0.83529412746429443),
(0.20000000000000001, 0.95686274766921997, 0.95686274766921997),
(0.29999999999999999, 0.99215686321258545, 0.99215686321258545),
(0.40000000000000002, 0.99607843160629272, 0.99607843160629272),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.90196079015731812,
0.90196079015731812), (0.69999999999999996, 0.67058825492858887,
0.67058825492858887), (0.80000000000000004, 0.40000000596046448,
0.40000000596046448), (0.90000000000000002, 0.19607843458652496,
0.19607843458652496), (1.0, 0.36862745881080627,
0.36862745881080627)]}
_YlGn_data = {'blue': [(0.0, 0.89803922176361084,
0.89803922176361084), (0.125, 0.72549021244049072,
0.72549021244049072), (0.25, 0.63921570777893066,
0.63921570777893066), (0.375, 0.55686277151107788,
0.55686277151107788), (0.5, 0.47450980544090271, 0.47450980544090271),
(0.625, 0.364705890417099, 0.364705890417099), (0.75,
0.26274511218070984, 0.26274511218070984), (0.875,
0.21568627655506134, 0.21568627655506134), (1.0, 0.16078431904315948,
0.16078431904315948)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.98823529481887817,
0.98823529481887817), (0.25, 0.94117647409439087,
0.94117647409439087), (0.375, 0.86666667461395264,
0.86666667461395264), (0.5, 0.7764706015586853,
0.7764706015586853), (0.625, 0.67058825492858887,
0.67058825492858887), (0.75, 0.51764708757400513,
0.51764708757400513), (0.875, 0.40784314274787903,
0.40784314274787903), (1.0, 0.27058824896812439,
0.27058824896812439)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.9686274528503418,
0.9686274528503418), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.67843139171600342,
0.67843139171600342), (0.5, 0.47058823704719543,
0.47058823704719543), (0.625, 0.25490197539329529,
0.25490197539329529), (0.75, 0.13725490868091583,
0.13725490868091583), (0.875, 0.0, 0.0), (1.0, 0.0, 0.0)]}
_YlGnBu_data = {'blue': [(0.0, 0.85098040103912354,
0.85098040103912354), (0.125, 0.69411766529083252,
0.69411766529083252), (0.25, 0.70588237047195435,
0.70588237047195435), (0.375, 0.73333334922790527,
0.73333334922790527), (0.5, 0.76862746477127075, 0.76862746477127075),
(0.625, 0.75294119119644165, 0.75294119119644165), (0.75,
0.65882354974746704, 0.65882354974746704), (0.875,
0.58039218187332153, 0.58039218187332153), (1.0, 0.34509804844856262,
0.34509804844856262)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.97254902124404907,
0.97254902124404907), (0.25, 0.91372549533843994,
0.91372549533843994), (0.375, 0.80392158031463623,
0.80392158031463623), (0.5, 0.7137255072593689,
0.7137255072593689), (0.625, 0.56862747669219971,
0.56862747669219971), (0.75, 0.36862745881080627,
0.36862745881080627), (0.875, 0.20392157137393951,
0.20392157137393951), (1.0, 0.11372549086809158,
0.11372549086809158)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.92941176891326904,
0.92941176891326904), (0.25, 0.78039216995239258,
0.78039216995239258), (0.375, 0.49803921580314636,
0.49803921580314636), (0.5, 0.25490197539329529,
0.25490197539329529), (0.625, 0.11372549086809158,
0.11372549086809158), (0.75, 0.13333334028720856,
0.13333334028720856), (0.875, 0.14509804546833038,
0.14509804546833038), (1.0, 0.031372550874948502,
0.031372550874948502)]}
_YlOrBr_data = {'blue': [(0.0, 0.89803922176361084,
0.89803922176361084), (0.125, 0.73725491762161255,
0.73725491762161255), (0.25, 0.56862747669219971,
0.56862747669219971), (0.375, 0.30980393290519714,
0.30980393290519714), (0.5, 0.16078431904315948, 0.16078431904315948),
(0.625, 0.078431375324726105, 0.078431375324726105), (0.75,
0.0078431377187371254, 0.0078431377187371254), (0.875,
0.015686275437474251, 0.015686275437474251), (1.0,
0.023529412224888802, 0.023529412224888802)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.9686274528503418,
0.9686274528503418), (0.25, 0.89019608497619629,
0.89019608497619629), (0.375, 0.76862746477127075,
0.76862746477127075), (0.5, 0.60000002384185791,
0.60000002384185791), (0.625, 0.43921568989753723,
0.43921568989753723), (0.75, 0.29803922772407532,
0.29803922772407532), (0.875, 0.20392157137393951,
0.20392157137393951), (1.0, 0.14509804546833038,
0.14509804546833038)],
'red': [(0.0, 1.0, 1.0), (0.125, 1.0, 1.0), (0.25,
0.99607843160629272, 0.99607843160629272), (0.375,
0.99607843160629272, 0.99607843160629272), (0.5,
0.99607843160629272, 0.99607843160629272), (0.625,
0.92549020051956177, 0.92549020051956177), (0.75,
0.80000001192092896, 0.80000001192092896), (0.875,
0.60000002384185791, 0.60000002384185791), (1.0,
0.40000000596046448, 0.40000000596046448)]}
_YlOrRd_data = {'blue': [(0.0, 0.80000001192092896,
0.80000001192092896), (0.125, 0.62745100259780884,
0.62745100259780884), (0.25, 0.46274510025978088,
0.46274510025978088), (0.375, 0.29803922772407532,
0.29803922772407532), (0.5, 0.23529411852359772, 0.23529411852359772),
(0.625, 0.16470588743686676, 0.16470588743686676), (0.75,
0.10980392247438431, 0.10980392247438431), (0.875,
0.14901961386203766, 0.14901961386203766), (1.0, 0.14901961386203766,
0.14901961386203766)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.92941176891326904,
0.92941176891326904), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.69803923368453979,
0.69803923368453979), (0.5, 0.55294120311737061,
0.55294120311737061), (0.625, 0.30588236451148987,
0.30588236451148987), (0.75, 0.10196078568696976,
0.10196078568696976), (0.875, 0.0, 0.0), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 1.0, 1.0), (0.25,
0.99607843160629272, 0.99607843160629272), (0.375,
0.99607843160629272, 0.99607843160629272), (0.5,
0.99215686321258545, 0.99215686321258545), (0.625,
0.98823529481887817, 0.98823529481887817), (0.75,
0.89019608497619629, 0.89019608497619629), (0.875,
0.74117648601531982, 0.74117648601531982), (1.0,
0.50196081399917603, 0.50196081399917603)]}
# The next 7 palettes are from the Yorick scientific visualisation package,
# an evolution of the GIST package, both by David H. Munro.
# They are released under a BSD-like license (see LICENSE_YORICK in
# the license directory of the matplotlib source distribution).
#
# Most palette functions have been reduced to simple function descriptions
# by Reinier Heeres, since the rgb components were mostly straight lines.
# gist_earth_data and gist_ncar_data were simplified by a script and some
# manual effort.
_gist_earth_data = \
{'red': (
(0.0, 0.0, 0.0000),
(0.2824, 0.1882, 0.1882),
(0.4588, 0.2714, 0.2714),
(0.5490, 0.4719, 0.4719),
(0.6980, 0.7176, 0.7176),
(0.7882, 0.7553, 0.7553),
(1.0000, 0.9922, 0.9922),
), 'green': (
(0.0, 0.0, 0.0000),
(0.0275, 0.0000, 0.0000),
(0.1098, 0.1893, 0.1893),
(0.1647, 0.3035, 0.3035),
(0.2078, 0.3841, 0.3841),
(0.2824, 0.5020, 0.5020),
(0.5216, 0.6397, 0.6397),
(0.6980, 0.7171, 0.7171),
(0.7882, 0.6392, 0.6392),
(0.7922, 0.6413, 0.6413),
(0.8000, 0.6447, 0.6447),
(0.8078, 0.6481, 0.6481),
(0.8157, 0.6549, 0.6549),
(0.8667, 0.6991, 0.6991),
(0.8745, 0.7103, 0.7103),
(0.8824, 0.7216, 0.7216),
(0.8902, 0.7323, 0.7323),
(0.8980, 0.7430, 0.7430),
(0.9412, 0.8275, 0.8275),
(0.9569, 0.8635, 0.8635),
(0.9647, 0.8816, 0.8816),
(0.9961, 0.9733, 0.9733),
(1.0000, 0.9843, 0.9843),
), 'blue': (
(0.0, 0.0, 0.0000),
(0.0039, 0.1684, 0.1684),
(0.0078, 0.2212, 0.2212),
(0.0275, 0.4329, 0.4329),
(0.0314, 0.4549, 0.4549),
(0.2824, 0.5004, 0.5004),
(0.4667, 0.2748, 0.2748),
(0.5451, 0.3205, 0.3205),
(0.7843, 0.3961, 0.3961),
(0.8941, 0.6651, 0.6651),
(1.0000, 0.9843, 0.9843),
)}
_gist_gray_data = {
'red': gfunc[3],
'green': gfunc[3],
'blue': gfunc[3],
}
_gist_heat_data = {
'red': lambda x: 1.5 * x,
'green': lambda x: 2 * x - 1,
'blue': lambda x: 4 * x - 3,
}
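# A small illustration of how these callable channels behave: each function is
# evaluated on x in [0, 1] and the lookup-table construction clips the result
# back into [0, 1], so at x = 0.5 the entries above give red = 0.75,
# green = 0.0 and blue = max(4 * 0.5 - 3, 0) = 0.0.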
_gist_ncar_data = \
{'red': (
(0.0, 0.0, 0.0000),
(0.3098, 0.0000, 0.0000),
(0.3725, 0.3993, 0.3993),
(0.4235, 0.5003, 0.5003),
(0.5333, 1.0000, 1.0000),
(0.7922, 1.0000, 1.0000),
(0.8471, 0.6218, 0.6218),
(0.8980, 0.9235, 0.9235),
(1.0000, 0.9961, 0.9961),
), 'green': (
(0.0, 0.0, 0.0000),
(0.0510, 0.3722, 0.3722),
(0.1059, 0.0000, 0.0000),
(0.1569, 0.7202, 0.7202),
(0.1608, 0.7537, 0.7537),
(0.1647, 0.7752, 0.7752),
(0.2157, 1.0000, 1.0000),
(0.2588, 0.9804, 0.9804),
(0.2706, 0.9804, 0.9804),
(0.3176, 1.0000, 1.0000),
(0.3686, 0.8081, 0.8081),
(0.4275, 1.0000, 1.0000),
(0.5216, 1.0000, 1.0000),
(0.6314, 0.7292, 0.7292),
(0.6863, 0.2796, 0.2796),
(0.7451, 0.0000, 0.0000),
(0.7922, 0.0000, 0.0000),
(0.8431, 0.1753, 0.1753),
(0.8980, 0.5000, 0.5000),
(1.0000, 0.9725, 0.9725),
), 'blue': (
(0.0, 0.5020, 0.5020),
(0.0510, 0.0222, 0.0222),
(0.1098, 1.0000, 1.0000),
(0.2039, 1.0000, 1.0000),
(0.2627, 0.6145, 0.6145),
(0.3216, 0.0000, 0.0000),
(0.4157, 0.0000, 0.0000),
(0.4745, 0.2342, 0.2342),
(0.5333, 0.0000, 0.0000),
(0.5804, 0.0000, 0.0000),
(0.6314, 0.0549, 0.0549),
(0.6902, 0.0000, 0.0000),
(0.7373, 0.0000, 0.0000),
(0.7922, 0.9738, 0.9738),
(0.8000, 1.0000, 1.0000),
(0.8431, 1.0000, 1.0000),
(0.8980, 0.9341, 0.9341),
(1.0000, 0.9961, 0.9961),
)}
_gist_rainbow_data = (
(0.000, (1.00, 0.00, 0.16)),
(0.030, (1.00, 0.00, 0.00)),
(0.215, (1.00, 1.00, 0.00)),
(0.400, (0.00, 1.00, 0.00)),
(0.586, (0.00, 1.00, 1.00)),
(0.770, (0.00, 0.00, 1.00)),
(0.954, (1.00, 0.00, 1.00)),
(1.000, (1.00, 0.00, 0.75))
)
_gist_stern_data = {
'red': (
(0.000, 0.000, 0.000), (0.0547, 1.000, 1.000),
(0.250, 0.027, 0.250), #(0.2500, 0.250, 0.250),
(1.000, 1.000, 1.000)),
'green': ((0, 0, 0), (1, 1, 1)),
'blue': (
(0.000, 0.000, 0.000), (0.500, 1.000, 1.000),
(0.735, 0.000, 0.000), (1.000, 1.000, 1.000))
}
_gist_yarg_data = {
'red': lambda x: 1 - x,
'green': lambda x: 1 - x,
'blue': lambda x: 1 - x,
}
# This bipolar color map was generated from CoolWarmFloat33.csv of
# "Diverging Color Maps for Scientific Visualization" by Kenneth Moreland.
# <http://www.cs.unm.edu/~kmorel/documents/ColorMaps/>
_coolwarm_data = {
'red': [
(0.0, 0.2298057, 0.2298057),
(0.03125, 0.26623388, 0.26623388),
(0.0625, 0.30386891, 0.30386891),
(0.09375, 0.342804478, 0.342804478),
(0.125, 0.38301334, 0.38301334),
(0.15625, 0.424369608, 0.424369608),
(0.1875, 0.46666708, 0.46666708),
(0.21875, 0.509635204, 0.509635204),
(0.25, 0.552953156, 0.552953156),
(0.28125, 0.596262162, 0.596262162),
(0.3125, 0.639176211, 0.639176211),
(0.34375, 0.681291281, 0.681291281),
(0.375, 0.722193294, 0.722193294),
(0.40625, 0.761464949, 0.761464949),
(0.4375, 0.798691636, 0.798691636),
(0.46875, 0.833466556, 0.833466556),
(0.5, 0.865395197, 0.865395197),
(0.53125, 0.897787179, 0.897787179),
(0.5625, 0.924127593, 0.924127593),
(0.59375, 0.944468518, 0.944468518),
(0.625, 0.958852946, 0.958852946),
(0.65625, 0.96732803, 0.96732803),
(0.6875, 0.969954137, 0.969954137),
(0.71875, 0.966811177, 0.966811177),
(0.75, 0.958003065, 0.958003065),
(0.78125, 0.943660866, 0.943660866),
(0.8125, 0.923944917, 0.923944917),
(0.84375, 0.89904617, 0.89904617),
(0.875, 0.869186849, 0.869186849),
(0.90625, 0.834620542, 0.834620542),
(0.9375, 0.795631745, 0.795631745),
(0.96875, 0.752534934, 0.752534934),
(1.0, 0.705673158, 0.705673158)],
'green': [
(0.0, 0.298717966, 0.298717966),
(0.03125, 0.353094838, 0.353094838),
(0.0625, 0.406535296, 0.406535296),
(0.09375, 0.458757618, 0.458757618),
(0.125, 0.50941904, 0.50941904),
(0.15625, 0.558148092, 0.558148092),
(0.1875, 0.604562568, 0.604562568),
(0.21875, 0.648280772, 0.648280772),
(0.25, 0.688929332, 0.688929332),
(0.28125, 0.726149107, 0.726149107),
(0.3125, 0.759599947, 0.759599947),
(0.34375, 0.788964712, 0.788964712),
(0.375, 0.813952739, 0.813952739),
(0.40625, 0.834302879, 0.834302879),
(0.4375, 0.849786142, 0.849786142),
(0.46875, 0.860207984, 0.860207984),
(0.5, 0.86541021, 0.86541021),
(0.53125, 0.848937047, 0.848937047),
(0.5625, 0.827384882, 0.827384882),
(0.59375, 0.800927443, 0.800927443),
(0.625, 0.769767752, 0.769767752),
(0.65625, 0.734132809, 0.734132809),
(0.6875, 0.694266682, 0.694266682),
(0.71875, 0.650421156, 0.650421156),
(0.75, 0.602842431, 0.602842431),
(0.78125, 0.551750968, 0.551750968),
(0.8125, 0.49730856, 0.49730856),
(0.84375, 0.439559467, 0.439559467),
(0.875, 0.378313092, 0.378313092),
(0.90625, 0.312874446, 0.312874446),
(0.9375, 0.24128379, 0.24128379),
(0.96875, 0.157246067, 0.157246067),
(1.0, 0.01555616, 0.01555616)],
'blue': [
(0.0, 0.753683153, 0.753683153),
(0.03125, 0.801466763, 0.801466763),
(0.0625, 0.84495867, 0.84495867),
(0.09375, 0.883725899, 0.883725899),
(0.125, 0.917387822, 0.917387822),
(0.15625, 0.945619588, 0.945619588),
(0.1875, 0.968154911, 0.968154911),
(0.21875, 0.98478814, 0.98478814),
(0.25, 0.995375608, 0.995375608),
(0.28125, 0.999836203, 0.999836203),
(0.3125, 0.998151185, 0.998151185),
(0.34375, 0.990363227, 0.990363227),
(0.375, 0.976574709, 0.976574709),
(0.40625, 0.956945269, 0.956945269),
(0.4375, 0.931688648, 0.931688648),
(0.46875, 0.901068838, 0.901068838),
(0.5, 0.865395561, 0.865395561),
(0.53125, 0.820880546, 0.820880546),
(0.5625, 0.774508472, 0.774508472),
(0.59375, 0.726736146, 0.726736146),
(0.625, 0.678007945, 0.678007945),
(0.65625, 0.628751763, 0.628751763),
(0.6875, 0.579375448, 0.579375448),
(0.71875, 0.530263762, 0.530263762),
(0.75, 0.481775914, 0.481775914),
(0.78125, 0.434243684, 0.434243684),
(0.8125, 0.387970225, 0.387970225),
(0.84375, 0.343229596, 0.343229596),
(0.875, 0.300267182, 0.300267182),
(0.90625, 0.259301199, 0.259301199),
(0.9375, 0.220525627, 0.220525627),
(0.96875, 0.184115123, 0.184115123),
(1.0, 0.150232812, 0.150232812)]
}
# Implementation of Carey Rappaport's CMRmap.
# See `A Color Map for Effective Black-and-White Rendering of Color-Scale Images' by Carey Rappaport
# http://www.mathworks.com/matlabcentral/fileexchange/2662-cmrmap-m
_CMRmap_data = {'red' : ( (0.000, 0.00, 0.00),
(0.125, 0.15, 0.15),
(0.250, 0.30, 0.30),
(0.375, 0.60, 0.60),
(0.500, 1.00, 1.00),
(0.625, 0.90, 0.90),
(0.750, 0.90, 0.90),
(0.875, 0.90, 0.90),
(1.000, 1.00, 1.00) ),
'green' : ( (0.000, 0.00, 0.00),
(0.125, 0.15, 0.15),
(0.250, 0.15, 0.15),
(0.375, 0.20, 0.20),
(0.500, 0.25, 0.25),
(0.625, 0.50, 0.50),
(0.750, 0.75, 0.75),
(0.875, 0.90, 0.90),
(1.000, 1.00, 1.00) ),
'blue': ( (0.000, 0.00, 0.00),
(0.125, 0.50, 0.50),
(0.250, 0.75, 0.75),
(0.375, 0.50, 0.50),
(0.500, 0.15, 0.15),
(0.625, 0.00, 0.00),
(0.750, 0.10, 0.10),
(0.875, 0.50, 0.50),
(1.000, 1.00, 1.00) )}
datad = {
'afmhot': _afmhot_data,
'autumn': _autumn_data,
'bone': _bone_data,
'binary': _binary_data,
'bwr': _bwr_data,
'brg': _brg_data,
'CMRmap': _CMRmap_data,
'cool': _cool_data,
'copper': _copper_data,
'cubehelix': _cubehelix_data,
'flag': _flag_data,
'gnuplot': _gnuplot_data,
'gnuplot2': _gnuplot2_data,
'gray' : _gray_data,
'hot': _hot_data,
'hsv': _hsv_data,
'jet' : _jet_data,
'ocean': _ocean_data,
'pink': _pink_data,
'prism': _prism_data,
'rainbow': _rainbow_data,
'seismic': _seismic_data,
'spring': _spring_data,
'summer': _summer_data,
'terrain': _terrain_data,
'winter': _winter_data,
'spectral': _spectral_data
}
datad['Accent']=_Accent_data
datad['Blues']=_Blues_data
datad['BrBG']=_BrBG_data
datad['BuGn']=_BuGn_data
datad['BuPu']=_BuPu_data
datad['Dark2']=_Dark2_data
datad['GnBu']=_GnBu_data
datad['Greens']=_Greens_data
datad['Greys']=_Greys_data
datad['Oranges']=_Oranges_data
datad['OrRd']=_OrRd_data
datad['Paired']=_Paired_data
datad['Pastel1']=_Pastel1_data
datad['Pastel2']=_Pastel2_data
datad['PiYG']=_PiYG_data
datad['PRGn']=_PRGn_data
datad['PuBu']=_PuBu_data
datad['PuBuGn']=_PuBuGn_data
datad['PuOr']=_PuOr_data
datad['PuRd']=_PuRd_data
datad['Purples']=_Purples_data
datad['RdBu']=_RdBu_data
datad['RdGy']=_RdGy_data
datad['RdPu']=_RdPu_data
datad['RdYlBu']=_RdYlBu_data
datad['RdYlGn']=_RdYlGn_data
datad['Reds']=_Reds_data
datad['Set1']=_Set1_data
datad['Set2']=_Set2_data
datad['Set3']=_Set3_data
datad['Spectral']=_Spectral_data
datad['YlGn']=_YlGn_data
datad['YlGnBu']=_YlGnBu_data
datad['YlOrBr']=_YlOrBr_data
datad['YlOrRd']=_YlOrRd_data
datad['gist_earth']=_gist_earth_data
datad['gist_gray']=_gist_gray_data
datad['gist_heat']=_gist_heat_data
datad['gist_ncar']=_gist_ncar_data
datad['gist_rainbow']=_gist_rainbow_data
datad['gist_stern']=_gist_stern_data
datad['gist_yarg']=_gist_yarg_data
datad['coolwarm']=_coolwarm_data
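# A minimal usage sketch: most entries in ``datad`` are in Matplotlib's
# segmentdata format (dicts of 'red'/'green'/'blue' segments or channel
# functions), so they can be fed directly to the public
# ``matplotlib.colors.LinearSegmentedColormap`` constructor.  The demo below is
# guarded so that importing this data module stays side-effect free.
if __name__ == '__main__':
    import numpy as np
    from matplotlib.colors import LinearSegmentedColormap

    # Build a 256-entry colormap from one of the tables above and sample five
    # RGBA colours evenly spaced along it.
    demo_cmap = LinearSegmentedColormap('coolwarm_demo', datad['coolwarm'], N=256)
    print(demo_cmap(np.linspace(0.0, 1.0, 5)))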
|
mit
|
stefangri/s_s_productions
|
PHY341/V201_Dulong_Petit/Messdaten/auswertung.py
|
1
|
6281
|
import numpy as np
import uncertainties.unumpy as unp
from uncertainties import ufloat
from pint import UnitRegistry
import scipy.constants as const
import matplotlib.pyplot as plt
u = UnitRegistry()
Q_ = u.Quantity
R = Q_(const.gas_constant, 'joule / (mole * kelvin)')
c_wasser = Q_(4.18, 'joule/(gram kelvin)')
rho_wasser = Q_(992.2, 'kg / (m^3)')
m_wasser = Q_(0.6, 'liter').to('m^3') * rho_wasser
# values for the calculation of c_g*m_g
m_wasser_x = Q_(0.3, 'liter').to('m^3') * rho_wasser
m_wasser_y = m_wasser_x
print('Masse_x: ', m_wasser_x)
U_y = Q_(2.760, 'dimensionless')
U_x = Q_(0.749, 'dimensionless')
U_misch_for_cgmg = Q_(1.594, 'dimensionless')
# results from calibrating the thermocouples, determination of the slope
U_0 = 0
U_100 = 3.98
x = np.linspace(0, 100)
m = 100 / (U_100 - U_0)
print ('Steigung: ', m)
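# UtoT maps a measured thermocouple voltage linearly onto an absolute
# temperature in kelvin, using the slope m from the two calibration points
# above (U_0 at 0 °C, U_100 at 100 °C).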
def UtoT(u):
return Q_(m * u + 273.15, 'kelvin')
# calculation of c_g*m_g
T_y = UtoT(U_y)
T_x = UtoT(U_x)
print('Temperaturen x, y: ', T_x, T_y)
T_misch_for_cgmg = UtoT(U_misch_for_cgmg)
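# Heat balance for the calorimeter's heat capacity c_g*m_g: the warm water
# (m_y, T_y) releases c_w*m_y*(T_y - T_misch), while the cold water (m_x, T_x)
# and the calorimeter absorb (c_w*m_x + c_g*m_g)*(T_misch - T_x); solving for
# c_g*m_g gives the expression below.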
c_g_m_g = ((c_wasser * m_wasser_y * (T_y - T_misch_for_cgmg) - c_wasser * m_wasser_x*(T_misch_for_cgmg - T_x))/(T_misch_for_cgmg - T_x) ).to('joule/kelvin')
print('c_g_m_g: ', c_g_m_g)
# read in the material constants for graphite
rho_graphit_raw, M_graphit_raw, alpha_graphit_raw, kappa_graphit_raw = np.genfromtxt('materialkonstanten_graphit.txt', unpack=True)
rho_graphit = Q_(rho_graphit_raw, 'gram/centimeter^3').to('gram/(m^3)')
M_graphit = Q_(M_graphit_raw, 'gram/mol')
alpha_graphit = Q_(alpha_graphit_raw * 1e-06, '1/kelvin')
kappa_graphit = Q_(kappa_graphit_raw * 1e09, 'kilogram/(meter*second**2)')
mol_vol_graphit = M_graphit / rho_graphit
#print(mol_vol_graphit)
# read in the measured values for graphite
m_k_graphit = Q_(247.79 - 139.77, 'gram').to('kilogram')
U_k_graphit, U_w_graphit, U_m_graphit = np.genfromtxt('graphit.txt', unpack=True)
T_k_graphit = UtoT(U_k_graphit)
T_w_graphit = UtoT(U_w_graphit)
T_m_graphit = UtoT(U_m_graphit)
print('TK, TW, TM Graphit: ', T_k_graphit, T_w_graphit, T_m_graphit)
# calculation of c_k
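# Mixing calorimetry: the hot sample (m_k, T_k) heats water and calorimeter
# from T_w to the mixing temperature T_m, so
#   c_k = (c_w*m_w + c_g*m_g)*(T_m - T_w) / (m_k*(T_k - T_m)),
# and multiplying by the molar mass M gives a molar heat capacity that can be
# compared with the Dulong-Petit value 3R.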
c_k_graphit = ((c_wasser * m_wasser + c_g_m_g)*(T_m_graphit - T_w_graphit))/(m_k_graphit*(T_k_graphit - T_m_graphit)) * M_graphit
print('ck Graphit: ',c_k_graphit)
print(R)
#print('a', (c_k_graphit - 9 * alpha_graphit**2 * kappa_graphit * mol_vol_graphit * T_m_graphit)/R)
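# Correction from c_p to the molar C_V, as in the commented-out check above:
#   C_p - C_V = 9 * alpha^2 * kappa * V_mol * T_m
# with alpha the linear expansion coefficient and kappa the bulk modulus.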
const_graphit = (9 * alpha_graphit**2 * kappa_graphit * mol_vol_graphit * T_m_graphit).to('joule/(mole*kelvin)')
c_v_graphit = c_k_graphit - const_graphit
c_graphit_lit = Q_(0.751, 'joule /(gram* kelvin)' ) * M_graphit
print('Literaturwert der spezifischen Wärmekapazität Graphit: ', c_graphit_lit)
print(c_v_graphit)
print(c_v_graphit / (3 * R) -1 )
print('Mittelwert Graphit: ', np.mean(c_v_graphit), 'pm', 1/np.sqrt(3) * np.std(c_v_graphit))
# read in the material constants for lead
rho_blei_raw, M_blei_raw, alpha_blei_raw, kappa_blei_raw = np.genfromtxt('materialkonstanten_blei.txt', unpack=True)
rho_blei = Q_(rho_blei_raw, 'gram/centimeter^3').to('gram/(m^3)')
M_blei = Q_(M_blei_raw, 'gram/mol')
alpha_blei = Q_(alpha_blei_raw * 1e-06, '1/kelvin')
kappa_blei = Q_(kappa_blei_raw * 1e09, 'kilogram/(meter*second**2)')
mol_vol_blei = M_blei / rho_blei
# read in the measured values for lead
m_k_blei = Q_(370.53 - 138.5, 'gram').to('kilogram')
U_k_blei, U_w_blei, U_m_blei = np.genfromtxt('blei.txt', unpack=True)
T_k_blei = UtoT(U_k_blei)
T_w_blei = UtoT(U_w_blei)
T_m_blei = UtoT(U_m_blei)
print('TK, TW, TM Zinn: ', T_k_blei, T_w_blei, T_m_blei)
# calculation of c_k
c_k_blei = ((c_wasser * m_wasser + c_g_m_g)*(T_m_blei - T_w_blei))/(m_k_blei * (T_k_blei - T_m_blei)) * M_blei
print('spezifische Wärmekapazität Blei: ', c_k_blei)
const_blei = (9 * alpha_blei**2 * kappa_blei * mol_vol_blei * T_m_blei).to('joule/(mole*kelvin)')
c_blei_lit = Q_(0.230, 'joule /(gram* kelvin)' ) * M_blei
print('Literaturwert der spezifischen Wärmekapazität Zinn: ', c_blei_lit)
c_v_blei = c_k_blei - const_blei
print('Prozentuale Abweichung vom lit Wert', c_v_blei / c_blei_lit - 1)
print(c_v_blei)
print('Prozentuale Abweichung von 3R: ', c_v_blei / (3 * R) -1 )
print('Mittelwert zinn: ', np.mean(c_v_blei), 'pm', 1/np.sqrt(3) * np.std(c_v_blei))
# read in the material constants for aluminium
rho_alu_raw, M_alu_raw, alpha_alu_raw, kappa_alu_raw = np.genfromtxt('materialkonstanten_aluminium.txt', unpack=True)
rho_alu = Q_(rho_alu_raw, 'gram/centimeter^3').to('gram/(m^3)')
M_alu = Q_(M_alu_raw, 'gram/mol')
alpha_alu = Q_(alpha_alu_raw * 1e-06, '1/kelvin')
kappa_alu = Q_(kappa_alu_raw * 1e09, 'kilogram/(meter*second**2)')
mol_vol_alu = M_alu / rho_alu
# read in the measured values for aluminium
m_k_alu = Q_(255.07 - 140.50, 'gram').to('kilogram')
U_k_alu, U_w_alu, U_m_alu = np.genfromtxt('aluminium.txt', unpack=True)
T_k_alu = UtoT(U_k_alu)
T_w_alu = UtoT(U_w_alu)
T_m_alu = UtoT(U_m_alu)
#print('TK, TW, TM Aluminium: ', T_k_alu, T_w_alu, T_m_alu)
# calculation of c_k
c_k_alu = ((c_wasser * m_wasser + c_g_m_g)*(T_m_alu - T_w_alu))/(m_k_alu * (T_k_alu - T_m_alu)) * M_alu
print('spezifische Wärmekapazität alu: ', c_k_alu)
const_alu = (9 * alpha_alu**2 * kappa_alu * mol_vol_alu * T_m_alu).to('joule/(mole*kelvin)')
c_alu_lit = Q_(0.896, 'joule /(gram* kelvin)' ) * M_alu
print('Literaturwert der spezifischen Wärmekapazität Alu: ', c_alu_lit)
c_v_alu = c_k_alu - const_alu
print('Prozentuale Abweichung vom lit Wert', c_v_alu / c_alu_lit - 1)
print(c_v_alu)
with open('tab.tex', 'w') as f:
f.write('\\begin{table} \n \\centering \n \\begin{tabular}{')
f.write('l' + 3 *'S ')
f.write('} \n \\toprule \n')
f.write(' {Stoff} & {{$c_k$ in $\si{\joule \per {\kelvin \mol}}$}} & {{$C_V$ in $\si{\joule \per {\kelvin \mol}}$}} & {$\\frac{C_V}{R}$} \\\ \n')
f.write('\\midrule \n ')
for i in range (0,3):
f.write('{{Graphit}} & {:.2f} & {:.2f} & {:.2f} \\\ \n'.format(c_k_graphit[i].magnitude, c_v_graphit[i].magnitude, c_v_graphit[i].magnitude/R.magnitude))
for i in range (0,3):
        f.write('{{Zinn}} & {:.2f} & {:.2f} & {:.2f} \\\ \n'.format(c_k_blei[i].magnitude, c_v_blei[i].magnitude, c_v_blei[i].magnitude / R.magnitude))
    f.write('{{Aluminium}} & {:.2f} & {:.2f} & {:.2f} \\\ \n'.format(c_k_alu.magnitude, c_v_alu.magnitude, c_v_alu.magnitude / R.magnitude))
f.write('\\bottomrule \n \\end{tabular} \n \\caption{Spezifische Wärmekapazitäten} \n \\label{tab: c_v} \n \\end{table}')
|
mit
|
themrmax/scikit-learn
|
sklearn/model_selection/_split.py
|
2
|
72920
|
"""
The :mod:`sklearn.model_selection._split` module includes classes and
functions to split the data based on a preset strategy.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# Raghav RV <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from collections import Iterable
from math import ceil, floor
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.misc import comb
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.validation import _num_samples, column_or_1d
from ..utils.validation import check_array
from ..utils.multiclass import type_of_target
from ..externals.six import with_metaclass
from ..externals.six.moves import zip
from ..utils.fixes import signature
from ..base import _pprint
__all__ = ['BaseCrossValidator',
'KFold',
'GroupKFold',
'LeaveOneGroupOut',
'LeaveOneOut',
'LeavePGroupsOut',
'LeavePOut',
'RepeatedStratifiedKFold',
'RepeatedKFold',
'ShuffleSplit',
'GroupShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'train_test_split',
'check_cv']
class BaseCrossValidator(with_metaclass(ABCMeta)):
"""Base class for all cross-validators
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
"""
def __init__(self):
# We need this for the build_repr to work properly in py2.7
# see #6304
pass
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, of length n_samples
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
indices = np.arange(_num_samples(X))
for test_index in self._iter_test_masks(X, y, groups):
train_index = indices[np.logical_not(test_index)]
test_index = indices[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self, X=None, y=None, groups=None):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices(X, y, groups)
"""
for test_index in self._iter_test_indices(X, y, groups):
test_mask = np.zeros(_num_samples(X), dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self, X=None, y=None, groups=None):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
@abstractmethod
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator"""
def __repr__(self):
return _build_repr(self)
class LeaveOneOut(BaseCrossValidator):
"""Leave-One-Out cross-validator
Provides train/test indices to split data in train/test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut()`` is equivalent to ``KFold(n_splits=n)`` and
``LeavePOut(p=1)`` where ``n`` is the number of samples.
Due to the high number of test sets (which is the same as the
number of samples) this cross-validation method can be very costly.
For large datasets one should favor :class:`KFold`, :class:`ShuffleSplit`
or :class:`StratifiedKFold`.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import LeaveOneOut
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = LeaveOneOut()
>>> loo.get_n_splits(X)
2
>>> print(loo)
LeaveOneOut()
>>> for train_index, test_index in loo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneGroupOut
For splitting the data according to explicit, domain-specific
stratification of the dataset.
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def _iter_test_indices(self, X, y=None, groups=None):
return range(_num_samples(X))
def get_n_splits(self, X, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if X is None:
raise ValueError("The 'X' parameter should not be None.")
return _num_samples(X)
class LeavePOut(BaseCrossValidator):
"""Leave-P-Out cross-validator
Provides train/test indices to split data in train/test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(p)`` is NOT equivalent to
``KFold(n_splits=n_samples // p)`` which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross-validation method can be very costly. For
large datasets one should favor :class:`KFold`, :class:`StratifiedKFold`
or :class:`ShuffleSplit`.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
p : int
Size of the test sets.
Examples
--------
>>> from sklearn.model_selection import LeavePOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = LeavePOut(2)
>>> lpo.get_n_splits(X)
6
>>> print(lpo)
LeavePOut(p=2)
>>> for train_index, test_index in lpo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, p):
self.p = p
def _iter_test_indices(self, X, y=None, groups=None):
for combination in combinations(range(_num_samples(X)), self.p):
yield np.array(combination)
def get_n_splits(self, X, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
"""
if X is None:
raise ValueError("The 'X' parameter should not be None.")
return int(comb(_num_samples(X), self.p, exact=True))
class _BaseKFold(with_metaclass(ABCMeta, BaseCrossValidator)):
"""Base class for KFold, GroupKFold, and StratifiedKFold"""
@abstractmethod
def __init__(self, n_splits, shuffle, random_state):
if not isinstance(n_splits, numbers.Integral):
raise ValueError('The number of folds must be of Integral type. '
'%s of type %s was passed.'
% (n_splits, type(n_splits)))
n_splits = int(n_splits)
if n_splits <= 1:
raise ValueError(
"k-fold cross-validation requires at least one"
" train/test split by setting n_splits=2 or more,"
" got n_splits={0}.".format(n_splits))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.n_splits = n_splits
self.shuffle = shuffle
self.random_state = random_state
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
if self.n_splits > n_samples:
raise ValueError(
("Cannot have number of splits n_splits={0} greater"
" than the number of samples: {1}.").format(self.n_splits,
n_samples))
for train, test in super(_BaseKFold, self).split(X, y, groups):
yield train, test
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
class KFold(_BaseKFold):
"""K-Folds cross-validator
Provides train/test indices to split data in train/test sets. Split
dataset into k consecutive folds (without shuffling by default).
Each fold is then used once as a validation while the k - 1 remaining
folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : int, RandomState instance or None, optional, default=None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Used when ``shuffle`` == True.
Examples
--------
>>> from sklearn.model_selection import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(n_splits=2)
>>> kf.get_n_splits(X)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
KFold(n_splits=2, random_state=None, shuffle=False)
>>> for train_index, test_index in kf.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first ``n_samples % n_splits`` folds have size
``n_samples // n_splits + 1``, other folds have size
``n_samples // n_splits``, where ``n_samples`` is the number of samples.
See also
--------
StratifiedKFold
        Takes class information into account to avoid building folds with
imbalanced class distributions (for binary or multiclass
classification tasks).
GroupKFold: K-fold iterator variant with non-overlapping groups.
RepeatedKFold: Repeats K-Fold n times.
"""
def __init__(self, n_splits=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n_splits, shuffle, random_state)
def _iter_test_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
indices = np.arange(n_samples)
if self.shuffle:
check_random_state(self.random_state).shuffle(indices)
n_splits = self.n_splits
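        # Each fold gets n_samples // n_splits samples; the first
        # n_samples % n_splits folds receive one extra sample (see the Notes
        # section of the class docstring).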
fold_sizes = (n_samples // n_splits) * np.ones(n_splits, dtype=np.int)
fold_sizes[:n_samples % n_splits] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield indices[start:stop]
current = stop
class GroupKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping groups.
The same group will not appear in two different folds (the number of
distinct groups has to be at least equal to the number of folds).
    The folds are approximately balanced in the sense that the number of
    samples is approximately the same in each fold.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
Examples
--------
>>> from sklearn.model_selection import GroupKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> groups = np.array([0, 0, 2, 2])
>>> group_kfold = GroupKFold(n_splits=2)
>>> group_kfold.get_n_splits(X, y, groups)
2
>>> print(group_kfold)
GroupKFold(n_splits=2)
>>> for train_index, test_index in group_kfold.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneGroupOut
For splitting the data according to explicit domain-specific
stratification of the dataset.
"""
def __init__(self, n_splits=3):
super(GroupKFold, self).__init__(n_splits, shuffle=False,
random_state=None)
def _iter_test_indices(self, X, y, groups):
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
groups = check_array(groups, ensure_2d=False, dtype=None)
unique_groups, groups = np.unique(groups, return_inverse=True)
n_groups = len(unique_groups)
if self.n_splits > n_groups:
raise ValueError("Cannot have number of splits n_splits=%d greater"
" than the number of groups: %d."
% (self.n_splits, n_groups))
# Weight groups by their number of occurrences
n_samples_per_group = np.bincount(groups)
# Distribute the most frequent groups first
indices = np.argsort(n_samples_per_group)[::-1]
n_samples_per_group = n_samples_per_group[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(self.n_splits)
# Mapping from group index to fold index
group_to_fold = np.zeros(len(unique_groups))
# Distribute samples by adding the largest weight to the lightest fold
for group_index, weight in enumerate(n_samples_per_group):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
group_to_fold[indices[group_index]] = lightest_fold
indices = group_to_fold[groups]
for f in range(self.n_splits):
yield np.where(indices == f)[0]
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a variation of KFold that returns
stratified folds. The folds are made by preserving the percentage of
samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : int, RandomState instance or None, optional, default=None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Used when ``shuffle`` == True.
Examples
--------
>>> from sklearn.model_selection import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(n_splits=2)
>>> skf.get_n_splits(X, y)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
StratifiedKFold(n_splits=2, random_state=None, shuffle=False)
>>> for train_index, test_index in skf.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size ``trunc(n_samples / n_splits)``; the last one
    holds the remaining samples.
See also
--------
RepeatedStratifiedKFold: Repeats Stratified K-Fold n times.
"""
def __init__(self, n_splits=3, shuffle=False, random_state=None):
super(StratifiedKFold, self).__init__(n_splits, shuffle, random_state)
def _make_test_folds(self, X, y=None, groups=None):
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
y = np.asarray(y)
n_samples = y.shape[0]
unique_y, y_inversed = np.unique(y, return_inverse=True)
y_counts = np.bincount(y_inversed)
min_groups = np.min(y_counts)
if np.all(self.n_splits > y_counts):
raise ValueError("All the n_groups for individual classes"
" are less than n_splits=%d."
% (self.n_splits))
if self.n_splits > min_groups:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of groups for any class cannot"
" be less than n_splits=%d."
% (min_groups, self.n_splits)), Warning)
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each class so as to respect the balance of
# classes
# NOTE: Passing the data corresponding to ith class say X[y==class_i]
# will break when the data is not 100% stratifiable for all classes.
        # So we pass np.zeros(max(c, n_splits)) as data to the KFold
per_cls_cvs = [
KFold(self.n_splits, shuffle=self.shuffle,
random_state=rng).split(np.zeros(max(count, self.n_splits)))
for count in y_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):
for cls, (_, test_split) in zip(unique_y, per_cls_splits):
cls_test_folds = test_folds[y == cls]
# the test split can be too big because we used
# KFold(...).split(X[:max(c, n_splits)]) when data is not 100%
# stratifiable for all the classes
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(cls_test_folds)]
cls_test_folds[test_split] = test_fold_indices
test_folds[y == cls] = cls_test_folds
return test_folds
def _iter_test_masks(self, X, y=None, groups=None):
test_folds = self._make_test_folds(X, y)
for i in range(self.n_splits):
yield test_folds == i
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
y = check_array(y, ensure_2d=False, dtype=None)
return super(StratifiedKFold, self).split(X, y, groups)
class TimeSeriesSplit(_BaseKFold):
"""Time Series cross-validator
Provides train/test indices to split time series data samples
that are observed at fixed time intervals, in train/test sets.
In each split, test indices must be higher than before, and thus shuffling
in cross validator is inappropriate.
This cross-validation object is a variation of :class:`KFold`.
In the kth split, it returns first k folds as train set and the
(k+1)th fold as test set.
Note that unlike standard cross-validation methods, successive
training sets are supersets of those that come before them.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
        Number of splits. Must be at least 2.
max_train_size : int, optional
Maximum size for a single training set.
Examples
--------
>>> from sklearn.model_selection import TimeSeriesSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> tscv = TimeSeriesSplit(n_splits=3)
>>> print(tscv) # doctest: +NORMALIZE_WHITESPACE
TimeSeriesSplit(max_train_size=None, n_splits=3)
>>> for train_index, test_index in tscv.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [0] TEST: [1]
TRAIN: [0 1] TEST: [2]
TRAIN: [0 1 2] TEST: [3]
Notes
-----
The training set has size ``i * n_samples // (n_splits + 1)
+ n_samples % (n_splits + 1)`` in the ``i``th split,
with a test set of size ``n_samples//(n_splits + 1)``,
where ``n_samples`` is the number of samples.
"""
def __init__(self, n_splits=3, max_train_size=None):
super(TimeSeriesSplit, self).__init__(n_splits,
shuffle=False,
random_state=None)
self.max_train_size = max_train_size
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Always ignored, exists for compatibility.
groups : array-like, with shape (n_samples,), optional
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
n_splits = self.n_splits
n_folds = n_splits + 1
if n_folds > n_samples:
raise ValueError(
("Cannot have number of folds ={0} greater"
" than the number of samples: {1}.").format(n_folds,
n_samples))
indices = np.arange(n_samples)
test_size = (n_samples // n_folds)
test_starts = range(test_size + n_samples % n_folds,
n_samples, test_size)
for test_start in test_starts:
if self.max_train_size and self.max_train_size < test_start:
yield (indices[test_start - self.max_train_size:test_start],
indices[test_start:test_start + test_size])
else:
yield (indices[:test_start],
indices[test_start:test_start + test_size])
class LeaveOneGroupOut(BaseCrossValidator):
"""Leave One Group Out cross-validator
Provides train/test indices to split data according to a third-party
provided group. This group information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import LeaveOneGroupOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> groups = np.array([1, 1, 2, 2])
>>> logo = LeaveOneGroupOut()
>>> logo.get_n_splits(X, y, groups)
2
>>> logo.get_n_splits(groups=groups) # 'groups' is always required
2
>>> print(logo)
LeaveOneGroupOut()
>>> for train_index, test_index in logo.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def _iter_test_masks(self, X, y, groups):
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
# We make a copy of groups to avoid side-effects during iteration
groups = check_array(groups, copy=True, ensure_2d=False, dtype=None)
unique_groups = np.unique(groups)
if len(unique_groups) <= 1:
raise ValueError(
"The groups parameter contains fewer than 2 unique groups "
"(%s). LeaveOneGroupOut expects at least 2." % unique_groups)
for i in unique_groups:
yield groups == i
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object, optional
Always ignored, exists for compatibility.
y : object, optional
Always ignored, exists for compatibility.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set. This 'groups' parameter must always be specified to
calculate the number of splits, though the other parameters can be
omitted.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
groups = check_array(groups, ensure_2d=False, dtype=None)
return len(np.unique(groups))
class LeavePGroupsOut(BaseCrossValidator):
"""Leave P Group(s) Out cross-validator
Provides train/test indices to split data according to a third-party
provided group. This group information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
    The difference between LeavePGroupsOut and LeaveOneGroupOut is that
    the former builds the test sets with all the samples assigned to
    ``p`` different values of the groups, while the latter uses the samples
    that are all assigned to a single group.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_groups : int
Number of groups (``p``) to leave out in the test split.
Examples
--------
>>> from sklearn.model_selection import LeavePGroupsOut
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> groups = np.array([1, 2, 3])
>>> lpgo = LeavePGroupsOut(n_groups=2)
>>> lpgo.get_n_splits(X, y, groups)
3
>>> lpgo.get_n_splits(groups=groups) # 'groups' is always required
3
>>> print(lpgo)
LeavePGroupsOut(n_groups=2)
>>> for train_index, test_index in lpgo.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def __init__(self, n_groups):
self.n_groups = n_groups
def _iter_test_masks(self, X, y, groups):
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
groups = check_array(groups, copy=True, ensure_2d=False, dtype=None)
unique_groups = np.unique(groups)
if self.n_groups >= len(unique_groups):
raise ValueError(
"The groups parameter contains fewer than (or equal to) "
"n_groups (%d) numbers of unique groups (%s). LeavePGroupsOut "
"expects that at least n_groups + 1 (%d) unique groups be "
"present" % (self.n_groups, unique_groups, self.n_groups + 1))
combi = combinations(range(len(unique_groups)), self.n_groups)
for indices in combi:
test_index = np.zeros(_num_samples(X), dtype=np.bool)
for l in unique_groups[np.array(indices)]:
test_index[groups == l] = True
yield test_index
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object, optional
Always ignored, exists for compatibility.
y : object, optional
Always ignored, exists for compatibility.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set. This 'groups' parameter must always be specified to
calculate the number of splits, though the other parameters can be
omitted.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
groups = check_array(groups, ensure_2d=False, dtype=None)
return int(comb(len(np.unique(groups)), self.n_groups, exact=True))
class _RepeatedSplits(with_metaclass(ABCMeta)):
"""Repeated splits for an arbitrary randomized CV splitter.
Repeats splits for cross-validators n times with different randomization
in each repetition.
Parameters
----------
cv : callable
Cross-validator class.
n_repeats : int, default=10
Number of times cross-validator needs to be repeated.
random_state : int, RandomState instance or None, optional, default=None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
**cvargs : additional params
Constructor parameters for cv. Must not contain random_state
and shuffle.
"""
def __init__(self, cv, n_repeats=10, random_state=None, **cvargs):
if not isinstance(n_repeats, (np.integer, numbers.Integral)):
raise ValueError("Number of repetitions must be of Integral type.")
if n_repeats <= 0:
raise ValueError("Number of repetitions must be greater than 0.")
if any(key in cvargs for key in ('random_state', 'shuffle')):
raise ValueError(
"cvargs must not contain random_state or shuffle.")
self.cv = cv
self.n_repeats = n_repeats
self.random_state = random_state
self.cvargs = cvargs
def split(self, X, y=None, groups=None):
"""Generates indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, of length n_samples
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
n_repeats = self.n_repeats
rng = check_random_state(self.random_state)
for idx in range(n_repeats):
cv = self.cv(random_state=rng, shuffle=True,
**self.cvargs)
for train_index, test_index in cv.split(X, y, groups):
yield train_index, test_index
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
``np.zeros(n_samples)`` may be used as a placeholder.
y : object
Always ignored, exists for compatibility.
``np.zeros(n_samples)`` may be used as a placeholder.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
rng = check_random_state(self.random_state)
cv = self.cv(random_state=rng, shuffle=True,
**self.cvargs)
return cv.get_n_splits(X, y, groups) * self.n_repeats
class RepeatedKFold(_RepeatedSplits):
"""Repeated K-Fold cross validator.
Repeats K-Fold n times with different randomization in each repetition.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
n_repeats : int, default=10
Number of times cross-validator needs to be repeated.
random_state : int, RandomState instance or None, optional, default=None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Examples
--------
>>> from sklearn.model_selection import RepeatedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> rkf = RepeatedKFold(n_splits=2, n_repeats=2, random_state=2652124)
>>> for train_index, test_index in rkf.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
...
TRAIN: [0 1] TEST: [2 3]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
See also
--------
    RepeatedStratifiedKFold: Repeats Stratified K-Fold n times.
"""
def __init__(self, n_splits=5, n_repeats=10, random_state=None):
super(RepeatedKFold, self).__init__(
KFold, n_repeats, random_state, n_splits=n_splits)
class RepeatedStratifiedKFold(_RepeatedSplits):
"""Repeated Stratified K-Fold cross validator.
Repeats Stratified K-Fold n times with different randomization in each
repetition.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
n_repeats : int, default=10
Number of times cross-validator needs to be repeated.
random_state : None, int or RandomState, default=None
Random state to be used to generate random state for each
repetition.
Examples
--------
>>> from sklearn.model_selection import RepeatedStratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> rskf = RepeatedStratifiedKFold(n_splits=2, n_repeats=2,
... random_state=36851234)
>>> for train_index, test_index in rskf.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
...
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
See also
--------
RepeatedKFold: Repeats K-Fold n times.
"""
def __init__(self, n_splits=5, n_repeats=10, random_state=None):
super(RepeatedStratifiedKFold, self).__init__(
StratifiedKFold, n_repeats, random_state, n_splits=n_splits)
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n_splits=10, test_size=0.1, train_size=None,
random_state=None):
_validate_shuffle_split_init(test_size, train_size)
self.n_splits = n_splits
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
for train, test in self._iter_indices(X, y, groups):
yield train, test
@abstractmethod
def _iter_indices(self, X, y=None, groups=None):
"""Generate (train, test) indices"""
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
def __repr__(self):
return _build_repr(self)
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validator
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float, int, or None, default 0.1
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Examples
--------
>>> from sklearn.model_selection import ShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> rs = ShuffleSplit(n_splits=3, test_size=.25, random_state=0)
>>> rs.get_n_splits(X)
3
>>> print(rs)
ShuffleSplit(n_splits=3, random_state=0, test_size=0.25, train_size=None)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... # doctest: +ELLIPSIS
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = ShuffleSplit(n_splits=3, train_size=0.5, test_size=.25,
... random_state=0)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... # doctest: +ELLIPSIS
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
rng = check_random_state(self.random_state)
for i in range(self.n_splits):
# random partition
permutation = rng.permutation(n_samples)
ind_test = permutation[:n_test]
ind_train = permutation[n_test:(n_test + n_train)]
yield ind_train, ind_test
class GroupShuffleSplit(ShuffleSplit):
'''Shuffle-Group(s)-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided group. This group information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePGroupsOut and GroupShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique groups,
whereas GroupShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique groups.
For example, a less computationally intensive alternative to
``LeavePGroupsOut(p=10)`` would be
``GroupShuffleSplit(test_size=10, n_splits=100)``.
    Note: The parameters ``test_size`` and ``train_size`` refer to groups,
    not to samples as in ShuffleSplit.
Parameters
----------
n_splits : int (default 5)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the groups to include in the test split. If
int, represents the absolute number of test groups. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the groups to include in the train split. If
int, represents the absolute number of train groups. If None,
the value is automatically set to the complement of the test size.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
'''
def __init__(self, n_splits=5, test_size=0.2, train_size=None,
random_state=None):
super(GroupShuffleSplit, self).__init__(
n_splits=n_splits,
test_size=test_size,
train_size=train_size,
random_state=random_state)
def _iter_indices(self, X, y, groups):
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
groups = check_array(groups, ensure_2d=False, dtype=None)
classes, group_indices = np.unique(groups, return_inverse=True)
for group_train, group_test in super(
GroupShuffleSplit, self)._iter_indices(X=classes):
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(group_indices, group_train))
test = np.flatnonzero(np.in1d(group_indices, group_test))
yield train, test
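# Illustrative sketch (an editorial addition, not part of the original module):
# a minimal check that GroupShuffleSplit keeps all samples sharing a group id
# on the same side of every split. The helper name and toy data below are
# hypothetical; numpy is assumed to be available as ``np`` as elsewhere in this
# module.
def _example_group_shuffle_split():  # pragma: no cover - illustrative only
    X = np.ones((8, 2))
    groups = np.array([1, 1, 2, 2, 3, 3, 4, 4])
    gss = GroupShuffleSplit(n_splits=3, test_size=0.5, random_state=0)
    for train_idx, test_idx in gss.split(X, groups=groups):
        # train and test indices never share a group
        assert set(groups[train_idx]).isdisjoint(set(groups[test_idx]))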
def _approximate_mode(class_counts, n_draws, rng):
"""Computes approximate mode of multivariate hypergeometric.
This is an approximation to the mode of the multivariate
hypergeometric given by class_counts and n_draws.
It shouldn't be off by more than one.
    It is the most likely outcome of drawing n_draws samples
    from the population given by class_counts.
Parameters
----------
class_counts : ndarray of int
Population per class.
n_draws : int
Number of draws (samples to draw) from the overall population.
rng : random state
Used to break ties.
Returns
-------
sampled_classes : ndarray of int
Number of samples drawn from each class.
np.sum(sampled_classes) == n_draws
Examples
--------
>>> from sklearn.model_selection._split import _approximate_mode
>>> _approximate_mode(class_counts=np.array([4, 2]), n_draws=3, rng=0)
array([2, 1])
>>> _approximate_mode(class_counts=np.array([5, 2]), n_draws=4, rng=0)
array([3, 1])
>>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
... n_draws=2, rng=0)
array([0, 1, 1, 0])
>>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
... n_draws=2, rng=42)
array([1, 1, 0, 0])
"""
rng = check_random_state(rng)
# this computes a bad approximation to the mode of the
# multivariate hypergeometric given by class_counts and n_draws
continuous = n_draws * class_counts / class_counts.sum()
# floored means we don't overshoot n_samples, but probably undershoot
floored = np.floor(continuous)
# we add samples according to how much "left over" probability
# they had, until we arrive at n_samples
need_to_add = int(n_draws - floored.sum())
if need_to_add > 0:
remainder = continuous - floored
values = np.sort(np.unique(remainder))[::-1]
# add according to remainder, but break ties
# randomly to avoid biases
for value in values:
inds, = np.where(remainder == value)
# if we need_to_add less than what's in inds
# we draw randomly from them.
# if we need to add more, we add them all and
# go to the next value
add_now = min(len(inds), need_to_add)
inds = rng.choice(inds, size=add_now, replace=False)
floored[inds] += 1
need_to_add -= add_now
if need_to_add == 0:
break
return floored.astype(np.int)
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Examples
--------
>>> from sklearn.model_selection import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(n_splits=3, test_size=0.5, random_state=0)
>>> sss.get_n_splits(X, y)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(n_splits=3, random_state=0, ...)
>>> for train_index, test_index in sss.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, n_splits=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
n_splits, test_size, train_size, random_state)
def _iter_indices(self, X, y, groups=None):
n_samples = _num_samples(X)
y = check_array(y, ensure_2d=False, dtype=None)
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
classes, y_indices = np.unique(y, return_inverse=True)
n_classes = classes.shape[0]
class_counts = np.bincount(y_indices)
if np.min(class_counts) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of groups for any class cannot"
" be less than 2.")
if n_train < n_classes:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(n_train, n_classes))
if n_test < n_classes:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(n_test, n_classes))
rng = check_random_state(self.random_state)
for _ in range(self.n_splits):
# if there are ties in the class-counts, we want
# to make sure to break them anew in each iteration
n_i = _approximate_mode(class_counts, n_train, rng)
class_counts_remaining = class_counts - n_i
t_i = _approximate_mode(class_counts_remaining, n_test, rng)
train = []
test = []
for i, class_i in enumerate(classes):
permutation = rng.permutation(class_counts[i])
perm_indices_class_i = np.where((y == class_i))[0][permutation]
train.extend(perm_indices_class_i[:n_i[i]])
test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
y = check_array(y, ensure_2d=False, dtype=None)
return super(StratifiedShuffleSplit, self).split(X, y, groups)
def _validate_shuffle_split_init(test_size, train_size):
"""Validation helper to check the test_size and train_size at init
NOTE This does not take into account the number of samples which is known
only at split
"""
if test_size is None and train_size is None:
raise ValueError('test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif (np.asarray(test_size).dtype.kind == 'f' and
(train_size + test_size) > 1.):
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for train_size: %r" % train_size)
def _validate_shuffle_split(n_samples, test_size, train_size):
"""
    Validation helper to check if the train/test sizes are meaningful with
    respect to the size of the data (n_samples).
"""
if (test_size is not None and np.asarray(test_size).dtype.kind == 'i' and
test_size >= n_samples):
raise ValueError('test_size=%d should be smaller than the number of '
'samples %d' % (test_size, n_samples))
if (train_size is not None and np.asarray(train_size).dtype.kind == 'i' and
train_size >= n_samples):
raise ValueError("train_size=%d should be smaller than the number of"
" samples %d" % (train_size, n_samples))
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n_samples)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n_samples - n_test
elif np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n_samples)
else:
n_train = float(train_size)
if test_size is None:
n_test = n_samples - n_train
if n_train + n_test > n_samples:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n_samples))
return int(n_train), int(n_test)
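# Worked illustration (editorial sketch): with n_samples=10, test_size=0.25
# and train_size=None, the float branch gives n_test = ceil(0.25 * 10) = 3 and
# the remainder n_train = 10 - 3 = 7, so the helper returns (7, 3).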
class PredefinedSplit(BaseCrossValidator):
"""Predefined split cross-validator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> test_fold = [0, 1, -1, 1]
>>> ps = PredefinedSplit(test_fold)
>>> ps.get_n_splits()
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
PredefinedSplit(test_fold=array([ 0, 1, -1, 1]))
>>> for train_index, test_index in ps.split():
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
ind = np.arange(len(self.test_fold))
for test_index in self._iter_test_masks():
train_index = ind[np.logical_not(test_index)]
test_index = ind[test_index]
yield train_index, test_index
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets."""
for f in self.unique_folds:
test_index = np.where(self.test_fold == f)[0]
test_mask = np.zeros(len(self.test_fold), dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.unique_folds)
class _CVIterableWrapper(BaseCrossValidator):
"""Wrapper class for old style cv objects and iterables."""
def __init__(self, cv):
self.cv = list(cv)
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.cv)
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
for train, test in self.cv:
yield train, test
def check_cv(cv=3, y=None, classifier=False):
"""Input checker utility for building a cross-validator
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if classifier is True and ``y`` is either
binary or multiclass, :class:`StratifiedKFold` is used. In all other
cases, :class:`KFold` is used.
    Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
y : array-like, optional
The target variable for supervised learning problems.
classifier : boolean, optional, default False
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv : a cross-validator instance.
The return value is a cross-validator which generates the train/test
splits via the ``split`` method.
"""
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if (classifier and (y is not None) and
(type_of_target(y) in ('binary', 'multiclass'))):
return StratifiedKFold(cv)
else:
return KFold(cv)
if not hasattr(cv, 'split') or isinstance(cv, str):
if not isinstance(cv, Iterable) or isinstance(cv, str):
raise ValueError("Expected cv as an integer, cross-validation "
"object (from sklearn.model_selection) "
"or an iterable. Got %s." % cv)
return _CVIterableWrapper(cv)
return cv # New style cv objects are passed without any modification
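# Illustrative sketch (an editorial addition, not part of the original module):
# how ``check_cv`` resolves the common input types. The helper name and toy
# targets are hypothetical.
def _example_check_cv():  # pragma: no cover - illustrative only
    # an integer plus a binary/multiclass target with classifier=True
    # resolves to StratifiedKFold
    assert isinstance(check_cv(3, np.array([0, 1, 0, 1, 0, 1]),
                               classifier=True), StratifiedKFold)
    # otherwise an integer resolves to plain KFold
    assert isinstance(check_cv(3, np.arange(6.), classifier=False), KFold)
    # an iterable of (train, test) pairs is wrapped so it can be re-iterated
    wrapped = check_cv([(np.array([0, 1]), np.array([2]))])
    assert wrapped.get_n_splits() == 1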
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(ShuffleSplit().split(X, y))`` and application to input data
into a single call for splitting (and optionally subsampling) data in a
    one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
shuffle : boolean, optional (default=True)
Whether or not to shuffle the data before splitting. If shuffle=False
then stratify must be None.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the class labels.
Returns
-------
splitting : list, length=2 * len(arrays)
List containing train-test split of inputs.
.. versionadded:: 0.16
If the input is sparse, the output will be a
``scipy.sparse.csr_matrix``. Else, output type is the same as the
input type.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
>>> train_test_split(y, shuffle=False)
[[0, 1, 2], [3, 4]]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
stratify = options.pop('stratify', None)
shuffle = options.pop('shuffle', True)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if shuffle is False:
if stratify is not None:
raise ValueError(
"Stratified train/test split is not implemented for "
"shuffle=False")
n_samples = _num_samples(arrays[0])
n_train, n_test = _validate_shuffle_split(n_samples, test_size,
train_size)
train = np.arange(n_train)
test = np.arange(n_train, n_train + n_test)
else:
if stratify is not None:
CVClass = StratifiedShuffleSplit
else:
CVClass = ShuffleSplit
cv = CVClass(test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(cv.split(X=arrays[0], y=stratify))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False  # to avoid a problem with nosetests
def _build_repr(self):
# XXX This is copied from BaseEstimator's get_params
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
# Ignore varargs, kw and default values and pop self
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
if init is object.__init__:
args = []
else:
args = sorted([p.name for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD])
class_name = self.__class__.__name__
params = dict()
for key in args:
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
params[key] = value
return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))
|
bsd-3-clause
|
Sentient07/scikit-learn
|
sklearn/cluster/tests/test_spectral.py
|
72
|
7950
|
"""Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
for eigen_solver in ('arpack', 'lobpcg'):
for assign_labels in ('kmeans', 'discretize'):
for mat in (S, sparse.csr_matrix(S)):
model = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed',
eigen_solver=eigen_solver,
assign_labels=assign_labels
).fit(mat)
labels = model.labels_
if labels[0] == 0:
labels = 1 - labels
assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
model_copy = loads(dumps(model))
assert_equal(model_copy.n_clusters, model.n_clusters)
assert_equal(model_copy.eigen_solver, model.eigen_solver)
assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
# Test the amg mode of SpectralClustering
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
try:
from pyamg import smoothed_aggregation_solver
amg_loaded = True
except ImportError:
amg_loaded = False
if amg_loaded:
labels = spectral_clustering(S, n_clusters=len(centers),
random_state=0, eigen_solver="amg")
# We don't care too much that it's good, just that it *worked*.
# There does have to be some lower limit on the performance though.
assert_greater(np.mean(labels == true_labels), .3)
else:
assert_raises(ValueError, spectral_embedding, S,
n_components=len(centers),
random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
# Test that SpectralClustering fails with an unknown mode set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
# Test that SpectralClustering fails with an unknown assign_labels set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01)
S = rbf_kernel(X, gamma=1)
S = np.maximum(S - 1e-4, 0)
S = sparse.coo_matrix(S)
labels = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed').fit(S).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
def test_affinities():
# Note: in the following, random_state has been selected to have
# a dataset that yields a stable eigen decomposition both when built
# on OSX and Linux
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01
)
# nearest neighbors affinity
sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
random_state=0)
assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
assert_equal(adjusted_rand_score(y, sp.labels_), 1)
sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
labels = sp.fit(X).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
X = check_random_state(10).rand(10, 5) * 10
kernels_available = kernel_metrics()
for kern in kernels_available:
# Additive chi^2 gives a negative similarity matrix which
# doesn't make sense for spectral clustering
if kern != 'additive_chi2':
sp = SpectralClustering(n_clusters=2, affinity=kern,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
# raise error on unknown affinity
sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
# Test the discretize using a noise assignment matrix
random_state = np.random.RandomState(seed)
for n_samples in [50, 100, 150, 500]:
for n_class in range(2, 10):
# random class labels
y_true = random_state.randint(0, n_class + 1, n_samples)
y_true = np.array(y_true, np.float)
# noise class assignment matrix
y_indicator = sparse.coo_matrix((np.ones(n_samples),
(np.arange(n_samples),
y_true)),
shape=(n_samples,
n_class + 1))
y_true_noisy = (y_indicator.toarray()
+ 0.1 * random_state.randn(n_samples,
n_class + 1))
y_pred = discretize(y_true_noisy, random_state)
assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
|
bsd-3-clause
|
ndtrung81/lammps
|
python/examples/matplotlib_plot.py
|
22
|
2270
|
#!/usr/bin/env python -i
# preceding line should have path for Python on your machine
# matplotlib_plot.py
# Purpose: plot Temp of running LAMMPS simulation via matplotlib
# Syntax: plot.py in.lammps Nfreq Nsteps compute-ID
# in.lammps = LAMMPS input script
# Nfreq = plot data point every this many steps
# Nsteps = run for this many steps
# compute-ID = ID of compute that calculates temperature
# (or any other scalar quantity)
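# Example invocation (hypothetical input file; "thermo_temp" is the compute ID
# that LAMMPS defines by default for the thermodynamic temperature):
#   python matplotlib_plot.py in.melt 100 1000 thermo_temp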
from __future__ import print_function
import sys
sys.path.append("./pizza")
import matplotlib
matplotlib.use('tkagg')
import matplotlib.pyplot as plt
# parse command line
argv = sys.argv
if len(argv) != 5:
print("Syntax: plot.py in.lammps Nfreq Nsteps compute-ID")
sys.exit()
infile = sys.argv[1]
nfreq = int(sys.argv[2])
nsteps = int(sys.argv[3])
compute = sys.argv[4]
me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()
from lammps import lammps
lmp = lammps()
# run infile all at once
# assumed to have no run command in it
lmp.file(infile)
lmp.command("thermo %d" % nfreq)
# initial 0-step run to generate initial 1-point plot
lmp.command("run 0 pre yes post no")
value = lmp.extract_compute(compute,0,0)
ntimestep = 0
xaxis = [ntimestep]
yaxis = [value]
# create matplotlib plot
# just proc 0 handles plotting
if me == 0:
fig = plt.figure()
line, = plt.plot(xaxis, yaxis)
plt.xlim([0, nsteps])
plt.title(compute)
plt.xlabel("Timestep")
plt.ylabel("Temperature")
plt.show(block=False)
# run nfreq steps at a time w/out pre/post, query compute, refresh plot
import time
while ntimestep < nsteps:
lmp.command("run %d pre no post no" % nfreq)
ntimestep += nfreq
value = lmp.extract_compute(compute,0,0)
xaxis.append(ntimestep)
yaxis.append(value)
if me == 0:
line.set_xdata(xaxis)
line.set_ydata(yaxis)
ax = plt.gca()
ax.relim()
ax.autoscale_view(True, True, True)
fig.canvas.draw()
lmp.command("run 0 pre no post yes")
# uncomment if running in parallel via Pypar
#print("Proc %d out of %d procs has" % (me,nprocs), lmp)
#pypar.finalize()
if sys.version_info[0] == 3:
input("Press Enter to exit...")
else:
raw_input("Press Enter to exit...")
|
gpl-2.0
|
dekstop/alluvialflow
|
alluvialflow/alluvialflow.py
|
1
|
18513
|
"""
alluvialflow
~~~~~~~~~~~~
Alluvial flow visualisations in Python.
:copyright: 2015 by Martin Dittus, [email protected]
:license: AGPL3, see LICENSE.txt for more details
"""
from collections import defaultdict
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
# ===================
# = Flow data model =
# ===================
# interface
class FlowDataSource:
# Returns an ordered list of node names
def get_nodes(self):
raise Exception('Not implemented')
# returns a DataFrame[step, node; size]
def get_sequence(self):
raise Exception('Not implemented')
# returns a DataFrame[step1, node1, step2, node2; size]
def get_flows(self):
raise Exception('Not implemented')
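# Illustrative sketch (an editorial addition, not part of the original module):
# a minimal in-memory FlowDataSource built from pandas DataFrames, matching the
# index shapes described in the interface comments above. The class name and
# data are hypothetical, and pandas is assumed to be installed (it is not
# imported by this module itself).
class _ExampleFlowDataSource(FlowDataSource):
    def __init__(self):
        import pandas as pd  # assumed available; only needed for this sketch
        self._nodes = ['a', 'b']
        # DataFrame[step, node; size]
        self._sequence = pd.DataFrame(
            {'size': [3, 1, 2, 2]},
            index=pd.MultiIndex.from_tuples(
                [(0, 'a'), (0, 'b'), (1, 'a'), (1, 'b')],
                names=['step', 'node']))
        # DataFrame[step1, node1, step2, node2; size]
        self._flows = pd.DataFrame(
            {'size': [2, 1, 1]},
            index=pd.MultiIndex.from_tuples(
                [(0, 'a', 1, 'a'), (0, 'a', 1, 'b'), (0, 'b', 1, 'b')],
                names=['step1', 'node1', 'step2', 'node2']))
    def get_nodes(self):
        return self._nodes
    def get_sequence(self):
        return self._sequence
    def get_flows(self):
        return self._flows
# AlluvialFlowLayout(_ExampleFlowDataSource()) is then expected to lay out this
# toy two-node, two-step flow.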
# ==========
# = Layout =
# ==========
# TODO: find a more elegant way to skip missing node/flow entries without all these try/except blocks
class AlluvialFlowLayout:
# flow_data_source: a FlowDataSource instance
# scale_weights: a scaling function for scalars.
# compact: adjust vertical node spacing to current flow sizes? Otherwise keep it constant throughout.
def __init__(self, flow_data_source,
node_margin=50, node_width=0.02,
scale_weights=lambda n: n,
compact=True,
show_stationary_component=True):
self.flow_data_source = flow_data_source
self.node_margin = node_margin
self.node_width = node_width
self.scale_weights = scale_weights
self.compact = compact
self.show_stationary_component = show_stationary_component
self.__layout()
def __layout(self):
nodes = self.flow_data_source.get_nodes() # ordered list of node names
sequence = self.flow_data_source.get_sequence() # DataFrame[step, node; size]
steps = sequence.index.levels[0] # already sorted
flows = self.flow_data_source.get_flows() # DataFrame[step1, node1, step2, node2; size]
self.nodes = nodes
self.steps = steps
self.minx = 0
self.maxx = len(steps) - 1 + 0.3
self.miny = 0
self.maxy = 0 # will be updated during layout
# step -> x
self.step_x = dict(zip(steps, range(len(steps))))
# step -> node -> y1/y2
self.node1_y1 = defaultdict(lambda: dict())
self.node1_y2 = defaultdict(lambda: dict())
self.node2_y1 = defaultdict(lambda: dict())
self.node2_y2 = defaultdict(lambda: dict())
# node -> size
if self.compact==False:
self.node_maxsize = defaultdict(lambda: 0)
for step in steps:
for node1 in nodes:
try:
node = sequence.loc[step, node1]
node_size = self.scale_weights(node['size'])
self.node_maxsize[node1] = max(self.node_maxsize[node1], node_size)
except KeyError:
pass
# step -> node1 -> node2 -> y-center
self.edge_node1_y = defaultdict(lambda: defaultdict(lambda: dict())) # source
self.edge_node2_y = defaultdict(lambda: defaultdict(lambda: dict())) # destination
# step -> node1 -> node2 -> size
self.edge_size = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: 0))) # edge
for step1, step2 in zip(steps[:-1], steps[1:]):
# edge sizes
pos = self.miny
for node1 in nodes:
for node2 in nodes:
try:
flow = flows.loc[step1, node1, step2, node2]
size = self.scale_weights(flow['size'])
self.edge_size[step1][node1][node2] = size
except KeyError:
# skip missing node/flow entries... this awkward code pattern repeats below.
pass
# source ports
pos = self.miny
for node1 in nodes:
self.node1_y1[step1][node1] = pos
try:
node = sequence.loc[step1, node1]
node_size = self.scale_weights(node['size'])
total_node_flow_size = 0
for node2 in nodes:
try:
flow = flows.loc[step1, node1, step2, node2]
size = self.scale_weights(flow['size'])
self.edge_node1_y[step1][node1][node2] = pos + (size/2.0)
total_node_flow_size += size
pos += size
except KeyError:
pass
if self.show_stationary_component:
pos += node_size - total_node_flow_size # "in" flow
except KeyError:
pass
self.node1_y2[step1][node1] = pos
if self.compact==False:
pos = self.node1_y1[step1][node1] + self.node_maxsize[node1]
pos += self.node_margin
self.maxy = max(self.maxy, pos)
# destination ports
pos = self.miny
for node2 in nodes:
self.node2_y1[step2][node2] = pos
try:
node = sequence.loc[step2, node2]
node_size = self.scale_weights(node['size'])
total_node_flow_size = 0
for node1 in nodes:
try:
flow = flows.loc[step1, node1, step2, node2]
size = self.scale_weights(flow['size'])
self.edge_node2_y[step2][node1][node2] = pos + (size/2.0)
total_node_flow_size += size
pos += size
except KeyError:
pass
if self.show_stationary_component:
pos += node_size - total_node_flow_size # "out" flow
except KeyError:
pass
self.node2_y2[step2][node2] = pos
if self.compact==False:
pos = self.node2_y1[step2][node2] + self.node_maxsize[node2]
pos += self.node_margin
self.maxy = max(self.maxy, pos)
# ==========
# = Styles =
# ==========
# interface
class DiagramStyle:
def get_nodecolor(self, node):
raise Exception('Not implemented')
def get_nodealpha(self, node):
raise Exception('Not implemented')
def get_nodezorder(self, node):
raise Exception('Not implemented')
def get_edgecolor(self, step1, node1, step2, node2):
raise Exception('Not implemented')
def get_edgealpha(self, step1, node1, step2, node2):
raise Exception('Not implemented')
def get_edgezorder(self, step1, node1, step2, node2):
raise Exception('Not implemented')
def get_curve(self):
raise Exception('Not implemented')
def get_facecolor(self):
raise Exception('Not implemented')
def get_textcolor(self):
raise Exception('Not implemented')
def get_showlegend(self):
raise Exception('Not implemented')
# Blue nodes and edges.
class SimpleStyle(DiagramStyle):
def __init__(self,
nodecolor='#75A8EB', nodealpha=1.0,
edgecolor='#75A8EB', edgealpha=0.6,
curve=0.4,
facecolor='white', textcolor='black',
showlegend=True):
self.nodecolor = nodecolor
self.nodealpha = nodealpha
self.edgecolor = edgecolor
self.edgealpha = edgealpha
self.curve = curve
self.facecolor = facecolor
self.textcolor = textcolor
self.showlegend = showlegend
def get_nodecolor(self, node):
return self.nodecolor
def get_nodealpha(self, node):
return self.nodealpha
def get_nodezorder(self, node):
return None
def get_edgecolor(self, step1, node1, step2, node2):
return self.edgecolor
def get_edgealpha(self, step1, node1, step2, node2):
return self.edgealpha
def get_edgezorder(self, step1, node1, step2, node2):
return None
def get_curve(self):
return self.curve
def get_facecolor(self):
return self.facecolor
def get_textcolor(self):
return self.textcolor
def get_showlegend(self):
return self.showlegend
# Blue for ingroup-flows, grey for everything else.
class IngroupStyle(DiagramStyle):
def __init__(self, ingroup_nodes,
ingroup_color='#75A8EB', ingroup_zorder=1,
outgroup_color='#cccccc', outgroup_zorder=0,
nodealpha=1.0, edgealpha=0.9,
curve=0.4,
facecolor='white', textcolor='black',
showlegend=True):
self.ingroup_nodes = ingroup_nodes
self.ingroup_color = ingroup_color
self.ingroup_zorder = ingroup_zorder
self.outgroup_color = outgroup_color
self.outgroup_zorder = outgroup_zorder
self.nodealpha = nodealpha
self.edgealpha = edgealpha
self.curve = curve
self.facecolor = facecolor
self.textcolor = textcolor
self.showlegend = showlegend
def get_nodecolor(self, node):
if node in self.ingroup_nodes:
return self.ingroup_color
else:
return self.outgroup_color
def get_nodealpha(self, node):
return self.nodealpha
def get_nodezorder(self, node):
if node in self.ingroup_nodes:
return self.ingroup_zorder
else:
return self.outgroup_zorder
def get_edgecolor(self, step1, node1, step2, node2):
if (node1==node2) and (node1 in self.ingroup_nodes):
return self.ingroup_color
else:
return self.outgroup_color
def get_edgealpha(self, step1, node1, step2, node2):
return self.edgealpha
def get_edgezorder(self, step1, node1, step2, node2):
if (node1==node2) and (node1 in self.ingroup_nodes):
return self.ingroup_zorder
else:
return self.outgroup_zorder
def get_curve(self):
return self.curve
def get_facecolor(self):
return self.facecolor
def get_textcolor(self):
return self.textcolor
def get_showlegend(self):
return self.showlegend
# Blue for all ingroup source flows (inflows), grey for everything else.
class IngroupInflowStyle(IngroupStyle):
def get_edgecolor(self, step1, node1, step2, node2):
if node2 in self.ingroup_nodes:
return self.ingroup_color
else:
return self.outgroup_color
def get_edgealpha(self, step1, node1, step2, node2):
return self.edgealpha
def get_edgezorder(self, step1, node1, step2, node2):
if node2 in self.ingroup_nodes:
return self.ingroup_zorder
else:
return self.outgroup_zorder
# Blue for all ingroup destination flows (outflows), grey for everything else.
class IngroupOutflowStyle(IngroupStyle):
def get_edgecolor(self, step1, node1, step2, node2):
if node1 in self.ingroup_nodes:
return self.ingroup_color
else:
return self.outgroup_color
def get_edgealpha(self, step1, node1, step2, node2):
return self.edgealpha
def get_edgezorder(self, step1, node1, step2, node2):
if node1 in self.ingroup_nodes:
return self.ingroup_zorder
else:
return self.outgroup_zorder
# Blue for all ingroup source/destination flows (inflows and outflows), grey for everything else.
class IngroupAllflowStyle(IngroupStyle):
def get_edgecolor(self, step1, node1, step2, node2):
if (node1 in self.ingroup_nodes) or (node2 in self.ingroup_nodes):
return self.ingroup_color
else:
return self.outgroup_color
def get_edgealpha(self, step1, node1, step2, node2):
return self.edgealpha
def get_edgezorder(self, step1, node1, step2, node2):
if (node1 in self.ingroup_nodes) or (node2 in self.ingroup_nodes):
return self.ingroup_zorder
else:
return self.outgroup_zorder
# Maps nodes onto the full range of a cmap colour palette.
# Flows are coloured by their destination node.
# Any nodes not in the "nodes" list are considered part of the outgroup, and coloured differently.
class GradientStyle(DiagramStyle):
def __init__(self, nodes,
cmap=plt.get_cmap('YlOrRd'), ingroup_zorder=10,
outgroup_color='#666666', outgroup_zorder=1,
nodealpha=1.0, edgealpha=0.8,
curve=0.4,
facecolor='#181820', textcolor='#999999',
showlegend=True):
self.nodes = nodes
self.cmap = cmap
self.node_color_map = dict(zip(nodes, np.linspace(0, 1, len(nodes))))
self.ingroup_zorder = ingroup_zorder
self.outgroup_color = outgroup_color
self.outgroup_zorder = outgroup_zorder
self.nodealpha = nodealpha
self.edgealpha = edgealpha
self.curve = curve
self.facecolor = facecolor
self.textcolor = textcolor
self.showlegend = showlegend
def get_nodecolor(self, node):
if node in self.nodes:
return self.cmap(self.node_color_map[node])
else:
return self.outgroup_color
def get_nodealpha(self, node):
return self.nodealpha
def get_nodezorder(self, node):
if node in self.nodes:
return self.ingroup_zorder
else:
return self.outgroup_zorder
def get_edgecolor(self, step1, node1, step2, node2):
if node2 in self.nodes:
return self.cmap(self.node_color_map[node2])
else:
return self.outgroup_color
def get_edgealpha(self, step1, node1, step2, node2):
return self.edgealpha
def get_edgezorder(self, step1, node1, step2, node2):
if node2 in self.nodes:
return self.ingroup_zorder
else:
return self.outgroup_zorder
def get_curve(self):
return self.curve
def get_facecolor(self):
return self.facecolor
def get_textcolor(self):
return self.textcolor
def get_showlegend(self):
return self.showlegend
# ================
# = Plot helpers =
# ================
# centred on (x, y)
def box_path(x, y, w, h):
return Path([
(x - w/2.0, y - h/2.0),
(x - w/2.0, y + h/2.0),
(x + w/2.0, y + h/2.0),
(x + w/2.0, y - h/2.0),
(x - w/2.0, y - h/2.0),
], [
Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
])
def box_patch(x, y, w, h, color=None, label=None, **kwargs):
return patches.PathPatch(box_path(x, y, w, h),
linewidth=0, edgecolor=None,
facecolor=color, **kwargs)
# horizontal curve: 0..1, from straight line to hard curve around the midpoint.
def horiz_flow_path(x1, y1, x2, y2, curve):
dx = x2 - x1
midpoint = dx * curve
return Path([
(x1, y1),
(x1 + midpoint, y1),
(x2 - midpoint, y2),
(x2, y2)
], [
Path.MOVETO,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
])
def flow_patch(x1, y1, x2, y2, size, color=None, curve=0.7, **kwargs):
return patches.PathPatch(horiz_flow_path(x1=x1, y1=y1, x2=x2, y2=y2, curve=curve),
linewidth=size, edgecolor=color,
facecolor='none', **kwargs)
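# A minimal, illustrative sketch (hypothetical helper, not used by the classes
# below) of how the 'curve' argument bends the cubic Bezier built by
# horiz_flow_path: values near 0 give an almost straight segment, larger
# values give a harder curve around the midpoint.
def _demo_flow_curve():
    fig, ax = plt.subplots(figsize=(6, 3))
    for c, col in zip((0.1, 0.4, 0.9), ('#cccccc', '#75A8EB', '#333333')):
        ax.add_patch(flow_patch(0.0, 0.0, 1.0, 1.0, size=2, color=col, curve=c))
    ax.set_xlim(-0.1, 1.1)
    ax.set_ylim(-0.1, 1.1)
    return fig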
# ========
# = Plot =
# ========
class AlluvialFlowDiagram:
    # alluvial_flow_layout: an AlluvialFlowLayout instance
def __init__(self, alluvial_flow_layout):
self.layout = alluvial_flow_layout
# size: plot size as (x, y) tuple
# style: a DiagramStyle instance
# credits: copyright string
def plot(self, size=(16,9), style=SimpleStyle(), credits=None):
fig = plt.figure(figsize=size, facecolor=style.get_facecolor())
ax = plt.gca()
# edges
point_height = size[1] * 72.0
yrange = self.layout.maxy - self.layout.miny
for step1, step2 in zip(self.layout.steps[:-1], self.layout.steps[1:]):
for node1 in self.layout.nodes:
for node2 in self.layout.nodes:
try:
                        flow_size = self.layout.edge_size[step1][node1][node2]
                        line_width = (flow_size * (point_height / yrange)) * 0.8 # corresponding width in points
node_w = self.layout.node_width / 2.0 #* 1.5 # slight overlap
ax.add_patch(flow_patch(
self.layout.step_x[step1] + node_w,
self.layout.edge_node1_y[step1][node1][node2],
self.layout.step_x[step2] - node_w,
self.layout.edge_node2_y[step2][node1][node2],
size=line_width,
color=style.get_edgecolor(step1, node1, step2, node2),
alpha=style.get_edgealpha(step1, node1, step2, node2),
zorder=style.get_edgezorder(step1, node1, step2, node2),
curve=style.get_curve()
))
except KeyError:
                        # no flow recorded between node1 and node2 at this step; skip
                        pass
# nodes
for step1, step2 in zip(self.layout.steps[:-1], self.layout.steps[1:]):
for node in self.layout.nodes:
try:
# src port
x = self.layout.step_x[step1] + self.layout.node_width/2.0
y1 = self.layout.node1_y1[step1][node]
y2 = self.layout.node1_y2[step1][node]
ax.add_patch(box_patch(
x, (y1+y2)/2.0,
w=self.layout.node_width, h=(y2-y1),
label=node,
color=style.get_nodecolor(node),
alpha=style.get_nodealpha(node),
zorder=style.get_nodezorder(node)
))
# dst port
x = self.layout.step_x[step2] - self.layout.node_width/2.0
y1 = self.layout.node2_y1[step2][node]
y2 = self.layout.node2_y2[step2][node]
ax.add_patch(box_patch(
x, (y1+y2)/2.0,
w=self.layout.node_width, h=(y2-y1),
label=node,
color=style.get_nodecolor(node),
alpha=style.get_nodealpha(node),
zorder=style.get_nodezorder(node)
))
except KeyError:
                    # node has no ports for this step pair; nothing to draw
                    pass
# credits
if credits:
last_step = self.layout.steps[-1]
if self.layout.compact:
# on top of last node
last_node = self.layout.nodes[-1]
last_step_maxy = self.layout.node2_y2[last_step][last_node]
x = self.layout.step_x[last_step]
y = last_step_maxy + self.layout.node_margin
else:
# to the right of last step
x = self.layout.step_x[last_step] + 0.2
y = 0
plt.text(x, y, credits,
rotation='vertical', color=style.get_textcolor(),
horizontalalignment='center', verticalalignment='bottom')
# step labels
for step in self.layout.steps:
plt.text(self.layout.step_x[step], 0 - self.layout.node_margin,
step, rotation='vertical', color=style.get_textcolor(),
horizontalalignment='center', verticalalignment='top')
# node legend
if style.get_showlegend():
rev_nodes = self.layout.nodes[::-1] # reverse order
artists = [box_patch(0, 0,
w=self.layout.node_width, h=self.layout.node_width,
label=node, color=style.get_nodecolor(node), alpha=1)
for node in rev_nodes]
leg = plt.legend(artists, rev_nodes, frameon=False)
for node, txt in zip(rev_nodes, leg.get_texts()):
txt.set_color(style.get_nodecolor(node))
# txt.set_color(style.get_textcolor())
# ax.autoscale_view()
plt.axis('off')
plt.xlim(self.layout.minx, self.layout.maxx)
plt.ylim(self.layout.miny, self.layout.maxy)
return fig
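# A minimal usage sketch: 'layout' is assumed to be an AlluvialFlowLayout
# instance already built from flow data (its constructor is defined earlier in
# this module and not repeated here); the node names below are placeholders.
def _example_plot_alluvial(layout, outfile='alluvial.png'):
    # Highlight two nodes and every flow arriving into them.
    style = IngroupInflowStyle(ingroup_nodes=['node_a', 'node_b'])
    diagram = AlluvialFlowDiagram(layout)
    fig = diagram.plot(size=(16, 9), style=style, credits='data: example')
    fig.savefig(outfile, facecolor=fig.get_facecolor(), bbox_inches='tight')
    return fig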
|
agpl-3.0
|
thientu/scikit-learn
|
sklearn/feature_selection/rfe.py
|
64
|
17509
|
# Authors: Alexandre Gramfort <[email protected]>
# Vincent Michel <[email protected]>
# Gilles Louppe <[email protected]>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import warnings
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import check_cv
from ..cross_validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
    absolute weights are the smallest are pruned from the current set of features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranking position of the i-th feature. Selected (i.e., estimated
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
    The following example shows how to retrieve the 5 informative
features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
estimator_params=None, verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.estimator_params = estimator_params
self.verbose = verbose
@property
def _estimator_type(self):
return self.estimator._estimator_type
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
return self._fit(X, y)
def _fit(self, X, y, step_score=None):
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
            n_features_to_select = n_features // 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
if step_score:
self.scores_ = []
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
if self.estimator_params:
estimator.set_params(**self.estimator_params)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
# Get coefs
if hasattr(estimator, 'coef_'):
coefs = estimator.coef_
elif hasattr(estimator, 'feature_importances_'):
coefs = estimator.feature_importances_
else:
raise RuntimeError('The classifier does not expose '
'"coef_" or "feature_importances_" '
'attributes')
# Get ranks
if coefs.ndim > 1:
ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(coefs))
            # for the sparse case, ranks is a matrix
ranks = np.ravel(ranks)
            # Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
# Compute step score on the previous selection iteration
# because 'estimator' must use features
# that have not been eliminated yet
if step_score:
self.scores_.append(step_score(estimator, features))
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
features = np.arange(n_features)[support_]
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(X[:, features], y)
# Compute step score when only n_features_to_select features left
if step_score:
self.scores_.append(step_score(self.estimator_, features))
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
return self.estimator_.predict_log_proba(self.transform(X))
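# A minimal sketch of using a fitted RFE object as a transformer/predictor,
# reusing the Friedman #1 setup from the docstring example above.
def _example_rfe_transform():
    from sklearn.datasets import make_friedman1
    from sklearn.svm import SVR
    X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
    selector = RFE(SVR(kernel="linear"), n_features_to_select=5, step=1).fit(X, y)
    X_reduced = selector.transform(X) # only the 5 selected columns remain
    y_pred = selector.predict(X) # predictions from the estimator refit on them
    return X_reduced.shape, y_pred.shape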
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y``
        is either binary or multiclass, :class:`StratifiedKFold` is used.
        In all other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
    ranking_ : array of shape [n_features]
        The feature ranking, such that ``ranking_[i]`` corresponds to the
        ranking position of the i-th feature. Selected (i.e., estimated
        best) features are assigned rank 1.
    grid_scores_ : array of shape [n_subsets_of_features]
        The cross-validation scores such that ``grid_scores_[i]`` corresponds
        to the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Notes
-----
The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1,
where step is the number of features removed at each iteration.
Examples
--------
    The following example shows how to retrieve the 5 informative features
    (not known a priori) in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None,
estimator_params=None, verbose=0):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. "
"The parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
# Initialization
cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
n_features_to_select = 1
# Determine the number of subsets of features
scores = []
# Cross-validation
for n, (train, test) in enumerate(cv):
X_train, y_train = _safe_split(self.estimator, X, y, train)
X_test, y_test = _safe_split(self.estimator, X, y, test, train)
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params,
verbose=self.verbose - 1)
rfe._fit(X_train, y_train, lambda estimator, features:
_score(estimator, X_test[:, features], y_test, scorer))
scores.append(np.array(rfe.scores_[::-1]).reshape(1, -1))
scores = np.sum(np.concatenate(scores, 0), 0)
# The index in 'scores' when 'n_features' features are selected
n_feature_index = np.ceil((n_features - n_features_to_select) /
float(self.step))
n_features_to_select = max(n_features_to_select,
n_features - ((n_feature_index -
np.argmax(scores)) *
self.step))
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(self.transform(X), y)
        # Note: after the loop above, n equals len(cv) - 1, so the summed
        # scores are normalized by the number of folds, len(cv)
self.grid_scores_ = scores / len(cv)
return self
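# A minimal sketch of reading off the best subset size from ``grid_scores_``,
# assuming a plain binary classification problem; with ``step=1`` the i-th
# entry corresponds to keeping (i + 1) features (the dataset and estimator
# below are illustrative placeholders).
def _example_rfecv_scores():
    from sklearn.datasets import make_classification
    from sklearn.svm import SVC
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=5, random_state=0)
    selector = RFECV(SVC(kernel="linear"), step=1, cv=5).fit(X, y)
    best_n = int(np.argmax(selector.grid_scores_)) + 1
    return best_n, selector.grid_scores_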
|
bsd-3-clause
|
Didou09/tofu
|
tofu/defaults.py
|
2
|
58540
|
"""
This module stores all the default settings of ToFu,
including in particular computing parameters, dictionaries and figures.
"""
#import matplotlib
#matplotlib.use('WxAgg')
#matplotlib.interactive(True)
import matplotlib.pyplot as plt
from matplotlib.path import Path
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
#from mayavi import mlab
import datetime as dtm
import time as time
"""
###############################################################################
###############################################################################
###############################################################################
Defaults settings generic
###############################################################################
###############################################################################
"""
# Default saving Paths
KeyRP = '/ToFu/src'
SaveAddIn = '/Objects_AUG/'
SaveAddObj = '/Objects_AUG/'
SaveAddOut = '/Outputs_AUG/'
MeshSaveAdd = '/Objects/'
dtmFormat = "D%Y%m%d_T%H%M%S"
AllowedExp = [None,'AUG','MISTRAL','JET','ITER','TCV','TS','WEST','KSTAR','Misc','Test']
"""
###############################################################################
###############################################################################
###############################################################################
Defaults settings of ToFu_Geom
###############################################################################
###############################################################################
"""
# ---------- Common to several classes ----------
Legpropd = {'size':10}
TorLegd = {'frameon':False,'ncol':1,'bbox_to_anchor':(1.01, 1),'loc':2,'borderaxespad':0.,'prop':Legpropd}
#####################################################################
######################## Ves class ################################
#####################################################################
# ------------ Computing settings ---------------
TorNP = 50
TorRelOff = 0.05
TorInsideNP = 100
TorSplprms = [100.,2.,3]
DetBaryCylNP1 = 50
DetBaryCylNP2 = 200
# --- Plotting dictionaries and parameters ------
TorPd = {'c':'k','lw':2}
TorId = {'c':'k','ls':'dashed','marker':'x','markersize':8,'mew':2}
TorBsd = {'c':'b','ls':'dashed','marker':'x','markersize':8,'mew':2}
TorBvd = {'c':'g','ls':'dashed','marker':'x','markersize':8,'mew':2}
TorVind = {'color':'r','scale':10}
TorITord = {'c':'k','ls':'dashed'}
TorBsTord = {'c':'b','ls':'dashed'}
TorBvTord = {'c':'g','ls':'dashed'}
TorNTheta = 50
Tor3DThetalim = [np.pi/2,2*np.pi]
Tor3DThetamin = np.pi/20.
TorP3Dd = {'color':(0.8,0.8,0.8,1.),'rstride':1,'cstride':1,'linewidth':0, 'antialiased':False}
TorPFilld = {'edgecolor':(0.8,0.8,0.8,1.),'facecolor':(0.8,0.8,0.8,1.),'linestyle':'solid','linewidth':1}
TorPAng = 'theta'
TorPAngUnit = 'rad'
TorPSketch = True
TorP3DFilld = {'color':(0.8,0.8,0.8,0.4),'linestyle':'solid','linewidth':0}
Vesdict = dict(Lax=None, Proj='All', Elt='PIBsBvV', Pdict=None, Idict=TorId, Bsdict=TorBsd, Bvdict=TorBvd, Vdict=TorVind,
IdictHor=TorITord, BsdictHor=TorBsTord, BvdictHor=TorBvTord, Lim=Tor3DThetalim, Nstep=TorNTheta, LegDict=TorLegd, draw=True, Test=True)
# -------------- Figures ------------------------
def Plot_LOSProj_DefAxes(Mode, Type='Tor', a4=False):
assert Mode in ['Cross','Hor','All'], "Arg should be 'Cross' or 'Hor' or 'All' !"
assert Type in ['Tor','Lin'], "Arg Type must be in ['Tor','Lin'] !"
if Mode == 'Cross':
fW,fH,fdpi,axCol = (6,8,80,'w') if not a4 else (8.27,11.69,80,'w')
axPos = [0.15, 0.15, 0.6, 0.7]
f = plt.figure(facecolor="w",figsize=(fW,fH),dpi=fdpi)
ax = f.add_axes(axPos,frameon=True,axisbg=axCol)
if Type=='Tor':
ax.set_xlabel(r"R (m)"), ax.set_ylabel(r"Z (m)")
else:
ax.set_xlabel(r"Y (m)"), ax.set_ylabel(r"Z (m)")
ax.set_aspect(aspect="equal", adjustable='datalim')
return ax
elif Mode == 'Hor':
fW,fH,fdpi,axCol = (6,8,80,'w') if not a4 else (8.27,11.69,80,'w')
axPos = [0.15, 0.15, 0.6, 0.7]
f = plt.figure(facecolor="w",figsize=(fW,fH),dpi=fdpi)
ax = f.add_axes(axPos,frameon=True,axisbg=axCol)
ax.set_xlabel(r"X (m)"), ax.set_ylabel(r"Y (m)")
ax.set_aspect(aspect="equal", adjustable='datalim')
return ax
elif Mode=='All':
fW,fH,fdpi,axCol = (16,8,80,'w') if not a4 else (11.69,8.27,80,'w')
axPosP, axPosT = [0.07, 0.1, 0.3, 0.8], [0.55, 0.1, 0.3, 0.8]
f = plt.figure(facecolor="w",figsize=(fW,fH),dpi=fdpi)
axP = f.add_axes(axPosP,frameon=True,axisbg=axCol)
axT = f.add_axes(axPosT,frameon=True,axisbg=axCol)
if Type=='Tor':
axP.set_xlabel(r"R (m)"), axP.set_ylabel(r"Z (m)")
else:
axP.set_xlabel(r"Y (m)"), axP.set_ylabel(r"Z (m)")
axT.set_xlabel(r"X (m)"), axT.set_ylabel(r"Y (m)")
axP.set_aspect(aspect="equal", adjustable='datalim')
axT.set_aspect(aspect="equal", adjustable='datalim')
return axP, axT
def Plot_3D_plt_Tor_DefAxes(a4=False):
fW,fH,fdpi,axCol = (14,10,80,'w') if not a4 else (11.69,8.27,80,'w')
axPos = [0.05, 0.05, 0.75, 0.85]
f = plt.figure(facecolor="w",figsize=(fW,fH),dpi=fdpi)
ax = f.add_axes(axPos,axisbg=axCol,projection='3d')
ax.set_xlabel(r"X (m)")
ax.set_ylabel(r"Y (m)")
ax.set_zlabel(r"Z (m)")
ax.set_aspect(aspect="equal", adjustable='datalim')
return ax
def Plot_Impact_DefAxes(Proj, Ang='theta', AngUnit='rad', a4=False, Sketch=True):
if Proj == 'Cross':
fW,fH,fdpi,axCol = (10,6,80,'w') if not a4 else (11.69,8.27,80,'w')
axPos = [0.12, 0.12, 0.60, 0.8]
f = plt.figure(facecolor="w",figsize=(fW,fH),dpi=fdpi)
ax, axSketch = f.add_axes(axPos,frameon=True,axisbg=axCol), []
XAng = r"$\theta$" if Ang=='theta' else r"$\xi$"
XUnit = r"$(rad.)$" if AngUnit=='rad' else r"$(deg.)$"
XTickLab = [r"$0$",r"$\pi/4$",r"$\pi/2$",r"$3\pi/4$",r"$\pi$"] if AngUnit=='rad' else [r"$0$",r"$90$",r"$180$",r"$270$",r"$360$"]
ax.set_xlabel(XAng+r" "+XUnit)
ax.set_ylabel(r"$p$ $(m)$")
ax.set_xlim(0,np.pi)
ax.set_ylim(-1.5,1.5)
ax.set_xticks(np.pi*np.array([0.,1./4.,1./2.,3./4.,1.]))
ax.set_xticklabels(XTickLab)
if Sketch:
axSketch = f.add_axes([0.75, 0.10, 0.15, 0.15],frameon=False,axisbg=axCol)
Pt, Line, Hor, theta, ksi = np.array([[0,-0.8],[0,0.8]]), np.array([[-1.6,0.1],[0,1.7]]), np.array([[-0.4,0.2],[1.2,1.2]]), np.linspace(0,3.*np.pi/4.,30), np.linspace(0,np.pi/4.,10)
theta, ksi = np.array([0.3*np.cos(theta),0.3*np.sin(theta)]), np.array([-0.4+0.4*np.cos(ksi), 1.2+0.4*np.sin(ksi)])
axSketch.plot(Pt[0,:],Pt[1,:],'+k',Pt[0,:],Pt[1,:],'--k',Line[0,:],Line[1,:],'-k', Hor[0,:],Hor[1,:],'-k', theta[0,:],theta[1,:],'-k', ksi[0,:],ksi[1,:],'-k')
axSketch.annotate(r"$\theta$", xy=(0.3,0.4),xycoords='data',va="center", ha="center")
axSketch.annotate(r"$\xi$", xy=(0.1,1.4),xycoords='data',va="center", ha="center")
axSketch.annotate(r"$p$", xy=(-0.7,0.3),xycoords='data',va="center", ha="center")
axSketch.set_xticks([]), axSketch.set_yticks([])
axSketch.axis("equal")
return ax, axSketch
elif Proj.lower() == '3d':
fW,fH,fdpi,axCol = (11,9,80,'w') if not a4 else (11.69,8.27,80,'w')
axPos = [0.1, 0.1, 0.65, 0.8]
f = plt.figure(facecolor="w",figsize=(fW,fH),dpi=fdpi)
ax = f.add_axes(axPos,axisbg=axCol,projection='3d')
XAng = r"$\theta$" if Ang=='theta' else r"$\xi$"
XUnit = r"$(rad.)$" if AngUnit=='rad' else r"$(deg.)$"
XTickLab = [r"$0$",r"$\pi/4$",r"$\pi/2$",r"$3\pi/4$",r"$\pi$"] if AngUnit=='rad' else [r"$0$",r"$90$",r"$180$",r"$270$",r"$360$"]
ax.set_xlabel(XAng+r" "+XUnit)
ax.set_ylabel(r"$p$ $(m)$")
ax.set_zlabel(r"$\phi$ $(rad)$")
ax.set_xlim(0,np.pi)
ax.set_ylim(-1.5,1.5)
ax.set_zlim(-np.pi/2.,np.pi/2.)
ax.set_xticks(np.pi*np.array([0.,1./4.,1./2.,3./4.,1.]))
ax.set_xticklabels(XTickLab)
return [ax]
#def Plot_3D_mlab_Tor_DefFig():
# fW,fH,fBgC = 700,500,(1.,1.,1.)
# axPosP, axPosT = [0.07, 0.1, 0.3, 0.8], [0.55, 0.1, 0.3, 0.8]
# f = mlab.figure(bgcolor=fBgC,fgcolor=None,size=(fW,fH))
# return f
#####################################################################
######################## Struct class #############################
#####################################################################
# --- Plotting dictionaries and parameters ------
StructPd = {'edgecolor':'k','linewidth':1}
StructP3Dd = {'color':(0.8,0.8,0.8,1.),'rstride':1,'cstride':1,'linewidth':0, 'antialiased':False}
Vesdict = dict(Lax=None, Proj='All', Elt='PIBsBvV', Pdict=None, Idict=TorId, Bsdict=TorBsd, Bvdict=TorBvd, Vdict=TorVind,
IdictHor=TorITord, BsdictHor=TorBsTord, BvdictHor=TorBvTord, Lim=Tor3DThetalim, Nstep=TorNTheta, LegDict=TorLegd, draw=True, Test=True)
#####################################################################
######################## LOS class ################################
#####################################################################
# Number of points for plotting poloidal projection as a function of PolProjAng
def kpVsPolProjAng(x):
return np.ceil(25.*(1 - (x/(np.pi/4)-1)**2) + 2)
# ------------ Computing settings ---------------
LOSDiscrtSLim = (0.,1,)
LOSDiscrtSLMode = 'norm'
LOSDiscrtDS = 0.005
LOSDiscrtSMode = 'm'
# --- Plotting dictionaries and parameters ------
LOSLd = {'c':'k','lw':2}
LOSMd = {'c':'k','ls':'None','lw':2,'marker':'x','markersize':8,'mew':2}
LOSMImpd = {'c':'k','ls':'None','lw':2,'marker':'x','markersize':8,'mew':2}
LOSLplot = 'Tot'
LOSImpAng = 'theta'
LOSImpAngUnit = 'rad'
LOSImpElt = 'LV'
LOSdict = dict(Lax=None, Proj='All', Lplot=LOSLplot, Elt='LDIORP', EltVes='', Leg='', Ldict=LOSLd, MdictD=LOSMd, MdictI=LOSMd, MdictO=LOSMd, MdictR=LOSMd, MdictP=LOSMd, LegDict=TorLegd, Vesdict=Vesdict, draw=True, Test=True)
# -------------- Figures ------------------------
#####################################################################
###################### Lens class ################################
#####################################################################
# -------------- Figures ------------------------
def Plot_Lens_Alone_DefAxes(a4=False):
axCol = 'w'
(fW,fH) = (11.69,8.27) if a4 else (20,8)
axPos = [0.05, 0.1, 0.9, 0.85]
f = plt.figure(facecolor="w",figsize=(fW,fH))
ax = f.add_axes(axPos,axisbg=axCol)
ax.set_xlabel(r"x (m)")
ax.set_ylabel(r"y (m)")
return ax
#####################################################################
###################### Detect class ################################
#####################################################################
# ------------ Computing settings ---------------
DetSpanRMinMargin = 0.9
DetSpanNEdge = 5
DetSpanNRad = 5
DetConeNEdge = 8
DetConeNRad = 6
DetPreConedX12 = [0.01, 0.01]
DetPreConedX12Mode = 'abs'
DetPreConeds = 0.01
DetPreConedsMode = 'abs'
DetPreConeMarginS = 0.002
DetConeDX = 0.002
DetConeDRY = 0.0025 # 0.0025
DetConeDTheta = np.pi/1024. # 512.
DetConeDZ = 0.0025 # 0.0025
#DetConeNTheta = 25 # 25
#DetConeNZ = 50 # 50
DetConeRefdMax = 0.02
DetEtendMethod = 'quad'
DetEtenddX12 = [0.01, 0.01]
DetEtenddX12Mode = 'rel'
DetEtendepsrel = 1.e-3
DetEtendRatio = 0.02
DetCalcEtendColis = False
DetCalcSAngVectColis = True
DetCalcSAngVectCone = True
DetSynthEpsrel = 1.e-4
DetSynthdX12 = [0.005, 0.005]
DetSynthdX12Mode = 'abs'
DetSynthds = 0.005
DetSynthdsMode = 'abs'
DetSynthMarginS = 0.001
# --- Plotting dictionaries and parameters ------
ApPd = {'c':'k','lw':2,'ls':'solid'}
ApVd = {'color':'r','lw':2,'ls':'solid'}
DetPd = {'c':'k','lw':2,'ls':'solid'}
DetVd = {'color':'r','lw':2,'ls':'solid'}
DetSAngPld = {'cmap':plt.cm.YlOrRd,'lw':0.,'rstride':1,'cstride':1, 'antialiased':False, 'edgecolor':'None'}
DetSangPlContd = {'linewidths':0.}
DetConed = {'edgecolors':'k', 'facecolors':(0.8,0.8,0.8,0.2), 'alpha':0.2, 'linewidths':0., 'linestyles':'-', 'antialiaseds':False}
DetImpd = {'ls':'solid','c':'k','lw':1}
ApLVin = 0.1
DetSAngPlRa = 0.5
DetSAngPldX12 = [0.025,0.025]
DetSAngPldX12Mode = 'rel'
DetSAngPlRatio = 0.01
DetEtendOnLOSNP = 20
DetEtendOnLOSModes = ['trapz']
DetEtendOnLOSLd = {'ls':'solid','c':'k','lw':2}
DetSAngPlot = 'Int'
DetSAngPlotMode = 'scatter'
DetSAngPlotd = {'cmap':plt.cm.YlOrRd}
DetSAngPlotLvl = 20
DetSliceAGdR = 0.005
DetSliceAGdY = 0.005
DetSliceAGdX = 0.01
DetSliceAGdTheta = np.pi/512.
DetSliceAGdZ = 0.005
DetSliceNbd = {'scatter':{'cmap':plt.cm.Greys,'marker':'s','edgecolors':'None','s':10},
'contour':{'cmap':plt.cm.Greys},
'contourf':{'cmap':plt.cm.Greys},
'imshow':{'cmap':plt.cm.Greys}}
DetSliceSAd = {'scatter':{'cmap':plt.cm.YlOrRd,'marker':'s','edgecolors':'None','s':10, 'vmin':0},
'contour':{'cmap':plt.cm.YlOrRd, 'vmin':0},
'contourf':{'cmap':plt.cm.YlOrRd, 'vmin':0},
'imshow':{'cmap':plt.cm.YlOrRd, 'vmin':0}}
DetPolProjNTheta = 50
DetPolProjNZ = 25
DetSAngColis = True
GDetEtendMdA = {'ls':'None','c':'k','lw':2,'marker':'+'}
GDetEtendMdR = {'ls':'None','c':'b','lw':2,'marker':'x'}
GDetEtendMdS = {'ls':'None','c':'g','lw':2,'marker':'o'}
GDetEtendMdP = {'ls':'None','c':'r','lw':2,'marker':'*'}
GDetSigd = {'ls':'solid','c':'k','lw':2,'marker':'+'}
Apertdict = dict(Lax=None, Proj='All', Elt='PV', EltVes='', Leg='', LVIn=ApLVin, Pdict=ApPd, Vdict=ApVd, Vesdict=Vesdict, LegDict=TorLegd, draw=True, Test=True)
#Detdict =
# -------------- Figures ------------------------
def Plot_SAng_Plane_DefAxes(a4=False):
(fW,fH,fdpi,axCol) = (11.69,8.27,80,'w') if a4 else (10,8,80,'w')
axPos = [0.05, 0.05, 0.9, 0.9]
f = plt.figure(facecolor="w",figsize=(fW,fH),dpi=fdpi)
ax = f.add_axes(axPos,axisbg=axCol,projection='3d')
ax.set_xlabel(r"X1 (m)")
ax.set_ylabel(r"X2 (m)")
ax.set_zlabel(r"$\Omega$ (sr)")
return ax
def Plot_Etendue_AlongLOS_DefAxes(kMode='rel',a4=False):
(fW,fH,fdpi,axCol) = (11.69,8.27,80,'w') if a4 else (14,8,80,'w')
axPos = [0.06, 0.08, 0.70, 0.86]
f = plt.figure(facecolor="w",figsize=(fW,fH),dpi=fdpi)
ax = f.add_axes(axPos,frameon=True,axisbg=axCol)
if kMode.lower()=='rel':
ax.set_xlabel(r"Rel. length (adim.)")
else:
ax.set_xlabel(r"Length (m)")
ax.set_ylabel(r"Etendue ($sr.m^2$)")
return ax
def Plot_CrossSlice_SAngNb_DefAxes(VType='Tor', a4=False):
(fW,fH,fdpi,axCol) = (11.69,8.27,80,'w') if a4 else (15,8,80,'w')
f = plt.figure(facecolor="w",figsize=(fW,fH),dpi=fdpi)
axSAng = f.add_axes([0.05, 0.06, 0.40, 0.85],frameon=True,axisbg=axCol)
axNb = f.add_axes([0.60, 0.06, 0.40, 0.85],frameon=True,axisbg=axCol)
if VType=='Tor':
axSAng.set_xlabel(r"R (m)"), axNb.set_xlabel(r"R (m)")
elif VType=='Lin':
axSAng.set_xlabel(r"Y (m)"), axNb.set_xlabel(r"Y (m)")
axSAng.set_ylabel(r"Z (m)"), axNb.set_ylabel(r"Z (m)")
axSAng.set_aspect(aspect="equal", adjustable='datalim')
axNb.set_aspect(aspect="equal", adjustable='datalim')
return axSAng, axNb
def Plot_HorSlice_SAngNb_DefAxes(a4=False):
(fW,fH,fdpi,axCol) = (11.69,8.27,80,'w') if a4 else (15,8,80,'w')
f = plt.figure(facecolor="w",figsize=(fW,fH),dpi=fdpi)
axSAng = f.add_axes([0.07, 0.12, 0.35, 0.8],frameon=True,axisbg=axCol)
axNb = f.add_axes([0.55, 0.12, 0.35, 0.8],frameon=True,axisbg=axCol)
axSAng.set_xlabel(r"X (m)"), axSAng.set_ylabel(r"Y (m)")
axNb.set_xlabel(r"X (m)"), axNb.set_ylabel(r"Y (m)")
axSAng.set_aspect(aspect="equal", adjustable='datalim')
axNb.set_aspect(aspect="equal", adjustable='datalim')
return axSAng, axNb
def Plot_Etendues_GDetect_DefAxes(a4=False):
(fW,fH,fdpi,axCol) = (11.69,8.27,80,'w') if a4 else (18,8,80,'w')
f = plt.figure(facecolor="w",figsize=(fW,fH),dpi=fdpi)
ax = f.add_axes([0.05,0.1,0.85,0.80],frameon=True,axisbg=axCol)
ax.set_xlabel(r"")
ax.set_ylabel(r"Etendue (sr.m^2)")
return ax
def Plot_Sig_GDetect_DefAxes(a4=False):
(fW,fH,fdpi,axCol) = (11.69,8.27,80,'w') if a4 else (18,8,80,'w')
f = plt.figure(facecolor="w",figsize=(fW,fH),dpi=fdpi)
ax = f.add_axes([0.05,0.1,0.85,0.80],frameon=True,axisbg=axCol)
ax.set_xlabel(r"")
ax.set_ylabel(r"Signal (W)")
return ax
#Ldict_mlab_Def = {'color':(0.,0.,0.),'tube_radius':None}
#Mdict_mlab_Def = {'color':(0.,0.,0.),'line_width':1,'mode':'sphere'}
#Dict_3D_mlab_Tor_Def = {'color':(0.8,0.8,0.8),'opacity':0.15,'transparent':False,'scale_factor':0.1}
def Plot_GDetect_Resolution_DefAxes(VType='Tor', a4=False):
axCol = "w"
(fW,fH) = (11.69,8.27) if a4 else (16,10)
f = plt.figure(figsize=(fW,fH),facecolor=axCol)
ax1 = f.add_axes([0.05, 0.06, 0.32, 0.80], frameon=True, axisbg=axCol)
ax2 = f.add_axes([0.50, 0.55, 0.47, 0.40], frameon=True, axisbg=axCol)
ax3 = f.add_axes([0.50, 0.06, 0.47, 0.40], frameon=True, axisbg=axCol)
X1 = r"R (m)" if VType=='Tor' else r"Y (m)"
ax1.set_xlabel(X1)
ax1.set_ylabel(r"Z (m)")
ax2.set_xlabel(r"size (a.u.)")
ax2.set_ylabel(r"Signal (mW)")
ax3.set_xlabel(r"Channels index (from 0)")
ax3.set_ylabel(r"Signal (mW)")
ax1.set_aspect(aspect='equal',adjustable='datalim')
return ax1, ax2, ax3
"""
###############################################################################
###############################################################################
###############################################################################
Defaults settings of ToFu_Mesh
###############################################################################
###############################################################################
"""
# Computing
BF2IntMode = 'Surf'
Mesh1DDefName = 'NoName'
Mesh2DDefName = 'NoName'
L1DRes = 0.001
L1Mode = 'Larger'
L1Tol = 1e-14
L1IntOpSpa = False
L1IntOpSpaFormat = 'dia'
def BF2_DVect_DefR(Points):
Theta = np.arctan2(Points[1,:],Points[0,:])
return np.array([np.cos(Theta),np.sin(Theta),np.zeros((Points.shape[1],))])
def BF2_DVect_DefZ(Points):
return np.array([[0.],[0.],[1.]])*np.ones((1,Points.shape[1]))
def BF2_DVect_DefTor(Points):
Theta = np.arctan2(Points[1,:],Points[0,:])
return np.array([-np.sin(Theta),np.cos(Theta),np.zeros((Points.shape[1],))])
# --- Plotting dictionaries and parameters ------
Legpropd = {'size':10}
M2Legd = {'frameon':False,'ncol':1,'bbox_to_anchor':(1.22, 1.12),'loc':2,'borderaxespad':0.5,'prop':Legpropd}
M1Kd = {'c':'b', 'marker':'x', 'markersize':8, 'ls':'None', 'lw':3.}
M1Cd = {'c':'r', 'marker':'o', 'markersize':5, 'ls':'None', 'lw':1.}
M1Resd = {'c':'k', 'ls':'solid', 'lw':2.}
M2Bckd = {'color':(0.9,0.9,0.9), 'marker':'None', 'linestyle':'-', 'linewidth':1.}
M2Mshd = {'color':'k', 'marker':'None', 'linestyle':'-', 'linewidth':0.5}
M2Kd = {'c':'b', 'marker':'x', 'markersize':8, 'ls':'None', 'mew':2.}
M2Cd = {'c':'r', 'marker':'o', 'markersize':6, 'ls':'None', 'mew':0.}
M2Sd = {'cmap':plt.cm.YlOrRd,'edgecolor':None}
BF1Sub = 0.1
BF1SubMode = 'rel'
BF1Fd = {'lw':1,'ls':'solid'}
BF1Totd = {'c':'k','lw':2,'ls':'solid'}
BF2Sub = (0.1,0.1)
BF2SubMode = 'rel'
BF2PlotMode = 'contourf'
BF2PlotSubP = 0.25
BF2PlotSubMode = 'rel'
BF2PlotNC = 25
BF2PlotTotd = {'cmap':plt.cm.YlOrRd,'edgecolor':None}
BF2PlotIndSd = {'edgecolor':(0.8,0.8,0.8,1.),'facecolor':(0.8,0.8,0.8,1.),'linestyle':'solid','linewidth':1}
BF2PlotIndPd = {'c':'g', 'marker':'+', 'markersize':8, 'ls':'None', 'mew':2., 'lw':'none'}
# -------------- Figures ------------------------
def Plot_Mesh1D_DefAxes(a4=False):
fdpi,axCol = 80,'w'
(fW,fH) = (11.69,8.27) if a4 else (10,3)
axPos = [0.04, 0.17, 0.8, 0.7]
f = plt.figure(facecolor="w",figsize=(fW,fH),dpi=fdpi)
ax = f.add_axes(axPos,frameon=True,axisbg=axCol)
ax.set_xlabel(r"X (m)")
return ax
def Plot_Res_Mesh1D_DefAxes(a4=False):
fdpi,axCol = 80,'w'
(fW,fH) = (11.69,8.27) if a4 else (10,5)
axPos = [0.06, 0.17, 0.75, 0.7]
f = plt.figure(facecolor="w",figsize=(fW,fH),dpi=fdpi)
ax = f.add_axes(axPos,frameon=True,axisbg=axCol)
ax.set_xlabel(r"X (m)"), ax.set_ylabel(r"$\Delta$ X (m)")
return ax
def Plot_Mesh2D_DefAxes(VType='Tor', a4=False):
fdpi,axCol = 80,'w'
(fW,fH) = (11.69,8.27) if a4 else (8,8)
axPos = [0.12, 0.08, 0.68, 0.88]
f = plt.figure(facecolor="w",figsize=(fW,fH),dpi=fdpi)
ax = f.add_axes(axPos,frameon=True,axisbg=axCol)
Xlab = r"R (m)" if VType=='Tor' else r"Y (m)"
ax.set_xlabel(Xlab)
ax.set_ylabel(r"Z (m)")
ax.set_aspect("equal", adjustable='datalim')
return ax
def Plot_Res_Mesh2D_DefAxes(a4=False, VType='Tor'):
fdpi,axCol = 80,'w'
(fW,fH) = (11.69,8.27) if a4 else (11,11)
axPos1, axPos2, axPos3, axPoscb = [0.1, 0.07, 0.60, 0.65], [0.75, 0.07, 0.23, 0.65], [0.1, 0.75, 0.60, 0.23], [0.75, 0.75, 0.03, 0.23]
f = plt.figure(facecolor="w",figsize=(fW,fH),dpi=fdpi)
ax1 = f.add_axes(axPos1,frameon=True,axisbg=axCol)
ax2 = f.add_axes(axPos2,frameon=True,axisbg=axCol)
ax3 = f.add_axes(axPos3,frameon=True,axisbg=axCol)
axcb = f.add_axes(axPoscb,frameon=True,axisbg=axCol)
X1str = r"R (m)" if VType=='Tor' else r"Y (m)"
ax1.set_xlabel(X1str), ax1.set_ylabel(r"Z (m)")
ax2.set_xlabel(r"Res. (m)"), ax2.set_ylabel(r"")
ax3.set_xlabel(r""), ax3.set_ylabel(r"Res. (m)")
ax1.set_aspect("equal", adjustable='datalim')
return ax1, ax2, ax3, axcb
def Plot_BSpline_DefAxes(Mode):
if Mode=='1D':
fW,fH,fdpi,axCol = 12,6,80,'w'
axPos = [0.08, 0.1, 0.78, 0.8]
f = plt.figure(facecolor="w",figsize=(fW,fH),dpi=fdpi)
ax = f.add_axes(axPos,frameon=True,axisbg=axCol)
ax.set_xlabel(r"$X$ ($m$)")
ax.set_ylabel(r"$Y$ ($a.u.$)")
return ax
elif Mode=='2D':
fW,fH,fdpi,axCol = 10,8,80,'w'
axPos = [0.1, 0.1, 0.8, 0.8]
f = plt.figure(facecolor="w",figsize=(fW,fH),dpi=fdpi)
ax = f.add_axes(axPos,frameon=True,axisbg=axCol)
ax.set_xlabel(r"$R$ ($m$)")
ax.set_ylabel(r"$Z$ ($m$)")
ax.axis("equal")
return ax
elif Mode=='3D':
fW,fH,fdpi,axCol = 10,8,80,'w'
axPos = [0.1, 0.1, 0.8, 0.8]
f = plt.figure(facecolor="w",figsize=(fW,fH),dpi=fdpi)
ax = f.add_axes(axPos,axisbg=axCol,projection='3d')
ax.set_xlabel(r"$R$ ($m$)")
ax.set_ylabel(r"$Z$ ($m$)")
ax.set_zlabel(r"Emiss. (a.u.)")
return ax
def Plot_BSplineFit_DefAxes(Mode):
if Mode=='2D':
fW,fH,fdpi,axCol = 14,8,80,'w'
axPos1, axPos2 = [0.1, 0.1, 0.4, 0.8], [0.55, 0.1, 0.4, 0.8]
f = plt.figure(facecolor="w",figsize=(fW,fH),dpi=fdpi)
ax1 = f.add_axes(axPos1,frameon=True,axisbg=axCol)
ax2 = f.add_axes(axPos2,frameon=True,axisbg=axCol)
ax1.set_xlabel(r"$R$ ($m$)"), ax1.set_ylabel(r"$Z$ ($m$)")
ax2.set_xlabel(r"$R$ ($m$)")
ax1.axis("equal"), ax2.axis("equal")
return ax1, ax2
elif Mode=='3D':
fW,fH,fdpi,axCol = 10,8,80,'w'
axPos1, axPos2 = [0.1, 0.1, 0.4, 0.8], [0.55, 0.1, 0.4, 0.8]
f = plt.figure(facecolor="w",figsize=(fW,fH),dpi=fdpi)
ax1 = f.add_axes(axPos1,axisbg=axCol,projection='3d')
ax2 = f.add_axes(axPos2,axisbg=axCol,projection='3d')
ax1.set_xlabel(r"$R$ $(m)$"), ax1.set_ylabel(r"$Z$ $(m)$")
ax2.set_xlabel(r"$R$ $(m)$"), ax2.set_ylabel(r"$Z$ $(m)$")
ax1.set_zlabel(r"$Emiss.$ $(a.u.)$"), ax2.set_zlabel(r"$Emiss.$ $(a.u.)$")
return ax1, ax2
def Plot_BSpline_Deriv_DefAxes(Deg):
fW,fH,fdpi,axCol = 10,8,80,'w'
f = plt.figure(facecolor="w",figsize=(fW,fH),dpi=fdpi)
ax = []
for ii in range(0,Deg+1):
ax.append(f.add_subplot(Deg+1,1,ii+1))
ax[ii].set_xlabel(r"x (a.u.)")
ax[ii].set_ylabel(r"$d^{"+str(ii)+"}$ (a.u.)")
return ax
def Plot_BaseFunc2D_BFuncMesh_DefAxes():
fW,fH,fdpi,axCol = 15,8,80,'w'
axPos1, axPos2 = [0.05, 0.1, 0.4, 0.8], [0.55, 0.1, 0.4, 0.8]
f = plt.figure(facecolor="w",figsize=(fW,fH),dpi=fdpi)
ax1 = f.add_axes(axPos1,frameon=True,axisbg=axCol)
ax2 = f.add_axes(axPos2,axisbg=axCol,projection='3d')
ax1.set_xlabel(r"R (m)"), ax2.set_xlabel(r"R (m)")
ax1.set_ylabel(r"Z (m)"), ax2.set_ylabel(r"Z (m)")
ax2.set_zlabel(r"Z (a.u.)")
ax1.axis("equal")
return ax1, ax2
def Plot_BFunc_SuppMax_PolProj_DefAxes():
fW,fH,fdpi,axCol = 6,8,80,'w'
axPos = [0.15, 0.15, 0.6, 0.7]
f = plt.figure(facecolor="w",figsize=(fW,fH),dpi=fdpi)
ax = f.add_axes(axPos,frameon=True,axisbg=axCol)
ax.set_xlabel(r"R (m)")
ax.set_ylabel(r"Z (m)")
ax.axis("equal")
return ax
def Plot_BF2_interp_DefAxes():
fW,fH,fdpi,axCol = 12,8,80,'w'
axPosV1, axPosC1 = [0.08, 0.05, 0.4, 0.9], [0.56, 0.05, 0.4, 0.9]
f = plt.figure(facecolor="w",figsize=(fW,fH),dpi=fdpi)
axV1 = f.add_axes(axPosV1,frameon=True,axisbg=axCol)
axC1 = f.add_axes(axPosC1,frameon=True,axisbg=axCol)
axV1.set_xlabel(r"R (m)"), axC1.set_xlabel(r"R (m)")
axV1.set_ylabel(r"Z (m)"), axC1.set_ylabel(r"Z (m)")
axV1.set_title("Input"), axC1.set_title("Output")
axV1.axis("equal"), axC1.axis("equal")
return axV1, axC1
"""
###############################################################################
###############################################################################
###############################################################################
Defaults settings of tofu.Eq
###############################################################################
###############################################################################
"""
# --- Plotting dictionaries and parameters ------
Eq2DPlotDict = {'scatter':{'cmap':plt.cm.YlOrRd, 'marker':'s','edgecolors':'None', 's':10},
'contour':{'cmap':plt.cm.YlOrRd},
'contourf':{'cmap':plt.cm.YlOrRd},
'imshow':{'cmap':plt.cm.YlOrRd}}
Eq2DMagAxDict = {'ls':'None', 'lw':0., 'marker':'+', 'ms':10, 'c':'k', 'mew':2.}
Eq2DSepDict = {'ls':'-', 'lw':1., 'marker':'None', 'c':'k'}
Eq2DPlotRadDict = {'pivot':'tail', 'color':'b', 'units':'xy', 'angles':'xy', 'scale':1., 'scale_units':'xy', 'width':0.003, 'headwidth':3, 'headlength':5}
Eq2DPlotPolDict = {'pivot':'tail', 'color':'r', 'units':'xy', 'angles':'xy', 'scale':1., 'scale_units':'xy', 'width':0.003, 'headwidth':3, 'headlength':5}
Eq2DPlotVsDict = {'ls':'None', 'lw':0., 'marker':'+', 'ms':8, 'color':'k', 'mec':'k'}
# -------------- Figures ------------------------
def Plot_Eq2D_DefAxes(VType='Tor', cbar=False, a4=False):
fdpi,axCol = 80,'w'
(fW,fH) = (8.27,11.69) if a4 else (8,10)
axPos = [0.12, 0.08, 0.68, 0.86]
f = plt.figure(facecolor="w",figsize=(fW,fH),dpi=fdpi)
ax = f.add_axes(axPos,frameon=True,axisbg=axCol)
axc = f.add_axes([0.82,0.08,0.05,0.60],frameon=True,axisbg=axCol) if cbar else None
Xlab = r"R (m)" if VType=='Tor' else r"Y (m)"
ax.set_xlabel(Xlab)
ax.set_ylabel(r"Z (m)")
ax.set_aspect("equal", adjustable='datalim')
return ax, axc
def Plot_Eq2D_Vs_DefAxes(VType='Tor', a4=False):
fdpi,axCol = 80,'w'
(fW,fH) = (11.69,8.27) if a4 else (11,8)
axPos = [0.07, 0.06, 0.78, 0.87]
f = plt.figure(facecolor="w",figsize=(fW,fH),dpi=fdpi)
ax = f.add_axes(axPos,frameon=True,axisbg=axCol)
return ax
def Plot_Eq2D_Inter_DefAxes(VType='Tor', cbar=False, a4=False):
fdpi,axCol = 80,'w'
(fW,fH) = (11.69,8.27) if a4 else (16.,11.3)
f = plt.figure(facecolor="w", figsize=(fW,fH), dpi=fdpi)
ax1 = f.add_axes([0.05, 0.05, 0.40, 0.90], frameon=True,axisbg=axCol)
ax2 = f.add_axes([0.55, 0.55, 0.43, 0.40], frameon=True, axisbg=axCol)
ax3 = f.add_axes([0.55, 0.05, 0.43, 0.40], frameon=True, axisbg=axCol)
axc = f.add_axes([0.47, 0.05,0.03,0.60],frameon=True,axisbg=axCol) if cbar else None
Xlab = r"R (m)" if VType=='Tor' else r"Y (m)"
ax1.set_xlabel(Xlab), ax1.set_ylabel(r"Z (m)")
ax2.set_xlabel(Xlab)
ax3.set_xlabel(r"t (s)")
ax1.set_aspect("equal", adjustable='datalim')
return {'2DProf':[ax1], '1DProf':[ax2], '1DTime':[ax3], 'Misc':[axc], '1DConst':None, '2DConst':None}
"""
###############################################################################
###############################################################################
###############################################################################
Defaults settings of ToFu_MatComp
###############################################################################
###############################################################################
"""
# ------------ Computing settings ---------------
GMindMSubP = 0.1
GMindMSubPMode = 'Rel'
GMindMSubTheta = 0.02
GMindMSubThetaMode = 'Rel'
GMMatMSubP = 0.1
GMMatMSubPMode = 'Rel'
GMMatMSubTheta = 0.02
GMMatMSubThetaMode = 'Rel'
GMMatepsrel = 1.e-4
GMMatMode = 'trapz'
GMMatDMinInf = 0.0005
GMMatLOSMode = 'quad'
GMMatLOSeps = 1.e-4
GMMatLOSSubP = 0.01
GMMatLOSSubPMode = 'Rel'
GMSigPlotSubP = 0.5
GMSigPlotSubPMode = 'Rel'
GMSigPlotNC = 30
# --- Plotting dictionaries and parameters ------
GMPlotDetSubP = 0.1
GMPlotDetSubPMode = 'Rel'
GMPlotDetKWArgMesh = {'Elt':'M','Mshdict':{'color':(0.9,0.9,0.9), 'marker':'None', 'linestyle':'-', 'linewidth':1.,'zorder':-10}}#'MBKC'
GMPlotDetKWArgTor = {'Elt':'P'}
GMPlotDetKWArgDet = {'Elt':'PC', 'EltApert':'P','EltLOS':'L','EltTor':'P'}
GMPlotBFDetd = {'Elt':'C','Conedict':{'edgecolors':'none','facecolors':(0.8,0.8,0.8,0.2),'linewidths':0.,'zorder':10},'EltLOS':'L','Ldict':{'lw':2,'zorder':10}}
GMPlotBFSubP = 0.05
GMPlotBFSubPMode = 'Rel'
GMPlotBFKWArgLOS = {'Elt':'L'}
GMPlotDetCd = {'cmap':plt.cm.YlOrRd,'edgecolors':'none','linewidths':0.}
GMPlotDetLd = {'lw':2,'ls':'-','c':'k'}
GMPlotDetLOSd = {'lw':2,'ls':'--','c':'k'}
GMSigPlotSd = {'lw':2}
GMSigPlotSLOSd = {'Elt':'L', 'Ldict':{'lw':1}}
GMSigPlotCd = {'cmap':plt.cm.Greys} # or gray_r or Greys or hot_r
# -------------- Figures ------------------------
def Plot_GeomMatrix_Mesh_DefAxes(a4=False):
(fW,fH,fdpi,axCol) = (11.69,8.27,80,'w') if a4 else (16,10,80,'w')
axPos1, axPos2, axPos3 = [0.05, 0.07, 0.32, 0.87], [0.48, 0.55, 0.5, 0.4], [0.48, 0.07, 0.5, 0.4]
f = plt.figure(facecolor="w",figsize=(fW,fH),dpi=fdpi)
ax1 = f.add_axes(axPos1,frameon=True,axisbg=axCol)
ax2 = f.add_axes(axPos2,frameon=True,axisbg=axCol)
ax3 = f.add_axes(axPos3,frameon=True,axisbg=axCol)
ax1.set_xlabel(r"R (m)"), ax2.set_xlabel(r"Mesh elements index (starts at 0)"), ax3.set_xlabel(r"Basis functions index (starts at 0)")
ax1.set_ylabel(r"Z (m)"), ax2.set_ylabel(r"Contribution ($W/sr/m^3 x sr.m^3$)"), ax3.set_ylabel(r"Contribution $W/sr/m^3 x sr.m^3$")
ax1.axis("equal")
return ax1, ax2, ax3
def Plot_BF2_sum_DefAxes(a4=False):
(fW,fH,fdpi,axCol) = (11.69,8.27,80,'w') if a4 else (14,10,80,'w')
axPos1, axPos2 = [0.05, 0.05, 0.85, 0.4], [0.05, 0.55, 0.85, 0.4]
f = plt.figure(facecolor="w",figsize=(fW,fH),dpi=fdpi)
ax1 = f.add_axes(axPos1,frameon=True,axisbg=axCol)
ax2 = f.add_axes(axPos2,frameon=True,axisbg=axCol)
ax1.set_xlabel(r"Detect index (starts at 0)"), ax2.set_xlabel(r"BFunc index (starts at 0)")
ax1.set_ylabel(r"Contribution ($sr.m^3$)"), ax2.set_ylabel(r"Contribution ($sr.m^3$)")
return ax1, ax2
def Plot_SynDiag_DefAxes(a4=False):
(fW,fH,fdpi,axCol) = (11.69,8.27,80,'w') if a4 else (17,10,80,'w')
axPos1, axPos2, axPos3, axPos4 = [0.05, 0.60, 0.92, 0.37], [0.08, 0.05, 0.20, 0.50], [0.37, 0.05, 0.26, 0.50], [0.70, 0.05, 0.20, 0.50]
f = plt.figure(facecolor="w",figsize=(fW,fH),dpi=fdpi)
ax1 = f.add_axes(axPos1,frameon=True,axisbg=axCol)
ax2 = f.add_axes(axPos2,frameon=True,axisbg=axCol)
ax3 = f.add_axes(axPos3,frameon=True,axisbg=axCol)
ax4 = f.add_axes(axPos4,frameon=True,axisbg=axCol)
ax1.set_xlabel(r""), ax2.set_xlabel(r"R (m)"), ax3.set_xlabel(r"R (m)"), ax4.set_xlabel(r"R (m)")
ax1.set_ylabel(r"SXR (W)"), ax2.set_ylabel(r"Z (m)")#, ax3.set_ylabel(r"Z (m)"), ax4.set_ylabel(r"Z (m)")
ax2.axis("equal"), ax3.axis("equal"), ax4.axis("equal")
return ax1, ax2, ax3, ax4
"""
###############################################################################
###############################################################################
###############################################################################
Defaults settings of ToFu_Treat
###############################################################################
###############################################################################
"""
def Plot_TreatSig_Def(a4=False, nMax=4):
(fW,fH,fdpi,axCol) = (11.69,8.27,80,'w') if a4 else (22,10,80,'w')
f = plt.figure(facecolor=axCol,figsize=(fW,fH),dpi=fdpi)
ax3D = f.add_axes([0.05, 0.55, 0.35, 0.4],frameon=True,axisbg=axCol)
axtime = f.add_axes([0.05, 0.05, 0.35, 0.4],frameon=True,axisbg=axCol)
axprof = f.add_axes([0.45, 0.05, 0.53, 0.85],frameon=True,axisbg=axCol)
LaxTxtChan, LaxTxtTime = [], []
width, w2 = (0.40-0.13)/nMax, (0.98-0.45)/nMax
for ii in range(0,nMax):
LaxTxtChan.append(f.add_axes([0.13+ii*width, 0.45, width, 0.05],frameon=False,axisbg=axCol))
LaxTxtChan[ii].spines['top'].set_visible(False), LaxTxtChan[ii].spines['bottom'].set_visible(False)
LaxTxtChan[ii].spines['right'].set_visible(False), LaxTxtChan[ii].spines['left'].set_visible(False)
LaxTxtChan[ii].set_xticks([]), LaxTxtChan[ii].set_yticks([])
LaxTxtChan[ii].set_xlim(0,1), LaxTxtChan[ii].set_ylim(0,1)
LaxTxtTime.append(f.add_axes([0.45+ii*w2, 0.95, w2, 0.04],frameon=False,axisbg=axCol))
LaxTxtTime[ii].spines['top'].set_visible(False), LaxTxtTime[ii].spines['bottom'].set_visible(False)
LaxTxtTime[ii].spines['right'].set_visible(False), LaxTxtTime[ii].spines['left'].set_visible(False)
LaxTxtTime[ii].set_xticks([]), LaxTxtTime[ii].set_yticks([])
LaxTxtTime[ii].set_xlim(0,1), LaxTxtTime[ii].set_ylim(0,1)
ax3D.set_xlabel(r"time (s)"), ax3D.set_ylabel(r"Channel index")
axtime.set_xlabel(r"time (s)"), axtime.set_ylabel(r"SXR Mes. (mW)")
axprof.set_xlabel(r"Channel index"), axprof.set_ylabel(r"SXR Mes. (mW)")
axtime.grid(True), axprof.grid(True)
return ax3D, axtime, axprof, LaxTxtChan, LaxTxtTime
def Plot_Noise_Def(a4=False):
(fW,fH,fdpi,axCol) = (11.69,8.27,80,'w') if a4 else (20,10,80,'w')
f = plt.figure(facecolor=axCol,figsize=(fW,fH),dpi=fdpi)
ax = f.add_axes([0.07, 0.08, 0.74, 0.85],frameon=True,axisbg=axCol)
ax.set_xlabel(r"Phys. (a.u.)"), ax.set_ylabel(r"Noise (a.u.)")
ax.grid(True)
return ax
def Plot_FFTChan_Def(a4=False):
(fW,fH,fdpi,axCol) = (11.69,8.27,80,'w') if a4 else (9,5,80,'w')
f = plt.figure(facecolor=axCol,figsize=(fW,fH),dpi=fdpi)
ax = f.add_axes([0.08, 0.1, 0.90, 0.85],frameon=True,axisbg=axCol)
ax.set_xlabel(r"time (s)")
ax.set_ylabel(r"Freq. (kHz)")
ax.grid(True)
return ax
def Plot_FFTInter_Def(a4=False):
(fW,fH,fdpi,axCol) = (11.69,8.27,80,'w') if a4 else (16,11.3,80,'w')
f = plt.figure(facecolor=axCol,figsize=(fW,fH),dpi=fdpi)
ax21 = f.add_axes([0.05, 0.55, 0.40, 0.40],frameon=True,axisbg=axCol)
ax22 = f.add_axes([0.05, 0.05, 0.40, 0.40],frameon=True,axisbg=axCol)
axt = f.add_axes([0.55, 0.55, 0.40, 0.40],frameon=True,axisbg=axCol)
axF = f.add_axes([0.55, 0.05, 0.40, 0.40],frameon=True,axisbg=axCol)
ax21.set_ylabel(r"Freq. (kHz)"), ax21.set_title(r"Pow. spectrum norm. to max")
ax22.set_xlabel(r"time (s)"), ax22.set_ylabel(r"Freq. (kHz)"), ax22.set_title(r"Pow. spectrum norm. to instantaneous max")
axt.set_xlabel(r"time (s)"), axt.set_ylabel(r"Harm. magnitude$^2$ (a.u.)")
axF.set_xlabel(r"Freq. (kHz)"), axF.set_ylabel(r"Harm. magnitude$^2$ (a.u.)")
ax21.grid(True), ax22.grid(True), axt.grid(True), axF.grid(True)
return ax21, ax22, axt, axF
"""
###############################################################################
###############################################################################
###############################################################################
Defaults settings of ToFu_Inv
###############################################################################
###############################################################################
"""
# Probability law for augmented tikho : p(x) = x^(a-1)*exp(-bx)
# If normalised :
# Mean [x] a / b
# Variance <x> a / b^2
# If not normalised :
# Mean [x] a! / (b^(a+1))
# Variance <x> a!/b^(2a+2) * ( (a+1)b^a - a! )
# [x] = k => b = (a!/k)^(1/(a+1))
# if [x]=1 => <x> = (a+1) * (a!)^(-1/(a+1)) - 1
ConvCrit = 1.e-6
chi2Tol = 0.05
chi2Obj = 1.
mu0 = 1000.
AugTikho_a0 = 10 # (Regul. parameter, larger a => larger variance)
AugTikho_b0 = np.math.factorial(AugTikho_a0)**(1./(AugTikho_a0+1)) # To have [x] = 1 (note the float division)
AugTikho_a1 = 2 # (Noise), a as small as possible for small variance
AugTikho_b1 = np.math.factorial(AugTikho_a1)**(1./(AugTikho_a1+1)) # To have [x] = 1 (note the float division)
AugTikho_d = 0.95 # Exponent for rescaling of a0bis in V2, typically in [1/3 ; 1/2], but real limits are 0 < d < 1 (or 2 ?)
AugTkLsmrAtol = 1.e-8
AugTkLsmrBtol = 1.e-8
AugTkLsmrConlim = 1e8
AugTkLsmrMaxiter = None
SolInvParDef = {'Dt':None,'mu0':mu0,'SolMethod':'InvLin_AugTikho_V1','Deriv':'D2N2','IntMode':'Vol','Cond':None,'ConvCrit':ConvCrit,'Sparse':True,'SpType':'csr', 'Sep':{'In':True,'NLim':3}, 'Pos':True, 'KWARGS': {'a0':AugTikho_a0, 'b0':AugTikho_b0, 'a1':AugTikho_a1, 'b1':AugTikho_b1, 'd':AugTikho_d, 'ConvReg':True, 'FixedNb':True}, 'timeit':False, 'Verb':False,'VerbNb':None,'Method':'Poly','Deg':1,'Group':True,'plot':False,'LNames':None,'Com':''}
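# A minimal numerical sanity check (illustrative only) of the un-normalised
# mean quoted in the comment block above: with b = (a!/k)^(1/(a+1)), the
# integral of x * x^(a-1) * exp(-b*x) over [0, inf) should come out close to
# k (k = 1 for the defaults). Assumes scipy is importable.
def _check_AugTikho_mean(a=AugTikho_a0, b=AugTikho_b0):
    from scipy.integrate import quad
    mean, _ = quad(lambda x: x**a * np.exp(-b*x), 0, np.inf)
    return mean # expected to be close to 1 for the default a0, b0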
# --- Plotting dictionaries and parameters ------
InvPlotSubP = 0.01
InvPlotSubMode = 'abs'
InvAnimIntervalms = 100
InvAnimBlit = True
InvAnimRepeat = True
InvAnimRepeatDelay = 500
InvAnimTimeScale = 1.e2
InvPlotF = 'imshow'
InvLvls = 30
Invdict = {'cmap':plt.cm.YlOrRd,'edgecolor':None}
Tempd = {'ls':'-','c':'k'}
Retrod = {'ls':'-','c':'b'}
InvSXRd = {'ls':'-','c':'k','lw':1.}
InvSigmad = {'facecolor':(0.8,0.8,0.8,0.7),'lw':0.}
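# --- Illustrative note (sketch only) -----------------------------------------
# The small dicts above are matplotlib keyword bundles meant to be unpacked
# straight into plotting calls.  The axes/data names below are hypothetical.
def _sketch_plot_with_style_dicts(ax, t, sig, sigma, retro):
    ax.plot(t, sig, **InvSXRd)                                # measured SXR trace
    ax.fill_between(t, sig-sigma, sig+sigma, **InvSigmad)     # +/- sigma envelope
    ax.plot(t, retro, **Retrod)                               # retrofitted signal
    return ax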
"""
"""
# -------------- Figures ------------------------
def Plot_Inv_Anim_DefAxes(SXR=True, TMat=True, Chi2N=True, Mu=True, R=True, Nit=True):
axCol = 'w'
if not any([SXR,TMat,Chi2N,Mu,R,Nit]):
fW,fH = 8,10
f = plt.figure(figsize=(fW,fH),facecolor=axCol)
axInvPos = [0.10, 0.10, 0.75, 0.80]
axcPos = [0.87, 0.10, 0.10, 0.70]
axTMat = None
tempPos = []
elif SXR and not any([TMat,Chi2N,Mu,R,Nit]):
fW,fH = 8,12
f = plt.figure(figsize=(fW,fH),facecolor=axCol)
axInvPos = [0.10, 0.30, 0.70, 0.65]
axcPos = [0.82, 0.30, 0.10, 0.55]
tempPos = [[0.10, 0.05, 0.85, 0.20]]
tempylab = [r"SXR (mW)"]
axTMat = None
elif SXR and TMat and not any([Chi2N,Mu,R,Nit]):
fW,fH = 16,10
f = plt.figure(figsize=(fW,fH),facecolor=axCol)
axInvPos = [0.05, 0.06, 0.32, 0.80]
axcPos = [0.38, 0.06, 0.03, 0.70]
tempPos = [[0.50, 0.06, 0.47, 0.40]]
tempylab = [r"SXR (mW)"]
TMatPos = [0.50, 0.55, 0.47, 0.40]
TMatylab = r"SXR (mW)"
elif not SXR and any([Chi2N,Mu,R,Nit]):
fW,fH = 16,10
f = plt.figure(figsize=(fW,fH),facecolor=axCol)
axInvPos = [0.05, 0.06, 0.35, 0.80]
axcPos = [0.41, 0.06, 0.04, 0.70]
tempylab = [r"Nb. iterations", r"$\chi^2_N$", r"Reg. param. (a.u.)", r"Obj. func. (a.u.)"]
tempPos = [[0.50, 0.75, 0.47, 0.21], [0.50, 0.52, 0.47, 0.21], [0.50, 0.29, 0.47, 0.21], [0.50, 0.06, 0.47, 0.21]]
temps = [Nit,Chi2N,Mu,R]
        # Delete in reverse so earlier deletions do not shift the indices still to visit
        for ii in range(len(temps)-1,-1,-1):
            if not temps[ii]:
                del tempPos[ii]
                del tempylab[ii]
axTMat = None
else:
fW,fH = 18,12
f = plt.figure(figsize=(fW,fH),facecolor=axCol)
axInvPos = [0.05, 0.05, 0.32, 0.80]
axcPos = [0.38, 0.05, 0.03, 0.70]
tempylab = [r"SXR (mW)", r"Nb. iterations", r"$\mathbf{\chi^2_N}$", r"Reg. param. (a.u.)", r"Obj. func. (a.u.)"]
tempPos = [[0.50, 0.61, 0.47, 0.17], [0.50, 0.47, 0.47, 0.12], [0.50, 0.33, 0.47, 0.12], [0.50, 0.19, 0.47, 0.12], [0.50, 0.05, 0.47, 0.12]]
temps = [SXR,Nit,Chi2N,Mu,R]
        # Delete in reverse so earlier deletions do not shift the indices still to visit
        for ii in range(len(temps)-1,-1,-1):
            if not temps[ii]:
                del tempPos[ii]
                del tempylab[ii]
if TMat:
TMatPos = [0.50, 0.81, 0.47, 0.17]
TMatylab = r"SXR (mW)"
else:
axTMat = None
if TMat:
axTMat = f.add_axes(TMatPos, frameon=True, axisbg=axCol)
axTMat.set_ylabel(TMatylab, fontsize=12, fontweight='bold')
Laxtemp = []
if len(tempPos)>0:
ypos = np.array([pp[1] for pp in tempPos])
indmin = np.argmin(ypos)
for ii in range(0,len(tempPos)):
Laxtemp.append(f.add_axes(tempPos[ii], frameon=True, axisbg=axCol))
Laxtemp[-1].set_ylabel(tempylab[ii], fontsize=12, fontweight='bold')
if not ii == indmin:
Laxtemp[-1].set_xticklabels([])
Laxtemp[np.argmin(ypos)].set_xlabel(r"t (s)", fontsize=12, fontweight='bold')
axInv = f.add_axes(axInvPos, frameon=True, axisbg=axCol)
axInv.set_xlabel(r"R (m)", fontsize=12, fontweight='bold')
axInv.set_ylabel(r"Z (m)", fontsize=12, fontweight='bold')
axInv.axis('equal')
axc = f.add_axes(axcPos, frameon=True, axisbg=axCol)
return axInv, axTMat, Laxtemp, axc
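# --- Illustrative usage sketch (assumption, not the actual ToFu call chain) ---
# The layout builder above only creates empty axes; a caller is expected to
# draw into them afterwards.  The random data below is purely illustrative.
def _sketch_use_anim_layout():
    axInv, axTMat, Laxtemp, axc = Plot_Inv_Anim_DefAxes(SXR=True, TMat=True, Chi2N=True, Mu=True, R=True, Nit=True)
    t = np.linspace(0., 1., 200)
    for ax in Laxtemp:
        ax.plot(t, np.random.rand(t.size), **Tempd)                 # dummy traces in the time panels
    if axTMat is not None:
        axTMat.plot(np.arange(10), np.random.rand(10), **Retrod)    # dummy channel profile
    return axInv, axTMat, Laxtemp, axc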
def Plot_Inv_FFTPow_DefAxes(NPts=6, a4=False):
(fW,fH,fdpi,axCol) = (11.69,8.27,80,'w') if a4 else (18,12,80,'w')
f = plt.figure(figsize=(fW,fH),facecolor=axCol,dpi=fdpi)
axInv = f.add_axes([0.05, 0.05, 0.32, 0.80], frameon=True, axisbg=axCol)
axc = f.add_axes([0.38, 0.05, 0.03, 0.70], frameon=True, axisbg=axCol)
axRetro = f.add_axes([0.50, 0.81, 0.47, 0.17], frameon=True, axisbg=axCol)
axSXR = f.add_axes([0.50, 0.61, 0.47, 0.17], frameon=True, axisbg=axCol)
axInv.set_xlabel(r"R (m)", fontsize=12, fontweight='bold')
axInv.set_ylabel(r"Z (m)", fontsize=12, fontweight='bold')
axInv.axis('equal')
axRetro.set_xlabel(r"Channels"), axRetro.set_ylabel(r"SXR (mW)")
axSXR.set_ylabel(r"SXR (mW)")
Lax = []
if NPts <= 4:
        N2, Dis = 1, 0.03                         # Dis (vertical spacing) must also be defined in this branch
        Hei = (0.58-0.05-(NPts-1)*Dis)/NPts
for ii in range(0,NPts):
Lax.append(f.add_axes([0.50, 0.58-(ii+1)*Hei-ii*Dis, 0.47, Hei], frameon=True, axisbg=axCol))
Lax[-1].set_ylabel(r"Freq. (kHz)", fontsize=12, fontweight='bold')
Lax[-1].grid(True)
Lax[-1].set_xlabel(r"time (s)", fontsize=12, fontweight='bold')
else:
N2, Dis = int(np.ceil(NPts/2.)), 0.03
Hei = (0.58-0.05-(N2-1)*Dis)/N2
LaxPos = [[[0.50, 0.58-(ii+1)*Hei-ii*Dis, 0.22, Hei],[0.75, 0.58-(ii+1)*Hei-ii*Dis, 0.22, Hei]] for ii in range(0,N2)]
        #LaxPos = list(itt.chain.from_iterable(LaxPos))
for ii in range(0,N2):
Lax.append(f.add_axes(LaxPos[ii][0], frameon=True, axisbg=axCol))
Lax[-1].set_ylabel(r"Freq. (kHz)", fontsize=12, fontweight='bold')
Lax[-1].grid(True)
Lax.append(f.add_axes(LaxPos[ii][1], frameon=True, axisbg=axCol))
Lax[-1].grid(True)
Lax[-1].set_xticklabels([]), Lax[-1].set_yticklabels([])
Lax[-2].set_xticklabels([])
Lax[-2].set_xlabel(r"time (s)", fontsize=12, fontweight='bold')
Lax[-1].set_xlabel(r"time (s)", fontsize=12, fontweight='bold')
return axInv, axc, axRetro, axSXR, Lax
"""
###############################################################################
###############################################################################
###############################################################################
Defaults settings of ToFu_PostTreat
###############################################################################
###############################################################################
"""
# --- Plotting dictionaries and parameters ------
InvPlotSubP = 0.01
InvPlotSubMode = 'abs'
InvPlotF = 'contour' #'imshow'
InvLvls = 30
Invdict = {'cmap':plt.cm.YlOrRd,'edgecolor':None}
Tempd = {'ls':'-','c':'k'}
Retrod = {'ls':'-'}
vlined = {'c':'k','ls':'--','lw':1.}
InvSXRd = {'ls':'-','c':'k','lw':1.}
InvSigmad = {'facecolor':(0.8,0.8,0.8,0.7),'lw':0.}
InvPlot_LPath = [((1.70,0.),(1.,0.)), ((1.70,0.),(0.,1.)), ((1.70,0.20),(1.,0.)), ((1.70,-0.20),(1.,0.)), ((1.55,0.),(0.,1.)), ((1.85,0.),(0.,1.))]
InvPlot_dl = 0.0025
InvPlot_LCol = ['b','r','g','m','y','c']
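# --- Illustrative note (sketch only) -----------------------------------------
# Each entry of InvPlot_LPath is assumed to be a ((R0,Z0),(uR,uZ)) pair: a point
# in the poloidal cross-section plus a direction along which a profile is
# sampled every InvPlot_dl metres.  The helper below only illustrates that
# reading of the data; it is not part of ToFu and its 'length' is arbitrary.
def _sketch_sample_path(path=InvPlot_LPath[0], dl=InvPlot_dl, length=0.6):
    (R0, Z0), (uR, uZ) = path
    norm = np.hypot(uR, uZ)
    uR, uZ = uR/norm, uZ/norm                       # unit direction
    s = np.arange(-0.5*length, 0.5*length+dl, dl)   # curvilinear abscissa centred on (R0,Z0)
    return R0 + s*uR, Z0 + s*uZ                     # sampled (R,Z) points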
TFPT_prop = {'Deriv':0, 'indt0':0, 't0':None, 'DVect':BF2_DVect_DefR, 'SubP':InvPlotSubP, 'SubMode':InvPlotSubMode, 'InvPlotFunc':InvPlotF, 'InvLvls':InvLvls, 'Invd':Invdict,
'vlined':vlined, 'SXRd':InvSXRd, 'Sigmad':InvSigmad, 'Tempd':Tempd, 'Retrod':Retrod, 'VMinMax':[None,None], 'Com':'', 'Norm':False, 'a4':False}
TFPT_propbasic = {}
TFPT_proptechnical = {}
TFPT_propprofiles = {'LPath':InvPlot_LPath, 'dl':InvPlot_dl}
TFPT_propbasic.update(TFPT_prop)
TFPT_proptechnical.update(TFPT_prop)
TFPT_propprofiles.update(TFPT_prop)
TFPT_Lprop = {'basic':TFPT_propbasic, 'technical':TFPT_proptechnical, 'profiles':TFPT_propprofiles, 'sawtooth':None}
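# --- Illustrative note (sketch only) -----------------------------------------
# The presets above were filled with dict.update(), so they all reference the
# *same* nested dicts (e.g. 'Invd'); mutating one preset's nested dict would
# silently affect the others.  A caller customising a preset should deep-copy
# it first, as sketched below (preset name and changes are just examples).
def _sketch_customised_preset(name='technical'):
    import copy
    props = copy.deepcopy(TFPT_Lprop[name])    # independent copy, nested dicts included
    props['Invd']['cmap'] = plt.cm.hot         # only this copy is affected
    props['a4'] = True
    return props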
InvAnimIntervalms = 100
InvAnimBlit = True
InvAnimRepeat = True
InvAnimRepeatDelay = 500
InvAnimTimeScale = 1.e2
# -------------- Figures ------------------------
def Plot_Inv_Basic_DefAxes(a4=False, dpi=80):
axCol = 'w'
(fW,fH) = (16,10) if not a4 else (11.69,8.27)
f = plt.figure(figsize=(fW,fH),facecolor=axCol,dpi=dpi)
axInv = f.add_axes([0.05, 0.06, 0.32, 0.80], frameon=True, axisbg=axCol)
axc = f.add_axes([0.38, 0.06, 0.03, 0.70], frameon=True, axisbg=axCol)
axTMat = f.add_axes([0.50, 0.55, 0.47, 0.40], frameon=True, axisbg=axCol)
axSig = f.add_axes([0.50, 0.06, 0.47, 0.40], frameon=True, axisbg=axCol)
axTxt = f.add_axes([0.05, 0.86, 0.32, 0.04], frameon=False, axisbg='none')
axInv.set_xlabel(r"R (m)", fontsize=12, fontweight='bold')
axInv.set_ylabel(r"Z (m)", fontsize=12, fontweight='bold')
axTMat.set_ylabel(r"Signal (mW)", fontsize=12, fontweight='bold')
axSig.set_ylabel(r"Signal (mW)", fontsize=12, fontweight='bold')
axSig.set_xlabel(r"t (s)", fontsize=12, fontweight='bold')
axTxt.set_xticks([]), axTxt.set_yticks([])
axTxt.set_xticklabels([]), axTxt.set_yticklabels([])
axc.set_title(r"$\epsilon$ (W/m^3)", size=11)
axInv.axis('equal')
return axInv, axTMat, axSig, axc, axTxt
def Plot_Inv_Technical_DefAxes(a4=False, dpi=80):
axCol = 'w'
(fW,fH) = (18,12) if not a4 else (11.69,8.27)
f = plt.figure(figsize=(fW,fH),facecolor=axCol,dpi=dpi)
axInv = f.add_axes([0.05, 0.05, 0.32, 0.80], frameon=True, axisbg=axCol)
axc = f.add_axes([0.38, 0.05, 0.03, 0.70], frameon=True, axisbg=axCol)
axTMat = f.add_axes([0.50, 0.81, 0.47, 0.17], frameon=True, axisbg=axCol)
axSig = f.add_axes([0.50, 0.61, 0.47, 0.17], frameon=True, axisbg=axCol)
axNit = f.add_axes([0.50, 0.47, 0.47, 0.12], frameon=True, axisbg=axCol)
axChi2N = f.add_axes([0.50, 0.33, 0.47, 0.12], frameon=True, axisbg=axCol)
axMu = f.add_axes([0.50, 0.19, 0.47, 0.12], frameon=True, axisbg=axCol)
axR = f.add_axes([0.50, 0.05, 0.47, 0.12], frameon=True, axisbg=axCol)
axTxt = f.add_axes([0.05, 0.86, 0.32, 0.04], frameon=False, axisbg='none')
axInv.set_xlabel(r"R (m)", fontsize=12, fontweight='bold'), axInv.set_ylabel(r"Z (m)", fontsize=12, fontweight='bold')
axTMat.set_ylabel(r"Signal (mW)", fontsize=12, fontweight='bold'), axSig.set_ylabel(r"Signal (mW)", fontsize=12, fontweight='bold')
axNit.set_ylabel(r"Nb. iterations", fontsize=12, fontweight='bold')
axChi2N.set_ylabel(r"$\mathbf{\chi^2_N}$", fontsize=12, fontweight='bold')
axMu.set_ylabel(r"Reg. param. (a.u.)", fontsize=12, fontweight='bold')
axR.set_xlabel(r"t (s)", fontsize=12, fontweight='bold'), axR.set_ylabel(r"Obj. func. (a.u.)", fontsize=12, fontweight='bold')
axTxt.set_xticks([]), axTxt.set_yticks([]), axTxt.set_xticklabels([]), axTxt.set_yticklabels([])
axc.set_title(r"$\epsilon$ (W/m^3)", size=11)
axInv.axis('equal')
return axInv, axTMat, [axSig,axNit,axChi2N,axMu,axR], axc, axTxt
def Plot_Inv_Profiles_DefAxes(NL=4, a4=False, dpi=80):
axCol = 'w'
(fW,fH) = (18,12) if not a4 else (11.69,8.27)
f = plt.figure(figsize=(fW,fH),facecolor=axCol,dpi=dpi)
axInv = f.add_axes([0.05, 0.05, 0.32, 0.80], frameon=True, axisbg=axCol)
axc = f.add_axes([0.38, 0.05, 0.03, 0.70], frameon=True, axisbg=axCol)
axTMat = f.add_axes([0.50, 0.83, 0.47, 0.15], frameon=True, axisbg=axCol)
axSig = f.add_axes([0.50, 0.65, 0.47, 0.15], frameon=True, axisbg=axCol)
axInv.set_xlabel(r"R (m)", fontsize=12, fontweight='bold'), axInv.set_ylabel(r"Z (m)", fontsize=12, fontweight='bold')
axTMat.set_ylabel(r"Signal (mW)", fontsize=12, fontweight='bold'), axSig.set_ylabel(r"Signal (mW)", fontsize=12, fontweight='bold')
LaxP, RefUp, RefDo, Dz = [], 0.60, 0.05, 0.05
    nl = (NL+1)//2                        # number of rows (integer division, robust under Python 3)
    H = (RefUp-RefDo - (nl-1)*Dz)/nl
    for ii in range(0,NL):
        cc = ii//nl                       # column index
        il = ii - cc*nl                   # row index within the column
ax = f.add_axes([0.50 + cc*0.25, RefUp-(il+1)*H-il*Dz, 0.22, H], frameon=True, axisbg=axCol)
LaxP.append(ax)
if il == nl-1:
LaxP[ii].set_xlabel(r"Length (m)", fontsize=12, fontweight='bold')
if cc==0:
LaxP[ii].set_ylabel(r"$\mathbf{\epsilon^{\eta}}$ (W/m^3)", fontsize=12, fontweight='bold')
axTxt = f.add_axes([0.05, 0.86, 0.32, 0.04], frameon=False, axisbg='none')
axTxt.set_xticks([]), axTxt.set_yticks([]), axTxt.set_xticklabels([]), axTxt.set_yticklabels([])
axc.set_title(r"$\epsilon$ (W/m^3)", size=11)
axInv.set_aspect(aspect='equal',adjustable='datalim')
return axInv, axTMat, axSig, LaxP, axc, axTxt
def Plot_Inv_Compare_Basic_DefAxes(N=2, a4=False, dpi=80):
MR, ML, DX = 0.08, 0.06, 0.04
W = (1.-(MR+ML)-(N-1)*DX)/N
axCol = 'w'
(fW,fH) = (16,10) if not a4 else (11.69,8.27)
f = plt.figure(figsize=(fW,fH),facecolor=axCol,dpi=dpi)
axTMat = f.add_axes([0.55, 0.06, 0.44, 0.34], frameon=True, axisbg=axCol)
axSig = f.add_axes([0.06, 0.06, 0.44, 0.34], frameon=True, axisbg=axCol)
axc = f.add_axes([1.-MR+0.01, 0.46, 0.02, 0.44], frameon=True, axisbg=axCol)
LaxInv, LaxTxt = [], []
for ii in range(0,N):
LaxInv.append(f.add_axes([ML+ii*(DX+W), 0.46, W, 0.44], frameon=True, axisbg=axCol))
LaxTxt.append(f.add_axes([ML+ii*(DX+W), 0.90, W, 0.03], frameon=False, axisbg='none'))
LaxInv[ii].set_xlabel(r"R (m)", fontsize=12, fontweight='bold')
if ii==0:
LaxInv[ii].set_ylabel(r"Z (m)", fontsize=12, fontweight='bold')
LaxTxt[ii].set_xticks([]), LaxTxt[ii].set_yticks([])
LaxTxt[ii].set_xticklabels([]), LaxTxt[ii].set_yticklabels([])
LaxInv[ii].axis('equal')
#axTMat.set_ylabel(r"Signal (mW)", fontsize=12, fontweight='bold')
axc.set_title(r"$\epsilon$ (W/m^3)", size=11)
axSig.set_ylabel(r"Signal (mW)", fontsize=12, fontweight='bold')
axSig.set_xlabel(r"t (s)", fontsize=12, fontweight='bold')
axTMat.set_xlabel(r"Chan.", fontsize=12, fontweight='bold')
return LaxInv, axTMat, axSig, axc, LaxTxt
def Plot_Inv_Compare_Technical_DefAxes(N=2, a4=False, dpi=80):
MR, ML, DX = 0.06, 0.05, 0.04
W = (1.-(MR+ML)-(N-1)*DX)/N
axCol = 'w'
(fW,fH) = (18,12) if not a4 else (11.69,8.27)
f = plt.figure(figsize=(fW,fH),facecolor=axCol,dpi=dpi)
axTMat = f.add_axes([0.05, 0.26, 0.28, 0.17], frameon=True, axisbg=axCol)
axSig = f.add_axes([0.05, 0.05, 0.28, 0.17], frameon=True, axisbg=axCol)
axNit = f.add_axes([0.38, 0.05, 0.28, 0.18], frameon=True, axisbg=axCol)
axChi2N = f.add_axes([0.38, 0.25, 0.28, 0.18], frameon=True, axisbg=axCol)
axMu = f.add_axes([0.70, 0.05, 0.28, 0.18], frameon=True, axisbg=axCol)
axR = f.add_axes([0.70, 0.25, 0.28, 0.18], frameon=True, axisbg=axCol)
axc = f.add_axes([1.-MR+0.01, 0.46, 0.02, 0.44], frameon=True, axisbg=axCol)
LaxInv, LaxTxt = [], []
for ii in range(0,N):
LaxInv.append(f.add_axes([ML+ii*(DX+W), 0.48, W, 0.42], frameon=True, axisbg=axCol))
LaxTxt.append(f.add_axes([ML+ii*(DX+W), 0.90, W, 0.03], frameon=False, axisbg='none'))
LaxInv[ii].set_xlabel(r"R (m)", fontsize=12, fontweight='bold')
if ii==0:
LaxInv[ii].set_ylabel(r"Z (m)", fontsize=12, fontweight='bold')
LaxTxt[ii].set_xticks([]), LaxTxt[ii].set_yticks([])
LaxTxt[ii].set_xticklabels([]), LaxTxt[ii].set_yticklabels([])
LaxInv[ii].axis('equal')
#axTMat.set_ylabel(r"Signal (mW)", fontsize=12, fontweight='bold')
axc.set_title(r"$\epsilon$ (W/m^3)", size=11)
axSig.set_ylabel(r"Signal (mW)", fontsize=12, fontweight='bold')
axSig.set_xlabel(r"t (s)", fontsize=12, fontweight='bold')
axTMat.set_xlabel(r"Chan.", fontsize=12, fontweight='bold')
axNit.set_xlabel(r"t (s)"), axNit.set_ylabel(r"$N_{it}$")
axChi2N.set_ylabel(r"$\chi^2_N$")
axMu.set_xlabel(r"t (s)"), axMu.set_ylabel(r"Reg. param. (a.u.)")
axR.set_ylabel(r"Obj. func. (a.u.)")
return LaxInv, axTMat, [axSig,axNit,axChi2N,axMu,axR], axc, LaxTxt
def Plot_Inv_Compare_Profiles_DefAxes(N=2, NL=4, a4=False, dpi=80):
MR, ML, DX = 0.06, 0.05, 0.04
W = (1.-(MR+ML)-(N-1)*DX)/N
axCol = 'w'
(fW,fH) = (18,12) if not a4 else (11.69,8.27)
f = plt.figure(figsize=(fW,fH),facecolor=axCol,dpi=dpi)
axTMat = f.add_axes([0.05, 0.29, 0.28, 0.20], frameon=True, axisbg=axCol)
axSig = f.add_axes([0.05, 0.05, 0.28, 0.20], frameon=True, axisbg=axCol)
axc = f.add_axes([1.-MR+0.01, 0.52, 0.02, 0.38], frameon=True, axisbg=axCol)
LaxInv, LaxTxt = [], []
for ii in range(0,N):
LaxInv.append(f.add_axes([ML+ii*(DX+W), 0.54, W, 0.38], frameon=True, axisbg=axCol))
LaxTxt.append(f.add_axes([ML+ii*(DX+W), 0.92, W, 0.03], frameon=False, axisbg='none'))
LaxInv[ii].set_xlabel(r"R (m)", fontsize=12, fontweight='bold')
if ii==0:
LaxInv[ii].set_ylabel(r"Z (m)", fontsize=12, fontweight='bold')
LaxTxt[ii].set_xticks([]), LaxTxt[ii].set_yticks([])
LaxTxt[ii].set_xticklabels([]), LaxTxt[ii].set_yticklabels([])
LaxInv[ii].axis('equal')
LaxP, LaxPbis, RefUp, RefDo, Dz = [], [], 0.49, 0.05, 0.04
    nl = (NL+1)//2                        # number of rows (integer division, robust under Python 3)
    H = (RefUp-RefDo - (nl-1)*Dz)/nl
    for ii in range(0,NL):
        cc = ii//nl                       # column index
        il = ii - cc*nl                   # row index within the column
ax = f.add_axes([0.38 + cc*0.32, RefUp-(il+1)*H-il*Dz, 0.28, H], frameon=True, axisbg=axCol)
LaxP.append(ax)
#LaxPbis.append(ax.twiny())
if il == nl-1:
LaxP[ii].set_xlabel(r"Length (m)", fontsize=12, fontweight='bold')
if cc==0:
LaxP[ii].set_ylabel(r"$\mathbf{\epsilon^{\eta}}$ (W/m^3)", fontsize=12, fontweight='bold')
#axTMat.set_ylabel(r"Signal (mW)", fontsize=12, fontweight='bold')
axc.set_title(r"$\epsilon$ (W/m^3)", size=11)
axSig.set_ylabel(r"Signal (mW)", fontsize=12, fontweight='bold')
axSig.set_xlabel(r"t (s)", fontsize=12, fontweight='bold')
axTMat.set_xlabel(r"Chan.", fontsize=12, fontweight='bold')
return LaxInv, axTMat, axSig, LaxP, LaxPbis, axc, LaxTxt
|
mit
|