repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
dhruv13J/scikit-learn | sklearn/tests/test_multiclass.py | 72 | 24581 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.multiclass import fit_ovr
from sklearn.multiclass import fit_ovo
from sklearn.multiclass import fit_ecoc
from sklearn.multiclass import predict_ovr
from sklearn.multiclass import predict_ovo
from sklearn.multiclass import predict_ecoc
from sklearn.multiclass import predict_proba_ovr
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.preprocessing import LabelBinarizer
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
with ignore_warnings():
assert_raises(ValueError, predict_ovr, [LinearSVC(), MultinomialNB()],
LabelBinarizer(), [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_ovo_regressor():
# test that ovr and ovo work on regressors which don't have a decision_function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
# Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
@ignore_warnings
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = [["spam", "eggs"], ["spam"], ["ham", "eggs", "spam"],
["ham", "eggs"], ["ham"]]
# y = [[1, 2], [1], [0, 1, 2], [0, 2], [0]]
Y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
# test input as lists of tuples
clf = assert_warns(DeprecationWarning,
OneVsRestClassifier(base_clf).fit,
X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_equal(set(y_pred), set(["spam", "eggs"]))
assert_true(clf.multilabel_)
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
# don't densify sparse coefficients
assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
prediction_from_list = ovo.fit(iris.data,
list(iris.target)).predict(iris.data)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert_equal(decisions.shape, (n_samples, n_classes))
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
# For each sample and each class, there are only 3 possible vote levels
# because there are only 3 distinct class pairs, thus 3 distinct
# binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert_greater(len(np.unique(decisions[:, class_idx])), 146)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
def test_ovo_ties2():
# test that ties can not only be won by the first two labels
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
@ignore_warnings
def test_deprecated():
base_estimator = DecisionTreeClassifier(random_state=0)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
all_metas = [
(OneVsRestClassifier, fit_ovr, predict_ovr, predict_proba_ovr),
(OneVsOneClassifier, fit_ovo, predict_ovo, None),
(OutputCodeClassifier, fit_ecoc, predict_ecoc, None),
]
for MetaEst, fit_func, predict_func, proba_func in all_metas:
try:
meta_est = MetaEst(base_estimator,
random_state=0).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train,
random_state=0)
except TypeError:
meta_est = MetaEst(base_estimator).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train)
if len(fitted_return) == 2:
estimators_, classes_or_lb = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
X_test),
meta_est.predict(X_test))
if proba_func is not None:
assert_almost_equal(proba_func(estimators_, X_test,
is_multilabel=False),
meta_est.predict_proba(X_test))
else:
estimators_, classes_or_lb, codebook = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
codebook, X_test),
meta_est.predict(X_test))
| bsd-3-clause |
bzcheeseman/phys211 | Alex/Compton Scattering/plotter.py | 1 | 2439 | from scipy import optimize
import numpy as np
import matplotlib.pyplot as plt
def read_data():
datafile = r'data/data.csv'
data = np.genfromtxt(datafile, delimiter=',', skiprows=1)
return data
data = read_data()
def linear(p, x):
return p[0] * x + p[1]
def residual(p, x, y, err):
return (linear(p, x) - y) / err
#constants
mc2 = 511. #keV
E = 662. #keV, energy of initial gamma photons
#Day 1
angles1 = data[:,2] #x
cent1 = data[:,1] #y
#Calibration
e1 = np.array([81., 356., 662.])
c1 = np.array([118., 505., 921.]) # +/- 3
c1err = np.array([3., 3., 3.])
#Fitting
# Channel = A1 * Energy + A2
p1 = [0., 0.]
pf1, cov1, info1, mesg1, success1 = optimize.leastsq(residual, p1, args=(e1, c1, c1err), full_output=1)
chisq1 = sum(info1["fvec"]*info1["fvec"])
dof1 = len(e1)-len(pf1)
pferr1 = [np.sqrt(cov1[i,i]) for i in range(len(pf1))]
print 'pf1', pf1, '\n', cov1
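# The fit above gives channel = pf1[0]*E + pf1[1]; invert it to map channel
# back to energy: E = (channel - pf1[1])/pf1[0], i.e. slope A1 = 1/pf1[0]
# and intercept A2 = -pf1[1]/pf1[0].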
A1 = 1 / pf1[0]
A1err = (pferr1[0] / pf1[0]) * (1 / pf1[0])
A2 = - pf1[1] / pf1[0]
A2err = np.sqrt(pferr1[0] * (2 * pf1[1] / pf1[0]*pf1[0]) + pferr1[1] * (1 / pf1[0]))
A = np.array([A1, A2])
Aerr = np.array([A1err, A2err])
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
ax1 = plt.axes()
ax1.errorbar(c1, e1, xerr=c1err, yerr=0.0, fmt='k.', label = 'Data')
T = np.linspace(c1.min(), c1.max(), 5000)
ax1.plot(T, linear(A, T), 'r-', label = 'calibration')
ax1.set_title('Channel to Energy calibration - Day 1')
ax1.set_xlabel('Channel Number')
ax1.set_ylabel('Energy (keV)')
ax1.legend(loc=(0.7,0.5))
textfit = '$E = A_1 * channel + A_2$ \n' \
'$A_1 = %.2f \pm %.2f$ keV \n' \
'$A_2 = %.0f \pm %.0f$ keV \n' \
'$\chi^2= %.2f$ \n' \
'$N = %i$ (dof) \n' \
'$\chi^2/N = % .2f$' \
% (pf1[0], pferr1[0], pf1[1], pferr1[1], chisq1, dof1,
chisq1/dof1)
ax1.text(0.15, .8, textfit, transform=ax1.transAxes, fontsize=12,
verticalalignment='top')
plt.show()
energies = linear(A, cent1)
enerr = (A1err/A1) * cent1 + A2err
print angles1
print energies
print enerr
#m2, b2 = np.polyfit(c2, e1, 1)
#
#def fit2(x):
# return m2 * x + b2
#en2 = fit2(c2)
#
#
#
### DATA is angles1, angles2, en1, en2
#y = np.append(angles1, angles2)
#x = np.append(en1, en2)
#yerr = [1 * np.pi / 180] * len(y)
#
#
#def func(p,x):
# return np.arccos(p[0]/x)
#
#p = [1 - (E/(1 + E/mc2))] #expected
#
#popt, pcov = curve_fit(func, x, y, p, yerr, maxfev=int(2e6)) | lgpl-3.0 |
rxa254/MoodCube | Plotting/randomNoiseAnimation.py | 1 | 1571 |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from scipy import ndimage
# Update the matplotlib configuration parameters:
plt.rcParams.update({'font.size': 20,
'font.family': 'serif',
'figure.figsize': (10, 8),
'axes.grid': True,
'grid.color': '#555555'})
# these are the dimensions of the jellyfish
# 8 tentacles, 64 LEDs / tentacle, 3 colors/LED
d = 2**7
Nbits = 24
cmap = 'nipy_spectral'
#cmap = 'inferno'
zz = np.random.randint(low = 0,
high = 2**Nbits - 1,
size = (d, d),
dtype = 'uint')
fig = plt.figure()
im = plt.imshow(zz, animated = True,
interpolation = 'hermite',
cmap = cmap)
plt.xticks([])
plt.yticks([])
fig.tight_layout()
def updatefig(*args):
z = np.random.randint(low = 0,
high = 2**Nbits - 1,
size = zz.shape,
dtype = 'uint')
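# Smooth the fresh random field with a Gaussian low-pass filter applied in
# the Fourier domain: FFT, multiply by a Gaussian kernel (fourier_gaussian),
# then inverse FFT and take the magnitude for display.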
input_ = np.fft.fft2(z)
result = ndimage.fourier_gaussian(input_, sigma=1)
z = np.fft.ifft2(result)
z = np.abs(z)
im.set_array(z)
return im,
#leg = plt.legend(loc='best', fancybox=True, fontsize=14)
#leg.get_frame().set_alpha(0.5)
#plt.savefig("TRY.pdf", bbox_inches='tight')
ani = animation.FuncAnimation(fig, updatefig,
interval = 200,
blit = True)
plt.show()
| bsd-3-clause |
rs2/pandas | pandas/tests/indexes/multi/test_missing.py | 2 | 3378 | import numpy as np
import pytest
import pandas as pd
from pandas import MultiIndex
import pandas._testing as tm
def test_fillna(idx):
# GH 11343
msg = "isna is not defined for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
idx.fillna(idx[0])
def test_dropna():
# GH 6194
idx = pd.MultiIndex.from_arrays(
[
[1, np.nan, 3, np.nan, 5],
[1, 2, np.nan, np.nan, 5],
["a", "b", "c", np.nan, "e"],
]
)
exp = pd.MultiIndex.from_arrays([[1, 5], [1, 5], ["a", "e"]])
tm.assert_index_equal(idx.dropna(), exp)
tm.assert_index_equal(idx.dropna(how="any"), exp)
exp = pd.MultiIndex.from_arrays(
[[1, np.nan, 3, 5], [1, 2, np.nan, 5], ["a", "b", "c", "e"]]
)
tm.assert_index_equal(idx.dropna(how="all"), exp)
msg = "invalid how option: xxx"
with pytest.raises(ValueError, match=msg):
idx.dropna(how="xxx")
# GH26408
# test if missing values are dropped for multiindex constructed
# from codes and values
idx = MultiIndex(
levels=[[np.nan, None, pd.NaT, "128", 2], [np.nan, None, pd.NaT, "128", 2]],
codes=[[0, -1, 1, 2, 3, 4], [0, -1, 3, 3, 3, 4]],
)
expected = MultiIndex.from_arrays([["128", 2], ["128", 2]])
tm.assert_index_equal(idx.dropna(), expected)
tm.assert_index_equal(idx.dropna(how="any"), expected)
expected = MultiIndex.from_arrays(
[[np.nan, np.nan, "128", 2], ["128", "128", "128", 2]]
)
tm.assert_index_equal(idx.dropna(how="all"), expected)
def test_nulls(idx):
# this is really a smoke test for the methods
# as these are adequately tested for function elsewhere
msg = "isna is not defined for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
idx.isna()
@pytest.mark.xfail(reason="isna is not defined for MultiIndex")
def test_hasnans_isnans(idx):
# GH 11343, added tests for hasnans / isnans
index = idx.copy()
# cases in indices doesn't include NaN
expected = np.array([False] * len(index), dtype=bool)
tm.assert_numpy_array_equal(index._isnan, expected)
assert index.hasnans is False
index = idx.copy()
values = index.values
values[1] = np.nan
index = type(idx)(values)
expected = np.array([False] * len(index), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(index._isnan, expected)
assert index.hasnans is True
def test_nan_stays_float():
# GH 7031
idx0 = pd.MultiIndex(
levels=[["A", "B"], []], codes=[[1, 0], [-1, -1]], names=[0, 1]
)
idx1 = pd.MultiIndex(levels=[["C"], ["D"]], codes=[[0], [0]], names=[0, 1])
idxm = idx0.join(idx1, how="outer")
assert pd.isna(idx0.get_level_values(1)).all()
# the following failed in 0.14.1
assert pd.isna(idxm.get_level_values(1)[:-1]).all()
df0 = pd.DataFrame([[1, 2]], index=idx0)
df1 = pd.DataFrame([[3, 4]], index=idx1)
dfm = df0 - df1
assert pd.isna(df0.index.get_level_values(1)).all()
# the following failed in 0.14.1
assert pd.isna(dfm.index.get_level_values(1)[:-1]).all()
def test_tuples_have_na():
index = MultiIndex(
levels=[[1, 0], [0, 1, 2, 3]],
codes=[[1, 1, 1, 1, -1, 0, 0, 0], [0, 1, 2, 3, 0, 1, 2, 3]],
)
assert pd.isna(index[4][0])
assert pd.isna(index.values[4][0])
| bsd-3-clause |
balister/GNU-Radio | gr-filter/examples/fir_filter_fff.py | 5 | 3225 | #!/usr/bin/env python
from gnuradio import gr, filter
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_fir_filter_fff(gr.top_block):
def __init__(self, N, fs, bw, tw, atten, D):
gr.top_block.__init__(self)
self._nsamps = N
self._fs = fs
self._bw = bw
self._tw = tw
self._at = atten
self._decim = D
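# Design the low-pass FIR taps: unity gain, sample rate fs, passband edge bw,
# transition width tw, stopband attenuation atten (dB).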
taps = filter.firdes.low_pass_2(1, self._fs, self._bw, self._tw, self._at)
print "Num. Taps: ", len(taps)
self.src = analog.noise_source_f(analog.GR_GAUSSIAN, 1)
self.head = blocks.head(gr.sizeof_float, self._nsamps)
self.filt0 = filter.fir_filter_fff(self._decim, taps)
self.vsnk_src = blocks.vector_sink_f()
self.vsnk_out = blocks.vector_sink_f()
self.connect(self.src, self.head, self.vsnk_src)
self.connect(self.head, self.filt0, self.vsnk_out)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Number of samples to process [default=%default]")
parser.add_option("-s", "--samplerate", type="eng_float", default=8000,
help="System sample rate [default=%default]")
parser.add_option("-B", "--bandwidth", type="eng_float", default=1000,
help="Filter bandwidth [default=%default]")
parser.add_option("-T", "--transition", type="eng_float", default=100,
help="Transition band [default=%default]")
parser.add_option("-A", "--attenuation", type="eng_float", default=80,
help="Stopband attenuation [default=%default]")
parser.add_option("-D", "--decimation", type="int", default=1,
help="Decmation factor [default=%default]")
(options, args) = parser.parse_args ()
put = example_fir_filter_fff(options.nsamples,
options.samplerate,
options.bandwidth,
options.transition,
options.attenuation,
options.decimation)
put.run()
data_src = scipy.array(put.vsnk_src.data())
data_snk = scipy.array(put.vsnk_out.data())
# Plot the signals PSDs
nfft = 1024
f1 = pylab.figure(1, figsize=(12,10))
s1 = f1.add_subplot(1,1,1)
s1.psd(data_src, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
s1.psd(data_snk, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
f2 = pylab.figure(2, figsize=(12,10))
s2 = f2.add_subplot(1,1,1)
s2.plot(data_src)
s2.plot(data_snk.real, 'g')
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
anubhavvardhan/qutip | qutip/tomography.py | 9 | 7107 | # This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
__all__ = ['qpt_plot', 'qpt_plot_combined', 'qpt']
from qutip.tensor import tensor
from qutip.superoperator import spre, spost, mat2vec, vec2mat
from numpy import hstack, real, imag
import scipy.linalg as la
from qutip.visualization import matrix_histogram, matrix_histogram_complex
try:
import matplotlib.pyplot as plt
except:
pass
def _index_permutations(size_list, perm=[]):
"""
Generate a list with all index permutations.
Parameters
----------
size_list : list
A list that contains the sizes for each composite system.
perm : list
A list of permutations
Returns
-------
perm_idx : list
List containing index permutations.
"""
if len(size_list) == 0:
yield perm
else:
for n in range(size_list[0]):
for ip in _index_permutations(size_list[1:], perm + [n]):
yield ip
def qpt_plot(chi, lbls_list, title=None, fig=None, axes=None):
"""
Visualize the quantum process tomography chi matrix. Plot the real and
imaginary parts separately.
Parameters
----------
chi : array
Input QPT chi matrix.
lbls_list : list
List of labels for QPT plot axes.
title : string
Plot title.
fig : figure instance
User defined figure instance used for generating QPT plot.
axes : list of figure axis instance
User defined figure axis instance (list of two axes) used for
generating QPT plot.
Returns
-------
fig, ax : tuple
A tuple of the matplotlib figure and axes instances used to produce
the figure.
"""
if axes is None or len(axes) != 2:
if fig is None:
fig = plt.figure(figsize=(16, 8))
ax1 = fig.add_subplot(1, 2, 1, projection='3d', position=[0, 0, 1, 1])
ax2 = fig.add_subplot(1, 2, 2, projection='3d', position=[0, 0, 1, 1])
axes = [ax1, ax2]
xlabels = []
for inds in _index_permutations([len(lbls) for lbls in lbls_list]):
xlabels.append("".join([lbls_list[k][inds[k]]
for k in range(len(lbls_list))]))
matrix_histogram(real(chi), xlabels, xlabels,
title=r"real($\chi$)", limits=[-1, 1], ax=axes[0])
matrix_histogram(imag(chi), xlabels, xlabels,
title=r"imag($\chi$)", limits=[-1, 1], ax=axes[1])
if title and fig:
fig.suptitle(title)
return fig, axes
def qpt_plot_combined(chi, lbls_list, title=None,
fig=None, ax=None, figsize=(8, 6),
threshold=None):
"""
Visualize the quantum process tomography chi matrix. Plot bars with
height and color corresponding to the absolute value and phase,
respectively.
Parameters
----------
chi : array
Input QPT chi matrix.
lbls_list : list
List of labels for QPT plot axes.
title : string
Plot title.
fig : figure instance
User defined figure instance used for generating QPT plot.
ax : figure axis instance
User defined figure axis instance used for generating QPT plot
(alternative to the fig argument).
threshold: float (None)
Threshold for when bars of smaller height should be transparent. If
not set, all bars are colored according to the color map.
Returns
-------
fig, ax : tuple
A tuple of the matplotlib figure and axes instances used to produce
the figure.
"""
if ax is None:
if fig is None:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(1, 1, 1, projection='3d', position=[0, 0, 1, 1])
xlabels = []
for inds in _index_permutations([len(lbls) for lbls in lbls_list]):
xlabels.append("".join(
[lbls_list[k][inds[k]] for k in range(len(lbls_list))]))
if not title:
title = r"$\chi$"
matrix_histogram_complex(chi, xlabels, xlabels, title=title, ax=ax,
threshold=threshold)
return fig, ax
def qpt(U, op_basis_list):
"""
Calculate the quantum process tomography chi matrix for a given (possibly
nonunitary) transformation matrix U, which transforms a density matrix in
vector form according to:
vec(rho) = U * vec(rho0)
or
rho = vec2mat(U * mat2vec(rho0))
U can be calculated for an open quantum system using the QuTiP propagator
function.
Parameters
----------
U : Qobj
Transformation operator. Can be calculated using QuTiP propagator
function.
op_basis_list : list
A list of Qobj's representing the basis states.
Returns
-------
chi : array
QPT chi matrix
"""
E_ops = []
# loop over all index permutations
for inds in _index_permutations([len(ops) for ops in op_basis_list]):
# loop over all composite systems
E_op_list = [op_basis_list[k][inds[k]] for k in range(len(
op_basis_list))]
E_ops.append(tensor(E_op_list))
EE_ops = [spre(E1) * spost(E2.dag()) for E1 in E_ops for E2 in E_ops]
M = hstack([mat2vec(EE.full()) for EE in EE_ops])
Uvec = mat2vec(U.full())
chi_vec = la.solve(M, Uvec)
return vec2mat(chi_vec)
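# Illustrative usage sketch (added for clarity, not part of the original
# module); the single-qubit Pauli basis and the gate U_psi below are assumed
# examples, not definitions from this file:
#
#     op_basis = [[qeye(2), sigmax(), sigmay(), sigmaz()]]
#     U = spre(U_psi) * spost(U_psi.dag())    # superoperator of the gate
#     chi = qpt(U, op_basis)                  # 4 x 4 process matrix
#     qpt_plot_combined(chi, [["i", "x", "y", "z"]])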
| bsd-3-clause |
kernc/scikit-learn | examples/manifold/plot_swissroll.py | 330 | 1446 | """
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
except:
ax = fig.add_subplot(211)
ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
| bsd-3-clause |
martimy/SANDProject | sand/add.py | 1 | 8333 | #!/usr/bin/python3
# Copyright (c) 2017 Maen Artimy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This file includes a Python implementation of the ADD Algorithm described in:
# Aaron Kershenbaum. 1993. Telecommunications Network Design Algorithms. McGraw-
# Hill, Inc., New York, NY, USA.
from .main import SANDAlgorithm
import networkx as nx
import matplotlib.pyplot as plt
class ADD(SANDAlgorithm):
def __init__(self):
super().__init__()
def run(self, cost, Ccost, weight, center=0, Wlimit=10, th_move=0):
self.nt = len(cost) # Number of terminals
self.nc = len(cost[0]) # Number of concentrators
self.Wlimit = Wlimit # Maximum number of terminals
self.th_move = th_move # Cost to move terminal
self.cost = cost # Cost matrix (nt x nc)
self.weight = weight # Load vector (nt)
self.Ccost = Ccost # Cost to build a concentrator (nc)
self.center = center # Index of the central location
self.logger.debug('Starting ADD Algorithm')
# Associate all nodes with the central location
# and calculate the initial cost
self.Cassoc = [self.center] * self.nt # Association with a conc
self.cTotal = sum([self.cost[t][self.Cassoc[t]]
for t in range(self.nt)]) + self.Ccost[self.center]
self.logger.debug("Initial cost = %d" % self.cTotal)
# Calculate the cost savings for the remaining concentrators
remConc = list(range(self.nc))
remConc.remove(self.center)
self.logger.debug("Concentrators to be evaluated = %s" % remConc)
while len(remConc) > 0:
savings = 0
conc = 0
for t in remConc:
expense = self.__evalConc(t)
if(expense < savings):
savings = expense
conc = t
if(savings < 0):
self.__addConc(conc)
self.cTotal += savings
self.logger.debug("Concentrator %d \
is added for total cost of %d" % (conc, self.cTotal))
remConc.remove(conc)
self.logger.debug("Current association = %s" % self.Cassoc)
else:
self.logger.debug("No more savings!")
break
# Sanity check
tCost = sum([self.cost[t][self.Cassoc[t]] for t in range(self.nt)])
cCost = sum([self.Ccost[c] for c in set(self.Cassoc)])
if((tCost + cCost) != self.cTotal):
self.logger.error("Something is wrong, \
detected cost discrepancy! %d %d %d" % (tCost, cCost, self.cTotal))
return({"cost": self.cTotal, "center": self.center, "num": self.nt,
"assoc": self.Cassoc, "conc": set(self.Cassoc)})
def __evalConc(self, c):
delta = [0] * self.nt
ter = [0] * self.nt
expense = self.Ccost[c]
slack = self.Wlimit
n = 0
# Calculate the saving, if any, that results from
# connecting terminal t to concentrator c;
# count the benefiting terminals and
# save their indices in ter
for t in range(self.nt):
s = self.cost[t][c] - self.cost[t][self.Cassoc[t]]
if(s < 0):
delta[n] = s # amount saved
ter[n] = t # terminal
n += 1
if(n == 0): # no terminal benefited
return(expense)
# Sort the savings, largest first and return index of sorted list
permu = sorted(range(len(delta)), key=lambda k: delta[k])
for p in permu:
t = ter[p]
if(delta[p] >= 0):
break
elif((self.weight[t] <= slack) and ((self.Cassoc[t] == self.center)
or (delta[p] + self.th_move < 0))):
expense += delta[p]
slack -= self.weight[t]
self.logger.debug("Savings for concentrator %d is %d" % (c, expense))
return(expense)
def __addConc(self, c):
delta = []
ter = []
for t in range(self.nt):
s = self.cost[t][c] - self.cost[t][self.Cassoc[t]]
if(s < 0):
delta.append(s) # amount saved
ter.append(t) # terminal
slack = self.Wlimit
permu = sorted(range(len(delta)), key=lambda k: delta[k])
#permu = [b[0] for b in sorted(enumerate(delta), key=lambda k:k[1])]
for p in permu:
t = ter[p]
if(delta[p] >= 0):
break
elif((self.weight[t] <= slack) and ((self.Cassoc[t] == self.center)
or (delta[p] + self.th_move < 0))):
self.Cassoc[t] = c
slack -= self.weight[t]
self.logger.debug("Adding concentrator %d" % c)
# print cost list of network produced by ADD algorithm
def printCost(out, cost):
concList = out["conc"]
termList = out["assoc"]
ncenter = out["center"]
numConc = len(concList)
nodeAssoc = [(i, termList.count(i)-1) for i in concList]
c = [cost[i][ncenter] for i in range(len(cost))]
print("Original cost =", sum(c))
print("Central node =", ncenter)
print("Number of concentrators =", numConc)
print("Number of nodes per concentrators =")
print('%4s\t%10s' % ("Conc", "Terminals"))
for n in nodeAssoc:
print('%4d\t%10d' % (n[0], n[1]))
print("Total Cost =", out["cost"])
# Plot topology produced by MENTOR algorithm
def plotNetwork(out, pos, labels=[], filename="figure_add.png",
title='ADD Algorithm', show_center=False):
numNodes = out["num"]
center = out["center"]
concList = out["conc"]
edges = [(k, out["assoc"][k]) for k in range(numNodes)]
tree = [(center, n) for n in out["conc"]]
plt.figure(figsize=(6, 6), facecolor="white")
G = nx.path_graph(numNodes)
nx.draw_networkx_edges(G, pos, edgelist=edges, alpha=0.3,
edge_color="blue")
if show_center:
nx.draw_networkx_edges(G, pos, edgelist=tree, width=2,
edge_color="blue", alpha=0.1)
# Draw all nodes
nx.draw_networkx_nodes(G, pos, node_size=10, node_color="green", alpha=0.5)
nx.draw_networkx_nodes(G, pos, nodelist=[center], node_size=150,
node_color="black")
nx.draw_networkx_nodes(G, pos, nodelist=concList, node_size=50,
node_color="red")
# Draw node and edge labels
#elabels = {e:ch[mesh.index(e)] for e in mesh}
# nx.draw_networkx_edge_labels(G, pos, elabels, edgelist=mesh, font_size=10,
# font_color="grey")
if labels:
nLabel = {n: labels[n] for n in concList}
npos = {n: (pos[n][0], pos[n][1]+0.03) for n in pos}
nx.draw_networkx_labels(G, npos, nLabel, nodelist=concList,
font_size=10, font_color="black")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.axis('off')
plt.title(title)
plt.savefig(filename)
plt.show()
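# Illustrative usage sketch (added for clarity, not part of the original
# module); the input arrays below are assumed examples:
#
#     # cost[t][c]: cost of homing terminal t on site c; Ccost[c]: site cost;
#     # weight[t]: terminal load; node 0 acts as the central site.
#     alg = ADD()
#     out = alg.run(cost, Ccost, weight, center=0, Wlimit=10, th_move=0)
#     printCost(out, cost)
#     plotNetwork(out, pos, labels)   # pos maps node -> (x, y) in [0, 1]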
| mit |
tapomayukh/projects_in_python | classification/Classification_with_HMM/Single_Contact_Classification/multivariate_gaussian_emissions/test_crossvalidation_force_motion_10_states.py | 1 | 16532 | # Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_HMM/Scaled')
from data_Scaled_method_III import Fmat_original
# Returns mu,sigma for 10 hidden-states from feature-vectors(123,35) for RF,SF,RM,SM models
def feature_to_mu_cov(fvec1,fvec2):
index = 0
m,n = np.shape(fvec1)
#print m,n
mu_1 = np.zeros((10,1))
mu_2 = np.zeros((10,1))
cov = np.zeros((10,2,2))
DIVS = m/10
while (index < 10):
m_init = index*DIVS
temp_fvec1 = fvec1[(m_init):(m_init+DIVS),0:]
temp_fvec2 = fvec2[(m_init):(m_init+DIVS),0:]
temp_fvec1 = np.reshape(temp_fvec1,DIVS*n)
temp_fvec2 = np.reshape(temp_fvec2,DIVS*n)
mu_1[index] = np.mean(temp_fvec1)
mu_2[index] = np.mean(temp_fvec2)
cov[index,:,:] = np.cov(np.concatenate((temp_fvec1,temp_fvec2),axis=0))
#if index == 0:
#print 'mean = ', mu_2[index]
#print 'mean = ', scp.mean(fvec2[(m_init):(m_init+DIVS),0:])
#print np.shape(np.concatenate((temp_fvec1,temp_fvec2),axis=0))
#print cov[index,:,:]
#print scp.std(fvec2[(m_init):(m_init+DIVS),0:])
#print scp.std(temp_fvec2)
index = index+1
return mu_1,mu_2,cov
if __name__ == '__main__' or __name__ != '__main__':
Fmat = Fmat_original
# Checking the Data-Matrix
Fmat_import = Fmat
#print np.shape(Fmat[0])
m_tot, n_tot = np.shape(Fmat)
#print " "
#print 'Total_Matrix_Shape:',m_tot,n_tot
mu_rf_force,mu_rf_motion,cov_rf = feature_to_mu_cov(Fmat[0:121,0:35],Fmat[242:363,0:35])
mu_rm_force,mu_rm_motion,cov_rm = feature_to_mu_cov(Fmat[0:121,35:70],Fmat[242:363,35:70])
mu_sf_force,mu_sf_motion,cov_sf = feature_to_mu_cov(Fmat[0:121,70:105],Fmat[242:363,70:105])
mu_sm_force,mu_sm_motion,cov_sm = feature_to_mu_cov(Fmat[0:121,105:140],Fmat[242:363,105:140])
# HMM - Implementation:
# 10 Hidden States
# Max. Force(For now), Contact Area(Not now), and Contact Motion(Not Now) as Continuous Gaussian Observations from each hidden state
# Four HMM-Models for Rigid-Fixed, Soft-Fixed, Rigid-Movable, Soft-Movable
# Transition probabilities form an upper-triangular matrix (to be trained using Baum-Welch)
# For new objects, each is classified according to which model represents it the closest.
F = ghmm.Float() # emission domain of this model
# A - Transition Matrix
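# (upper-triangular, i.e. a left-to-right model: the hidden-state index can
# only stay the same or increase as the contact sequence unfolds)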
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.20, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.2, 0.30, 0.30, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.2, 0.50, 0.30],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.4, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf = [0.0]*10
B_rm = [0.0]*10
B_sf = [0.0]*10
B_sm = [0.0]*10
for num_states in range(10):
B_rf[num_states] = [[mu_rf_force[num_states][0],mu_rf_motion[num_states][0]],[cov_rf[num_states][0][0],cov_rf[num_states][0][1],cov_rf[num_states][1][0],cov_rf[num_states][1][1]]]
B_rm[num_states] = [[mu_rm_force[num_states][0],mu_rm_motion[num_states][0]],[cov_rm[num_states][0][0],cov_rm[num_states][0][1],cov_rm[num_states][1][0],cov_rm[num_states][1][1]]]
B_sf[num_states] = [[mu_sf_force[num_states][0],mu_sf_motion[num_states][0]],[cov_sf[num_states][0][0],cov_sf[num_states][0][1],cov_sf[num_states][1][0],cov_sf[num_states][1][1]]]
B_sm[num_states] = [[mu_sm_force[num_states][0],mu_sm_motion[num_states][0]],[cov_sm[num_states][0][0],cov_sm[num_states][0][1],cov_sm[num_states][1][0],cov_sm[num_states][1][1]]]
#print cov_sm[num_states][0][0],cov_sm[num_states][0][1],cov_sm[num_states][1][0],cov_sm[num_states][1][1]
#print "----"
#print B_sm
#print mu_sm_motion
# pi - initial probabilities per state
pi = [0.1] * 10
# generate RF, RM, SF, SM models from parameters
model_rf = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rf, pi) # Will be Trained
model_rm = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rm, pi) # Will be Trained
model_sf = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sf, pi) # Will be Trained
model_sm = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sm, pi) # Will be Trained
# For Training
trial_number = 1
rf_final = np.matrix(np.zeros((28,1)))
rm_final = np.matrix(np.zeros((28,1)))
sf_final = np.matrix(np.zeros((28,1)))
sm_final = np.matrix(np.zeros((28,1)))
total_seq = np.matrix(np.concatenate((np.array(Fmat[0:121,:]),np.array(Fmat[242:363,:])),axis=0))
m_total, n_total = np.shape(total_seq)
#print 'Total_Sequence_Shape:', m_total, n_total
while (trial_number < 6):
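# Leave-one-trial-out cross-validation: the columns come in blocks of five
# trials per object; in each pass four trials of every block are used to
# train the four HMMs and the held-out trial is used for testing below.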
# For Training
if (trial_number == 1):
j = 5
total_seq_rf = total_seq[:,1:5]
total_seq_rm = total_seq[:,36:40]
total_seq_sf = total_seq[:,71:75]
total_seq_sm = total_seq[:,106:110]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[:,j+1:j+5]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[:,j+36:j+40]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[:,j+71:j+75]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[:,j+106:j+110]))
j = j+5
if (trial_number == 2):
j = 5
total_seq_rf = np.column_stack((total_seq[:,0],total_seq[:,2:5]))
total_seq_rm = np.column_stack((total_seq[:,35],total_seq[:,37:40]))
total_seq_sf = np.column_stack((total_seq[:,70],total_seq[:,72:75]))
total_seq_sm = np.column_stack((total_seq[:,105],total_seq[:,107:110]))
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[:,j+0],total_seq[:,j+2:j+5]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[:,j+35],total_seq[:,j+37:j+40]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[:,j+70],total_seq[:,j+72:j+75]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[:,j+105],total_seq[:,j+107:j+110]))
j = j+5
if (trial_number == 3):
j = 5
total_seq_rf = np.column_stack((total_seq[:,0:2],total_seq[:,3:5]))
total_seq_rm = np.column_stack((total_seq[:,35:37],total_seq[:,38:40]))
total_seq_sf = np.column_stack((total_seq[:,70:72],total_seq[:,73:75]))
total_seq_sm = np.column_stack((total_seq[:,105:107],total_seq[:,108:110]))
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[:,j+0:j+2],total_seq[:,j+3:j+5]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[:,j+35:j+37],total_seq[:,j+38:j+40]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[:,j+70:j+72],total_seq[:,j+73:j+75]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[:,j+105:j+107],total_seq[:,j+108:j+110]))
j = j+5
if (trial_number == 4):
j = 5
total_seq_rf = np.column_stack((total_seq[:,0:3],total_seq[:,4:5]))
total_seq_rm = np.column_stack((total_seq[:,35:38],total_seq[:,39:40]))
total_seq_sf = np.column_stack((total_seq[:,70:73],total_seq[:,74:75]))
total_seq_sm = np.column_stack((total_seq[:,105:108],total_seq[:,109:110]))
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[:,j+0:j+3],total_seq[:,j+4:j+5]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[:,j+35:j+38],total_seq[:,j+39:j+40]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[:,j+70:j+73],total_seq[:,j+74:j+75]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[:,j+105:j+108],total_seq[:,j+109:j+110]))
j = j+5
if (trial_number == 5):
j = 5
total_seq_rf = total_seq[:,0:4]
total_seq_rm = total_seq[:,35:39]
total_seq_sf = total_seq[:,70:74]
total_seq_sm = total_seq[:,105:109]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[:,j+0:j+4]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[:,j+35:j+39]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[:,j+70:j+74]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[:,j+105:j+109]))
j = j+5
train_seq_rf = (np.array(total_seq_rf).T).tolist()
train_seq_rm = (np.array(total_seq_rm).T).tolist()
train_seq_sf = (np.array(total_seq_sf).T).tolist()
train_seq_sm = (np.array(total_seq_sm).T).tolist()
#print train_seq_rf
final_ts_rf = ghmm.SequenceSet(F,train_seq_rf)
final_ts_rm = ghmm.SequenceSet(F,train_seq_rm)
final_ts_sf = ghmm.SequenceSet(F,train_seq_sf)
final_ts_sm = ghmm.SequenceSet(F,train_seq_sm)
model_rf.baumWelch(final_ts_rf)
model_rm.baumWelch(final_ts_rm)
model_sf.baumWelch(final_ts_sf)
model_sm.baumWelch(final_ts_sm)
# For Testing
if (trial_number == 1):
j = 5
total_seq_rf = total_seq[:,0]
total_seq_rm = total_seq[:,35]
total_seq_sf = total_seq[:,70]
total_seq_sm = total_seq[:,105]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[:,j]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[:,j+35]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[:,j+70]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[:,j+105]))
j = j+5
if (trial_number == 2):
j = 5
total_seq_rf = total_seq[:,1]
total_seq_rm = total_seq[:,36]
total_seq_sf = total_seq[:,71]
total_seq_sm = total_seq[:,106]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[:,j+1]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[:,j+36]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[:,j+71]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[:,j+106]))
j = j+5
if (trial_number == 3):
j = 5
total_seq_rf = total_seq[:,2]
total_seq_rm = total_seq[:,37]
total_seq_sf = total_seq[:,72]
total_seq_sm = total_seq[:,107]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[:,j+2]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[:,j+37]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[:,j+72]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[:,j+107]))
j = j+5
if (trial_number == 4):
j = 5
total_seq_rf = total_seq[:,3]
total_seq_rm = total_seq[:,38]
total_seq_sf = total_seq[:,73]
total_seq_sm = total_seq[:,108]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[:,j+3]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[:,j+38]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[:,j+73]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[:,j+108]))
j = j+5
if (trial_number == 5):
j = 5
total_seq_rf = total_seq[:,4]
total_seq_rm = total_seq[:,39]
total_seq_sf = total_seq[:,74]
total_seq_sm = total_seq[:,109]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[:,j+4]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[:,j+39]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[:,j+74]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[:,j+109]))
j = j+5
total_seq_obj = np.matrix(np.column_stack((total_seq_rf,total_seq_rm,total_seq_sf,total_seq_sm)))
rf = np.matrix(np.zeros(np.size(total_seq_obj,1)))
rm = np.matrix(np.zeros(np.size(total_seq_obj,1)))
sf = np.matrix(np.zeros(np.size(total_seq_obj,1)))
sm = np.matrix(np.zeros(np.size(total_seq_obj,1)))
k = 0
while (k < np.size(total_seq_obj,1)):
test_seq_obj = (np.array(total_seq_obj[:,k]).T).tolist()
new_test_seq_obj = np.array(sum(test_seq_obj,[]))
ts_obj = new_test_seq_obj
final_ts_obj = ghmm.EmissionSequence(F,ts_obj.tolist())
# Find Viterbi Path
path_rf_obj = model_rf.viterbi(final_ts_obj)
path_rm_obj = model_rm.viterbi(final_ts_obj)
path_sf_obj = model_sf.viterbi(final_ts_obj)
path_sm_obj = model_sm.viterbi(final_ts_obj)
obj = max(path_rf_obj[1],path_rm_obj[1],path_sf_obj[1],path_sm_obj[1])
if obj == path_rf_obj[1]:
rf[0,k] = 1
elif obj == path_rm_obj[1]:
rm[0,k] = 1
elif obj == path_sf_obj[1]:
sf[0,k] = 1
else:
sm[0,k] = 1
k = k+1
#print rf.T
rf_final = rf_final + rf.T
rm_final = rm_final + rm.T
sf_final = sf_final + sf.T
sm_final = sm_final + sm.T
trial_number = trial_number + 1
#print rf_final
#print rm_final
#print sf_final
#print sm_final
# Confusion Matrix
cmat = np.zeros((4,4))
arrsum_rf = np.zeros((4,1))
arrsum_rm = np.zeros((4,1))
arrsum_sf = np.zeros((4,1))
arrsum_sm = np.zeros((4,1))
k = 7
i = 0
while (k < 29):
arrsum_rf[i] = np.sum(rf_final[k-7:k,0])
arrsum_rm[i] = np.sum(rm_final[k-7:k,0])
arrsum_sf[i] = np.sum(sf_final[k-7:k,0])
arrsum_sm[i] = np.sum(sm_final[k-7:k,0])
i = i+1
k = k+7
i=0
while (i < 4):
j=0
while (j < 4):
if (i == 0):
cmat[i][j] = arrsum_rf[j]
elif (i == 1):
cmat[i][j] = arrsum_rm[j]
elif (i == 2):
cmat[i][j] = arrsum_sf[j]
else:
cmat[i][j] = arrsum_sm[j]
j = j+1
i = i+1
#print cmat
# Plot Confusion Matrix
#Nlabels = 4
#fig = pp.figure()
#ax = fig.add_subplot(111)
#figplot = ax.matshow(cmat, interpolation = 'nearest', origin = 'upper', extent=[0, Nlabels, 0, Nlabels])
#ax.set_title('Performance of HMM Models')
#pp.xlabel("Targets")
#pp.ylabel("Predictions")
#ax.set_xticks([0.5,1.5,2.5,3.5])
#ax.set_xticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
#ax.set_yticks([3.5,2.5,1.5,0.5])
#ax.set_yticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
#figbar = fig.colorbar(figplot)
#i = 0
#while (i < 4):
#j = 0
#while (j < 4):
#pp.text(j+0.5,3.5-i,cmat[i][j])
#j = j+1
#i = i+1
#pp.show()
| mit |
mengxn/tensorflow | tensorflow/examples/learn/mnist.py | 45 | 3999 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This showcases how simple it is to build image classification networks.
It follows the description in this TensorFlow tutorial:
https://www.tensorflow.org/versions/master/tutorials/mnist/pros/index.html#deep-mnist-for-experts
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import metrics
import tensorflow as tf
layers = tf.contrib.layers
learn = tf.contrib.learn
def max_pool_2x2(tensor_in):
return tf.nn.max_pool(
tensor_in, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def conv_model(feature, target, mode):
"""2-layer convolution model."""
  # Convert the target to a one-hot tensor of shape (batch_size, 10),
  # with an on-value of 1 for each one-hot vector of length 10.
target = tf.one_hot(tf.cast(target, tf.int32), 10, 1, 0)
  # Reshape feature to a 4d tensor with the 2nd and 3rd dimensions being
  # image width and height, and the final dimension being the number of color channels.
feature = tf.reshape(feature, [-1, 28, 28, 1])
# First conv layer will compute 32 features for each 5x5 patch
with tf.variable_scope('conv_layer1'):
h_conv1 = layers.convolution2d(
feature, 32, kernel_size=[5, 5], activation_fn=tf.nn.relu)
h_pool1 = max_pool_2x2(h_conv1)
# Second conv layer will compute 64 features for each 5x5 patch.
with tf.variable_scope('conv_layer2'):
h_conv2 = layers.convolution2d(
h_pool1, 64, kernel_size=[5, 5], activation_fn=tf.nn.relu)
h_pool2 = max_pool_2x2(h_conv2)
# reshape tensor into a batch of vectors
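  # (two 2x2 max-pools reduce the 28x28 input to 7x7, with 64 feature maps,
  # hence 7 * 7 * 64 values per example)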
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
# Densely connected layer with 1024 neurons.
h_fc1 = layers.dropout(
layers.fully_connected(
h_pool2_flat, 1024, activation_fn=tf.nn.relu),
keep_prob=0.5,
is_training=mode == tf.contrib.learn.ModeKeys.TRAIN)
# Compute logits (1 per class) and compute loss.
logits = layers.fully_connected(h_fc1, 10, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
# Create a tensor for training op.
train_op = layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='SGD',
learning_rate=0.001)
return tf.argmax(logits, 1), loss, train_op
def main(unused_args):
### Download and load MNIST dataset.
mnist = learn.datasets.load_dataset('mnist')
### Linear classifier.
feature_columns = learn.infer_real_valued_columns_from_input(
mnist.train.images)
classifier = learn.LinearClassifier(
feature_columns=feature_columns, n_classes=10)
classifier.fit(mnist.train.images,
mnist.train.labels.astype(np.int32),
batch_size=100,
steps=1000)
score = metrics.accuracy_score(mnist.test.labels,
list(classifier.predict(mnist.test.images)))
print('Accuracy: {0:f}'.format(score))
### Convolutional network
classifier = learn.Estimator(model_fn=conv_model)
classifier.fit(mnist.train.images,
mnist.train.labels,
batch_size=100,
steps=20000)
score = metrics.accuracy_score(mnist.test.labels,
list(classifier.predict(mnist.test.images)))
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
SamStudio8/scikit-bio | skbio/stats/distance/tests/test_bioenv.py | 13 | 9972 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from unittest import TestCase, main
import numpy as np
import pandas as pd
from skbio import DistanceMatrix
from skbio.stats.distance import bioenv
from skbio.stats.distance._bioenv import _scale
from skbio.util import get_data_path, assert_data_frame_almost_equal
class BIOENVTests(TestCase):
"""Results were verified with R 3.0.2 and vegan 2.0-10 (vegan::bioenv)."""
def setUp(self):
# The test dataset used here is a subset of the Lauber et al. 2009
# "88 Soils" dataset. It has been altered to exercise various aspects
# of the code, including (but not limited to):
#
# - order of distance matrix IDs and IDs in data frame (metadata) are
# not exactly the same
# - data frame has an extra sample that is not in the distance matrix
# - this extra sample has non-numeric and missing values in some of its
# cells
#
# Additional variations of the distance matrix and data frame are used
# to test different orderings of rows/columns, extra non-numeric data
# frame columns, etc.
#
# This dataset is also useful because it is non-trivial in size (6
# samples, 11 environment variables) and it includes positive/negative
# floats and integers in the data frame.
self.dm = DistanceMatrix.read(get_data_path('dm.txt'))
# Reordered rows and columns (i.e., different ID order). Still
# conceptually the same distance matrix.
self.dm_reordered = DistanceMatrix.read(
get_data_path('dm_reordered.txt'))
self.df = pd.read_csv(get_data_path('df.txt'), sep='\t', index_col=0)
# Similar to the above data frame, except that it has an extra
# non-numeric column, and some of the other rows and columns have been
# reordered.
self.df_extra_column = pd.read_csv(
get_data_path('df_extra_column.txt'), sep='\t', index_col=0)
# All columns in the original data frame (these are all numeric
# columns).
self.cols = self.df.columns.tolist()
# This second dataset is derived from vegan::bioenv's example dataset
# (varespec and varechem). The original dataset includes a site x
# species table (e.g., OTU table) and a data frame of environmental
# variables. Since the bioenv function defined here accepts a distance
# matrix, we use a Bray-Curtis distance matrix that is derived from the
# site x species table (this matches what is done by vegan::bioenv when
# provided an OTU table, using their default distance measure). The
# data frame only includes the numeric environmental variables we're
# interested in for these tests: log(N), P, K, Ca, pH, Al
self.dm_vegan = DistanceMatrix.read(
get_data_path('bioenv_dm_vegan.txt'))
self.df_vegan = pd.read_csv(
get_data_path('bioenv_df_vegan.txt'), sep='\t',
converters={0: str})
self.df_vegan.set_index('#SampleID', inplace=True)
# Load expected results.
self.exp_results = pd.read_csv(get_data_path('exp_results.txt'),
sep='\t', index_col=0)
self.exp_results_single_column = pd.read_csv(
get_data_path('exp_results_single_column.txt'), sep='\t',
index_col=0)
self.exp_results_different_column_order = pd.read_csv(
get_data_path('exp_results_different_column_order.txt'), sep='\t',
index_col=0)
self.exp_results_vegan = pd.read_csv(
get_data_path('bioenv_exp_results_vegan.txt'), sep='\t',
index_col=0)
def test_bioenv_all_columns_implicit(self):
# Test with all columns in data frame (implicitly).
obs = bioenv(self.dm, self.df)
assert_data_frame_almost_equal(obs, self.exp_results)
# Should get the same results if order of rows/cols in distance matrix
# is changed.
obs = bioenv(self.dm_reordered, self.df)
assert_data_frame_almost_equal(obs, self.exp_results)
def test_bioenv_all_columns_explicit(self):
# Test with all columns being specified.
obs = bioenv(self.dm, self.df, columns=self.cols)
assert_data_frame_almost_equal(obs, self.exp_results)
# Test against a data frame that has an extra non-numeric column and
# some of the rows and columns reordered (we should get the same
# result since we're specifying the same columns in the same order).
obs = bioenv(self.dm, self.df_extra_column, columns=self.cols)
assert_data_frame_almost_equal(obs, self.exp_results)
def test_bioenv_single_column(self):
obs = bioenv(self.dm, self.df, columns=['PH'])
assert_data_frame_almost_equal(obs, self.exp_results_single_column)
def test_bioenv_different_column_order(self):
# Specifying columns in a different order will change the row labels in
# the results data frame as the column subsets will be reordered, but
# the actual results (e.g., correlation coefficients) shouldn't change.
obs = bioenv(self.dm, self.df, columns=self.cols[::-1])
assert_data_frame_almost_equal(
obs,
self.exp_results_different_column_order)
def test_bioenv_no_side_effects(self):
# Deep copies of both primary inputs.
dm_copy = self.dm.copy()
df_copy = self.df.copy(deep=True)
bioenv(self.dm, self.df)
# Make sure we haven't modified the primary input in some way (e.g.,
# with scaling, type conversions, etc.).
self.assertEqual(self.dm, dm_copy)
assert_data_frame_almost_equal(self.df, df_copy)
def test_bioenv_vegan_example(self):
# The correlation coefficient in the first row of the
# results (rho=0.2516) is different from the correlation coefficient
# computed by vegan (rho=0.2513). This seems to occur due to
# differences in numerical precision when calculating the Euclidean
# distances, which affects the rank calculations in Spearman
# (specifically, dealing with ties). The ranked distances end up being
# slightly different between vegan and our implementation because some
# distances are treated as ties in vegan but treated as distinct values
# in our implementation. This explains the difference in rho values. I
# verified that using Pearson correlation instead of Spearman on the
# same distances yields *very* similar results. Thus, the discrepancy
# seems to stem from differences when computing ranks/ties.
obs = bioenv(self.dm_vegan, self.df_vegan)
assert_data_frame_almost_equal(obs, self.exp_results_vegan)
def test_bioenv_no_distance_matrix(self):
with self.assertRaises(TypeError):
bioenv('breh', self.df)
def test_bioenv_no_data_frame(self):
with self.assertRaises(TypeError):
bioenv(self.dm, None)
def test_bioenv_duplicate_columns(self):
with self.assertRaises(ValueError):
bioenv(self.dm, self.df, columns=self.cols + ['PH'])
def test_bioenv_no_columns(self):
with self.assertRaises(ValueError):
bioenv(self.dm, self.df, columns=[])
def test_bioenv_missing_columns(self):
with self.assertRaises(ValueError):
bioenv(self.dm, self.df, columns=self.cols + ['brofist'])
def test_bioenv_missing_distance_matrix_ids(self):
df = self.df[1:]
with self.assertRaises(ValueError):
bioenv(self.dm, df)
def test_bioenv_nans(self):
df = self.df.replace(53.9, np.nan)
with self.assertRaises(ValueError):
bioenv(self.dm, df)
def test_bioenv_nonnumeric_columns(self):
df = self.df.replace(2400, 'no cog yay')
with self.assertRaises(TypeError):
bioenv(self.dm, df)
with self.assertRaises(TypeError):
bioenv(self.dm, self.df_extra_column)
def test_scale_single_column(self):
df = pd.DataFrame([[1], [0], [2]], index=['A', 'B', 'C'],
columns=['foo'])
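        # _scale standardizes each column to zero mean and unit sample
        # variance (ddof=1): mean([1, 0, 2]) == 1 and the sample std is 1,
        # giving [0, -1, 1].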
exp = pd.DataFrame([[0.0], [-1.0], [1.0]], index=['A', 'B', 'C'],
columns=['foo'])
obs = _scale(df)
assert_data_frame_almost_equal(obs, exp)
def test_scale_multiple_columns(self):
# Floats and ints, including positives and negatives.
df = pd.DataFrame([[7.0, 400, -1],
[8.0, 530, -5],
[7.5, 450, 1],
[8.5, 810, -4]],
index=['A', 'B', 'C', 'D'],
columns=['pH', 'Elevation', 'negatives'])
exp = pd.DataFrame([[-1.161895, -0.805979, 0.453921],
[0.387298, -0.095625, -0.998625],
[-0.387298, -0.532766, 1.180194],
[1.161895, 1.434369, -0.635489]],
index=['A', 'B', 'C', 'D'],
columns=['pH', 'Elevation', 'negatives'])
obs = _scale(df)
assert_data_frame_almost_equal(obs, exp)
def test_scale_no_variance(self):
df = pd.DataFrame([[-7.0, -1.2], [6.2, -1.2], [2.9, -1.2]],
index=['A', 'B', 'C'], columns=['foo', 'bar'])
with self.assertRaises(ValueError):
_scale(df)
if __name__ == '__main__':
main()
| bsd-3-clause |
yl565/statsmodels | statsmodels/sandbox/nonparametric/dgp_examples.py | 37 | 6008 | # -*- coding: utf-8 -*-
"""Examples of non-linear functions for non-parametric regression
Created on Sat Jan 05 20:21:22 2013
Author: Josef Perktold
"""
import numpy as np
## Functions
def fg1(x):
'''Fan and Gijbels example function 1
'''
return x + 2 * np.exp(-16 * x**2)
def fg1eu(x):
'''Eubank similar to Fan and Gijbels example function 1
'''
return x + 0.5 * np.exp(-50 * (x - 0.5)**2)
def fg2(x):
'''Fan and Gijbels example function 2
'''
return np.sin(2 * x) + 2 * np.exp(-16 * x**2)
def func1(x):
'''made up example with sin, square
'''
return np.sin(x * 5) / x + 2. * x - 1. * x**2
## Classes with Data Generating Processes
doc = {'description':
'''Base Class for Univariate non-linear example
Does not work on its own; it additionally needs at least self.func.
''',
'ref': ''}
class _UnivariateFunction(object):
#Base Class for Univariate non-linear example.
    #Does not work on its own; it additionally needs at least self.func.
__doc__ = '''%(description)s
Parameters
----------
nobs : int
number of observations to simulate
x : None or 1d array
If x is given then it is used for the exogenous variable instead of
creating a random sample
distr_x : None or distribution instance
Only used if x is None. The rvs method is used to create a random
sample of the exogenous (explanatory) variable.
distr_noise : None or distribution instance
The rvs method is used to create a random sample of the errors.
Attributes
----------
x : ndarray, 1-D
exogenous or explanatory variable. x is sorted.
y : ndarray, 1-D
endogenous or response variable
y_true : ndarray, 1-D
expected values of endogenous or response variable, i.e. values of y
without noise
func : callable
underlying function (defined by subclass)
%(ref)s
''' #% doc
def __init__(self, nobs=200, x=None, distr_x=None, distr_noise=None):
if x is None:
if distr_x is None:
x = np.random.normal(loc=0, scale=self.s_x, size=nobs)
else:
x = distr_x.rvs(size=nobs)
x.sort()
self.x = x
if distr_noise is None:
noise = np.random.normal(loc=0, scale=self.s_noise, size=nobs)
else:
noise = distr_noise.rvs(size=nobs)
if hasattr(self, 'het_scale'):
noise *= self.het_scale(self.x)
#self.func = fg1
self.y_true = y_true = self.func(x)
self.y = y_true + noise
def plot(self, scatter=True, ax=None):
'''plot the mean function and optionally the scatter of the sample
Parameters
----------
scatter: bool
            If True, then add scatter points of the sample to the plot.
ax : None or matplotlib axis instance
If None, then a matplotlib.pyplot figure is created, otherwise
the given axis, ax, is used.
Returns
-------
fig : matplotlib figure
This is either the created figure instance or the one associated
with ax if ax is given.
'''
if ax is None:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
if scatter:
ax.plot(self.x, self.y, 'o', alpha=0.5)
xx = np.linspace(self.x.min(), self.x.max(), 100)
ax.plot(xx, self.func(xx), lw=2, color='b', label='dgp mean')
return ax.figure
doc = {'description':
'''Fan and Gijbels example function 1
linear trend plus a hump
''',
'ref':
'''
References
----------
Fan, Jianqing, and Irene Gijbels. 1992. "Variable Bandwidth and Local
Linear Regression Smoothers."
The Annals of Statistics 20 (4) (December): 2008-2036. doi:10.2307/2242378.
'''}
class UnivariateFanGijbels1(_UnivariateFunction):
__doc__ = _UnivariateFunction.__doc__ % doc
def __init__(self, nobs=200, x=None, distr_x=None, distr_noise=None):
self.s_x = 1.
self.s_noise = 0.7
self.func = fg1
super(self.__class__, self).__init__(nobs=nobs, x=x,
distr_x=distr_x,
distr_noise=distr_noise)
doc['description'] =\
'''Fan and Gijbels example function 2
sin plus a hump
'''
class UnivariateFanGijbels2(_UnivariateFunction):
__doc__ = _UnivariateFunction.__doc__ % doc
def __init__(self, nobs=200, x=None, distr_x=None, distr_noise=None):
self.s_x = 1.
self.s_noise = 0.5
self.func = fg2
super(self.__class__, self).__init__(nobs=nobs, x=x,
distr_x=distr_x,
distr_noise=distr_noise)
class UnivariateFanGijbels1EU(_UnivariateFunction):
'''
Eubank p.179f
'''
def __init__(self, nobs=50, x=None, distr_x=None, distr_noise=None):
if distr_x is None:
from scipy import stats
distr_x = stats.uniform
self.s_noise = 0.15
self.func = fg1eu
super(self.__class__, self).__init__(nobs=nobs, x=x,
distr_x=distr_x,
distr_noise=distr_noise)
class UnivariateFunc1(_UnivariateFunction):
'''
made up, with sin and quadratic trend
'''
def __init__(self, nobs=200, x=None, distr_x=None, distr_noise=None):
if x is None and distr_x is None:
from scipy import stats
distr_x = stats.uniform(-2, 4)
else:
nobs = x.shape[0]
self.s_noise = 2.
self.func = func1
super(UnivariateFunc1, self).__init__(nobs=nobs, x=x,
distr_x=distr_x,
distr_noise=distr_noise)
def het_scale(self, x):
return np.sqrt(np.abs(3+x))
| bsd-3-clause |
mmottahedi/neuralnilm_prototype | scripts/e200.py | 2 | 6685 | from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from neuralnilm.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for BLSTM
e110
* Back to Uniform(5) for BLSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying looking at layer by layer training again.
* Start with single BLSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
e128
* Add back all 5 appliances
* Seq length 1500
* skip_prob = 0.7
e129
* max_input_power = None
* 2nd layer has Uniform(5)
* pre-train bottom layer for 2000 epochs
* add third layer at 4000 epochs
e131
e138
* Trying to replicate e82 and then break it ;)
e140
diff
e141
conv1D layer has Uniform(1), as does 2nd BLSTM layer
e142
diff AND power
e144
diff and power and max power is 5900
e145
Uniform(25) for first layer
e146
gradient clip and use peepholes
e147
* try again with new code
e148
* learning rate 0.1
e150
* Same as e149 but without peepholes and using BLSTM not BBLSTM
e151
* Max pooling
171
lower learning rate
172
even lower learning rate
173
slightly higher learning rate!
175
same as 174 but with skip prob = 0, and LSTM not BLSTM, and only 4000 epochs
176
new cost function
177
another new cost func (this one avoids NaNs)
skip prob 0.7
10x higher learning rate
178
refactored cost func (functionally equiv to 177)
0.1x learning rate
e180
* mse
e181
* back to scaled cost
* different architecture:
- convd1 at input (2x)
- then 3 LSTM layers, each with a 2x conv in between
- no diff input
e189
* divide dominant appliance power
* mse
"""
# def scaled_cost(x, t):
# raw_cost = (x - t) ** 2
# energy_per_seq = t.sum(axis=1)
# energy_per_batch = energy_per_seq.sum(axis=1)
# energy_per_batch = energy_per_batch.reshape((-1, 1))
# normaliser = energy_per_seq / energy_per_batch
# cost = raw_cost.mean(axis=1) * (1 - normaliser)
# return cost.mean()
from theano.ifelse import ifelse
import theano.tensor as T
THRESHOLD = 0
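# scaled_cost averages the squared error separately over "on" samples
# (t > THRESHOLD) and "off" samples and then averages the two means, so the
# sparse on-periods are not swamped by the off-periods; the ifelse guard
# returns 0 instead of NaN whenever one of the masks is empty.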
def scaled_cost(x, t):
sq_error = (x - t) ** 2
def mask_and_mean_sq_error(mask):
masked_sq_error = sq_error[mask.nonzero()]
mean = masked_sq_error.mean()
mean = ifelse(T.isnan(mean), 0.0, mean)
return mean
above_thresh_mean = mask_and_mean_sq_error(t > THRESHOLD)
below_thresh_mean = mask_and_mean_sq_error(t <= THRESHOLD)
return (above_thresh_mean + below_thresh_mean) / 2.0
def exp_a(name):
global source
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[2500] * 5,
on_power_thresholds=[5] * 5,
max_input_power=2500,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1520,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
n_seq_per_batch=25,
input_padding=4,
include_diff=False,
clip_appliance_power=False
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=1000,
loss_function=scaled_cost,
updates=partial(nesterov_momentum, learning_rate=0.1, clip_range=(-1, 1)),
layers_config=[
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 10,
'filter_length': 5,
'stride': 1,
'nonlinearity': sigmoid,
'W': Uniform(5)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None
# 'W': Uniform()
}
]
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('a'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=None)
except KeyboardInterrupt:
break
except TrainingError as exception:
print("EXCEPTION:", exception)
except Exception as exception:
raise
print("EXCEPTION:", exception)
import ipdb; ipdb.set_trace()
if __name__ == "__main__":
main()
| mit |
rexshihaoren/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 67 | 14842 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
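            # Viewing each float row as one fixed-width byte string lets
            # np.unique treat whole rows as single items, so each distinct
            # sign pattern (hypercube vertex) is counted once.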
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
                                  err_msg="Clusters should not be centered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = assert_warns(DeprecationWarning, make_multilabel_classification,
n_samples=100, n_features=20, n_classes=3,
random_state=0, allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator=True,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
return_indicator=True, allow_unlabeled=allow_unlabeled,
return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
X, y = make_blobs(n_samples=50, n_features=2,
centers=[[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]],
random_state=0)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
| bsd-3-clause |
smartscheduling/scikit-learn-categorical-tree | sklearn/manifold/tests/test_t_sne.py | 6 | 9770 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold._utils import _binary_search_perplexity
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
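    # ObjectiveSmallGradient reports a slowly decreasing error with a tiny
    # gradient, while flat_function never improves; together they exercise
    # the different stopping criteria checked below.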
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
distances = random_state.randn(50, 2)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, desired_perplexity, verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
fun = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[0]
grad = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
for init in ('random', 'pca'):
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
init=init, random_state=0)
X_embedded = tsne.fit_transform(X)
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_fit_csr_matrix():
# X can be a sparse matrix.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0)
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
metric="precomputed", random_state=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
# 'init' must be 'pca' or 'random'.
assert_raises_regexp(ValueError, "'init' must be either 'pca' or 'random'",
TSNE, init="not available")
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_verbose():
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit_transform(X)
assert(np.all(np.isfinite(X_embedded)))
| bsd-3-clause |
eramirem/astroML | book_figures/chapter3/fig_kurtosis_skew.py | 3 | 2902 | r"""
Kurtosis and Skew
-----------------
Figure 3.6.
An example of distributions with different skewness
:math:`\Sigma` (top panel) and kurtosis K (bottom panel). The modified
Gaussian in the upper panel is a normal distribution multiplied by a
Gram-Charlier series (see eq. 4.70), with a0 = 2, a1 = 1, and a2 = 0.5.
The log-normal has :math:`\sigma = 1.2`.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from scipy import stats
from matplotlib import pyplot as plt
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
fig = plt.figure(figsize=(5, 6.25))
fig.subplots_adjust(right=0.95, hspace=0.05, bottom=0.07, top=0.95)
# First show distributions with different skew
ax = fig.add_subplot(211)
x = np.linspace(-8, 8, 1000)
N = stats.norm(0, 1)
l1, = ax.plot(x, N.pdf(x), '-k',
label=r'${\rm Gaussian,}\ \Sigma=0$')
l2, = ax.plot(x, 0.5 * N.pdf(x) * (2 + x + 0.5 * (x * x - 1)),
'--k', label=r'${\rm mod.\ Gauss,}\ \Sigma=-0.36$')
l3, = ax.plot(x[499:], stats.lognorm(1.2).pdf(x[499:]), '-.k',
label=r'$\rm log\ normal,\ \Sigma=11.2$')
ax.set_xlim(-5, 5)
ax.set_ylim(0, 0.7001)
ax.set_ylabel('$p(x)$')
ax.xaxis.set_major_formatter(plt.NullFormatter())
# trick to show multiple legends
leg1 = ax.legend([l1], [l1.get_label()], loc=1)
leg2 = ax.legend([l2, l3], (l2.get_label(), l3.get_label()), loc=2)
ax.add_artist(leg1)
ax.set_title('Skew $\Sigma$ and Kurtosis $K$')
# next show distributions with different kurtosis
ax = fig.add_subplot(212)
x = np.linspace(-5, 5, 1000)
l1, = ax.plot(x, stats.laplace(0, 1).pdf(x), '--k',
label=r'${\rm Laplace,}\ K=+3$')
l2, = ax.plot(x, stats.norm(0, 1).pdf(x), '-k',
label=r'${\rm Gaussian,}\ K=0$')
l3, = ax.plot(x, stats.cosine(0, 1).pdf(x), '-.k',
label=r'${\rm Cosine,}\ K=-0.59$')
l4, = ax.plot(x, stats.uniform(-2, 4).pdf(x), ':k',
label=r'${\rm Uniform,}\ K=-1.2$')
ax.set_xlim(-5, 5)
ax.set_ylim(0, 0.55)
ax.set_xlabel('$x$')
ax.set_ylabel('$p(x)$')
# trick to show multiple legends
leg1 = ax.legend((l1, l2), (l1.get_label(), l2.get_label()), loc=2)
leg2 = ax.legend((l3, l4), (l3.get_label(), l4.get_label()), loc=1)
ax.add_artist(leg1)
plt.show()
| bsd-2-clause |
sanjayankur31/nest-simulator | pynest/examples/spatial/grid_iaf_irr.py | 20 | 1453 | # -*- coding: utf-8 -*-
#
# grid_iaf_irr.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Create 12 freely placed iaf_psc_alpha neurons
-----------------------------------------------
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
"""
import nest
import matplotlib.pyplot as plt
nest.ResetKernel()
pos = nest.spatial.free([nest.random.uniform(-0.75, 0.75), nest.random.uniform(-0.5, 0.5)], extent=[2., 1.5])
l1 = nest.Create('iaf_psc_alpha', 12, positions=pos)
nest.PrintNodes()
nest.PlotLayer(l1, nodesize=50)
# beautify
plt.axis([-1.0, 1.0, -0.75, 0.75])
plt.axes().set_aspect('equal', 'box')
plt.axes().set_xticks((-0.75, -0.25, 0.25, 0.75))
plt.axes().set_yticks((-0.5, 0, 0.5))
plt.grid(True)
plt.xlabel('Extent: 2.0')
plt.ylabel('Extent: 1.5')
plt.show()
# plt.savefig('grid_iaf_irr.png')
| gpl-2.0 |
tjlaboss/openmc | openmc/data/photon.py | 8 | 44955 | from collections import OrderedDict
from collections.abc import Mapping, Callable
from copy import deepcopy
from io import StringIO
from math import pi
from numbers import Integral, Real
import os
import h5py
import numpy as np
import pandas as pd
from scipy.interpolate import CubicSpline
import openmc.checkvalue as cv
from openmc.mixin import EqualityMixin
from . import HDF5_VERSION
from .ace import Table, get_metadata, get_table
from .data import ATOMIC_SYMBOL, EV_PER_MEV
from .endf import Evaluation, get_head_record, get_tab1_record, get_list_record
from .function import Tabulated1D
# Constants
MASS_ELECTRON_EV = 0.5109989461e6 # Electron mass energy
PLANCK_C = 1.2398419739062977e4 # Planck's constant times c in eV-Angstroms
FINE_STRUCTURE = 137.035999139 # Inverse fine structure constant
CM_PER_ANGSTROM = 1.0e-8
# classical electron radius in cm
R0 = CM_PER_ANGSTROM * PLANCK_C / (2.0 * pi * FINE_STRUCTURE * MASS_ELECTRON_EV)
# Electron subshell labels
_SUBSHELLS = [None, 'K', 'L1', 'L2', 'L3', 'M1', 'M2', 'M3', 'M4', 'M5',
'N1', 'N2', 'N3', 'N4', 'N5', 'N6', 'N7', 'O1', 'O2', 'O3',
'O4', 'O5', 'O6', 'O7', 'O8', 'O9', 'P1', 'P2', 'P3', 'P4',
'P5', 'P6', 'P7', 'P8', 'P9', 'P10', 'P11', 'Q1', 'Q2', 'Q3']
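# _SUBSHELLS maps the integer subshell designators used in ENDF/ACE data to
# their spectroscopic labels; index 0 is a placeholder (None) so designator i
# maps directly to _SUBSHELLS[i].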
_REACTION_NAME = {
501: ('Total photon interaction', 'total'),
502: ('Photon coherent scattering', 'coherent'),
504: ('Photon incoherent scattering', 'incoherent'),
515: ('Pair production, electron field', 'pair_production_electron'),
516: ('Total pair production', 'pair_production_total'),
517: ('Pair production, nuclear field', 'pair_production_nuclear'),
522: ('Photoelectric absorption', 'photoelectric'),
525: ('Heating', 'heating'),
526: ('Electro-atomic scattering', 'electro_atomic_scat'),
527: ('Electro-atomic bremsstrahlung', 'electro_atomic_brem'),
528: ('Electro-atomic excitation', 'electro_atomic_excit'),
534: ('K (1s1/2) subshell photoelectric', 'K'),
535: ('L1 (2s1/2) subshell photoelectric', 'L1'),
536: ('L2 (2p1/2) subshell photoelectric', 'L2'),
537: ('L3 (2p3/2) subshell photoelectric', 'L3'),
538: ('M1 (3s1/2) subshell photoelectric', 'M1'),
539: ('M2 (3p1/2) subshell photoelectric', 'M2'),
540: ('M3 (3p3/2) subshell photoelectric', 'M3'),
541: ('M4 (3d3/2) subshell photoelectric', 'M4'),
542: ('M5 (3d5/2) subshell photoelectric', 'M5'),
543: ('N1 (4s1/2) subshell photoelectric', 'N1'),
544: ('N2 (4p1/2) subshell photoelectric', 'N2'),
545: ('N3 (4p3/2) subshell photoelectric', 'N3'),
546: ('N4 (4d3/2) subshell photoelectric', 'N4'),
547: ('N5 (4d5/2) subshell photoelectric', 'N5'),
548: ('N6 (4f5/2) subshell photoelectric', 'N6'),
549: ('N7 (4f7/2) subshell photoelectric', 'N7'),
550: ('O1 (5s1/2) subshell photoelectric', 'O1'),
551: ('O2 (5p1/2) subshell photoelectric', 'O2'),
552: ('O3 (5p3/2) subshell photoelectric', 'O3'),
553: ('O4 (5d3/2) subshell photoelectric', 'O4'),
554: ('O5 (5d5/2) subshell photoelectric', 'O5'),
555: ('O6 (5f5/2) subshell photoelectric', 'O6'),
556: ('O7 (5f7/2) subshell photoelectric', 'O7'),
557: ('O8 (5g7/2) subshell photoelectric', 'O8'),
558: ('O9 (5g9/2) subshell photoelectric', 'O9'),
559: ('P1 (6s1/2) subshell photoelectric', 'P1'),
560: ('P2 (6p1/2) subshell photoelectric', 'P2'),
561: ('P3 (6p3/2) subshell photoelectric', 'P3'),
562: ('P4 (6d3/2) subshell photoelectric', 'P4'),
563: ('P5 (6d5/2) subshell photoelectric', 'P5'),
564: ('P6 (6f5/2) subshell photoelectric', 'P6'),
565: ('P7 (6f7/2) subshell photoelectric', 'P7'),
566: ('P8 (6g7/2) subshell photoelectric', 'P8'),
567: ('P9 (6g9/2) subshell photoelectric', 'P9'),
568: ('P10 (6h9/2) subshell photoelectric', 'P10'),
569: ('P11 (6h11/2) subshell photoelectric', 'P11'),
570: ('Q1 (7s1/2) subshell photoelectric', 'Q1'),
571: ('Q2 (7p1/2) subshell photoelectric', 'Q2'),
572: ('Q3 (7p3/2) subshell photoelectric', 'Q3')
}
# Compton profiles are read from a pre-generated HDF5 file when they are first
# needed. The dictionary stores an array of electron momentum values (at which
# the profiles are tabulated) with the key 'pz' and the profile for each element
# is a 2D array with shape (n_shells, n_momentum_values) stored on the key Z
_COMPTON_PROFILES = {}
# Scaled bremsstrahlung DCSs are read from a data file provided by Seltzer and
# Berger when they are first needed. The dictionary stores an array of n
# incident electron kinetic energies with key 'electron_energies', an array of
# k reduced photon energies with key 'photon_energies', and the cross sections
# for each element are in a 2D array with shape (n, k) stored on the key 'Z'.
# It also stores data used for calculating the density effect correction and
# stopping power, namely, the mean excitation energy with the key 'I', number
# of electrons per subshell with the key 'num_electrons', and binding energies
# with the key 'ionization_energy'.
_BREMSSTRAHLUNG = {}
class AtomicRelaxation(EqualityMixin):
"""Atomic relaxation data.
This class stores the binding energy, number of electrons, and electron
    transitions possible from ionization for each electron subshell of an
atom. All of the data originates from an ENDF-6 atomic relaxation
sub-library (NSUB=6). Instances of this class are not normally instantiated
directly but rather created using the factory method
    :meth:`AtomicRelaxation.from_endf`.
Parameters
----------
binding_energy : dict
Dictionary indicating the binding energy in eV (values) for given
subshells (keys). The subshells should be given as strings, e.g., 'K',
'L1', 'L2', etc.
num_electrons : dict
Dictionary indicating the number of electrons in a subshell when neutral
(values) for given subshells (keys). The subshells should be given as
strings, e.g., 'K', 'L1', 'L2', etc.
transitions : pandas.DataFrame
Dictionary indicating allowed transitions and their probabilities
(values) for given subshells (keys). The subshells should be given as
strings, e.g., 'K', 'L1', 'L2', etc. The transitions are represented as
a DataFrame with columns indicating the secondary and tertiary subshell,
the energy of the transition in eV, and the fractional probability of
the transition.
Attributes
----------
binding_energy : dict
Dictionary indicating the binding energy in eV (values) for given
subshells (keys). The subshells should be given as strings, e.g., 'K',
'L1', 'L2', etc.
num_electrons : dict
Dictionary indicating the number of electrons in a subshell when neutral
(values) for given subshells (keys). The subshells should be given as
strings, e.g., 'K', 'L1', 'L2', etc.
transitions : pandas.DataFrame
Dictionary indicating allowed transitions and their probabilities
(values) for given subshells (keys). The subshells should be given as
strings, e.g., 'K', 'L1', 'L2', etc. The transitions are represented as
a DataFrame with columns indicating the secondary and tertiary subshell,
the energy of the transition in eV, and the fractional probability of
the transition.
See Also
--------
IncidentPhoton
"""
def __init__(self, binding_energy, num_electrons, transitions):
self.binding_energy = binding_energy
self.num_electrons = num_electrons
self.transitions = transitions
self._e_fluorescence = {}
@property
def binding_energy(self):
return self._binding_energy
@property
def num_electrons(self):
return self._num_electrons
@property
def subshells(self):
return list(sorted(self.binding_energy.keys()))
@property
def transitions(self):
return self._transitions
@binding_energy.setter
def binding_energy(self, binding_energy):
cv.check_type('binding energies', binding_energy, Mapping)
for subshell, energy in binding_energy.items():
cv.check_value('subshell', subshell, _SUBSHELLS)
cv.check_type('binding energy', energy, Real)
cv.check_greater_than('binding energy', energy, 0.0, True)
self._binding_energy = binding_energy
@num_electrons.setter
def num_electrons(self, num_electrons):
cv.check_type('number of electrons', num_electrons, Mapping)
for subshell, num in num_electrons.items():
cv.check_value('subshell', subshell, _SUBSHELLS)
cv.check_type('number of electrons', num, Real)
cv.check_greater_than('number of electrons', num, 0.0, True)
self._num_electrons = num_electrons
@transitions.setter
def transitions(self, transitions):
cv.check_type('transitions', transitions, Mapping)
for subshell, df in transitions.items():
cv.check_value('subshell', subshell, _SUBSHELLS)
cv.check_type('transitions', df, pd.DataFrame)
self._transitions = transitions
@classmethod
def from_ace(cls, ace):
"""Generate atomic relaxation data from an ACE file
Parameters
----------
ace : openmc.data.ace.Table
ACE table to read from
Returns
-------
openmc.data.AtomicRelaxation
Atomic relaxation data
"""
# Create data dictionaries
binding_energy = {}
num_electrons = {}
transitions = {}
# Get shell designators
n = ace.nxs[7]
idx = ace.jxs[11]
shells = [_SUBSHELLS[int(i)] for i in ace.xss[idx : idx+n]]
# Get number of electrons for each shell
idx = ace.jxs[12]
for shell, num in zip(shells, ace.xss[idx : idx+n]):
num_electrons[shell] = num
# Get binding energy for each shell
idx = ace.jxs[13]
for shell, e in zip(shells, ace.xss[idx : idx+n]):
binding_energy[shell] = e*EV_PER_MEV
# Get transition table
columns = ['secondary', 'tertiary', 'energy (eV)', 'probability']
idx = ace.jxs[18]
for i, subi in enumerate(shells):
n_transitions = int(ace.xss[ace.jxs[15] + i])
if n_transitions > 0:
records = []
for j in range(n_transitions):
subj = _SUBSHELLS[int(ace.xss[idx])]
subk = _SUBSHELLS[int(ace.xss[idx + 1])]
etr = ace.xss[idx + 2]*EV_PER_MEV
if j == 0:
ftr = ace.xss[idx + 3]
else:
ftr = ace.xss[idx + 3] - ace.xss[idx - 1]
records.append((subj, subk, etr, ftr))
idx += 4
# Create dataframe for transitions
transitions[subi] = pd.DataFrame.from_records(
records, columns=columns)
return cls(binding_energy, num_electrons, transitions)
@classmethod
def from_endf(cls, ev_or_filename):
"""Generate atomic relaxation data from an ENDF evaluation
Parameters
----------
ev_or_filename : str or openmc.data.endf.Evaluation
ENDF atomic relaxation evaluation to read from. If given as a
string, it is assumed to be the filename for the ENDF file.
Returns
-------
openmc.data.AtomicRelaxation
Atomic relaxation data
"""
if isinstance(ev_or_filename, Evaluation):
ev = ev_or_filename
else:
ev = Evaluation(ev_or_filename)
# Atomic relaxation data is always MF=28, MT=533
if (28, 533) not in ev.section:
raise IOError('{} does not appear to be an atomic relaxation '
'sublibrary.'.format(ev))
# Determine number of subshells
file_obj = StringIO(ev.section[28, 533])
params = get_head_record(file_obj)
n_subshells = params[4]
# Create data dictionaries
binding_energy = {}
num_electrons = {}
transitions = {}
columns = ['secondary', 'tertiary', 'energy (eV)', 'probability']
# Read data for each subshell
for i in range(n_subshells):
params, list_items = get_list_record(file_obj)
subi = _SUBSHELLS[int(params[0])]
n_transitions = int(params[5])
binding_energy[subi] = list_items[0]
num_electrons[subi] = list_items[1]
if n_transitions > 0:
# Read transition data
records = []
for j in range(n_transitions):
subj = _SUBSHELLS[int(list_items[6*(j+1)])]
subk = _SUBSHELLS[int(list_items[6*(j+1) + 1])]
etr = list_items[6*(j+1) + 2]
ftr = list_items[6*(j+1) + 3]
records.append((subj, subk, etr, ftr))
# Create dataframe for transitions
transitions[subi] = pd.DataFrame.from_records(
records, columns=columns)
# Return instance of class
return cls(binding_energy, num_electrons, transitions)
@classmethod
def from_hdf5(cls, group):
"""Generate atomic relaxation data from an HDF5 group
Parameters
----------
group : h5py.Group
HDF5 group to read from
Returns
-------
openmc.data.AtomicRelaxation
Atomic relaxation data
"""
# Create data dictionaries
binding_energy = {}
num_electrons = {}
transitions = {}
designators = [s.decode() for s in group.attrs['designators']]
columns = ['secondary', 'tertiary', 'energy (eV)', 'probability']
for shell in designators:
# Shell group
sub_group = group[shell]
# Read subshell binding energy and number of electrons
if 'binding_energy' in sub_group.attrs:
binding_energy[shell] = sub_group.attrs['binding_energy']
if 'num_electrons' in sub_group.attrs:
num_electrons[shell] = sub_group.attrs['num_electrons']
# Read transition data
if 'transitions' in sub_group:
df = pd.DataFrame(sub_group['transitions'][()],
columns=columns)
# Replace float indexes back to subshell strings
df[columns[:2]] = df[columns[:2]].replace(
np.arange(float(len(_SUBSHELLS))), _SUBSHELLS)
transitions[shell] = df
return cls(binding_energy, num_electrons, transitions)
def to_hdf5(self, group, shell):
"""Write atomic relaxation data to an HDF5 group
Parameters
----------
group : h5py.Group
HDF5 group to write to
shell : str
The subshell to write data for
"""
# Write subshell binding energy and number of electrons
group.attrs['binding_energy'] = self.binding_energy[shell]
group.attrs['num_electrons'] = self.num_electrons[shell]
# Write transition data with replacements
if shell in self.transitions:
df = self.transitions[shell].replace(
_SUBSHELLS, range(len(_SUBSHELLS)))
group.create_dataset('transitions', data=df.values.astype(float))
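# A minimal usage sketch (not part of the original module) showing how the
# AtomicRelaxation factory methods above fit together; the ENDF filename is
# a hypothetical example.
def _example_atomic_relaxation_usage():
    # Parse an ENDF atomic relaxation sublibrary file
    relax = AtomicRelaxation.from_endf('atom-026_Fe_000.endf')
    # Subshell designators, binding energies [eV] and per-subshell transitions
    print(relax.subshells)
    print(relax.binding_energy['K'])
    print(relax.transitions['K'])
    return relax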
class IncidentPhoton(EqualityMixin):
r"""Photon interaction data.
This class stores photo-atomic, photo-nuclear, atomic relaxation,
Compton profile, stopping power, and bremsstrahlung data assembled from
different sources. To create an instance, the factory method
:meth:`IncidentPhoton.from_endf` can be used. To add atomic relaxation or
Compton profile data, set the :attr:`IncidentPhoton.atomic_relaxation` and
:attr:`IncidentPhoton.compton_profiles` attributes directly.
Parameters
----------
atomic_number : int
Number of protons in the target nucleus
Attributes
----------
atomic_number : int
Number of protons in the target nucleus
atomic_relaxation : openmc.data.AtomicRelaxation or None
Atomic relaxation data
bremsstrahlung : dict
Dictionary of bremsstrahlung data with keys 'I' (mean excitation energy
in [eV]), 'num_electrons' (number of electrons in each subshell),
'ionization_energy' (ionization potential of each subshell),
'electron_energy' (incident electron kinetic energy values in [eV]),
'photon_energy' (ratio of the energy of the emitted photon to the
incident electron kinetic energy), and 'dcs' (cross section values in
[b]). The cross sections are in scaled form: :math:`(\beta^2/Z^2) E_k
(d\sigma/dE_k)`, where :math:`E_k` is the energy of the emitted photon.
A negative number of electrons in a subshell indicates conduction
electrons.
compton_profiles : dict
Dictionary of Compton profile data with keys 'num_electrons' (number of
electrons in each subshell), 'binding_energy' (ionization potential of
each subshell), and 'J' (Hartree-Fock Compton profile as a function of
the projection of the electron momentum on the scattering vector,
:math:`p_z` for each subshell). Note that subshell occupancies may not
match the atomic relaxation data.
reactions : collections.OrderedDict
Contains the cross sections for each photon reaction. The keys are MT
values and the values are instances of :class:`PhotonReaction`.
"""
def __init__(self, atomic_number):
self.atomic_number = atomic_number
self._atomic_relaxation = None
self.reactions = OrderedDict()
self.compton_profiles = {}
self.bremsstrahlung = {}
def __contains__(self, mt):
return mt in self.reactions
def __getitem__(self, mt):
if mt in self.reactions:
return self.reactions[mt]
else:
raise KeyError('No reaction with MT={}.'.format(mt))
def __repr__(self):
return "<IncidentPhoton: {}>".format(self.name)
def __iter__(self):
return iter(self.reactions.values())
@property
def atomic_number(self):
return self._atomic_number
@property
def atomic_relaxation(self):
return self._atomic_relaxation
@property
def name(self):
return ATOMIC_SYMBOL[self.atomic_number]
@atomic_number.setter
def atomic_number(self, atomic_number):
cv.check_type('atomic number', atomic_number, Integral)
cv.check_greater_than('atomic number', atomic_number, 0, True)
self._atomic_number = atomic_number
@atomic_relaxation.setter
def atomic_relaxation(self, atomic_relaxation):
cv.check_type('atomic relaxation data', atomic_relaxation,
AtomicRelaxation)
self._atomic_relaxation = atomic_relaxation
@classmethod
def from_ace(cls, ace_or_filename):
"""Generate incident photon data from an ACE table
Parameters
----------
ace_or_filename : str or openmc.data.ace.Table
ACE table to read from. If given as a string, it is assumed to be
the filename for the ACE file.
Returns
-------
openmc.data.IncidentPhoton
Photon interaction data
"""
# First obtain the data for the first provided ACE table/file
if isinstance(ace_or_filename, Table):
ace = ace_or_filename
else:
ace = get_table(ace_or_filename)
# Get atomic number based on name of ACE table
zaid, xs = ace.name.split('.')
if not xs.endswith('p'):
raise TypeError("{} is not a photoatomic transport ACE table.".format(ace))
Z = get_metadata(int(zaid))[2]
# Read each reaction
data = cls(Z)
for mt in (502, 504, 515, 522, 525):
data.reactions[mt] = PhotonReaction.from_ace(ace, mt)
# Get heating cross sections [eV-barn] from factors [eV per collision]
# by multiplying with total xs
data.reactions[525].xs.y *= sum([data.reactions[mt].xs.y for mt in
(502, 504, 515, 522)])
# Compton profiles
n_shell = ace.nxs[5]
if n_shell != 0:
# Get number of electrons in each shell
idx = ace.jxs[6]
data.compton_profiles['num_electrons'] = ace.xss[idx : idx+n_shell]
# Get binding energy for each shell
idx = ace.jxs[7]
e = ace.xss[idx : idx+n_shell]*EV_PER_MEV
data.compton_profiles['binding_energy'] = e
# Create Compton profile for each electron shell
profiles = []
for k in range(n_shell):
# Get number of momentum values and interpolation scheme
loca = int(ace.xss[ace.jxs[9] + k])
jj = int(ace.xss[ace.jxs[10] + loca - 1])
m = int(ace.xss[ace.jxs[10] + loca])
# Read momentum and PDF
idx = ace.jxs[10] + loca + 1
pz = ace.xss[idx : idx+m]
pdf = ace.xss[idx+m : idx+2*m]
                # Create profile function
J_k = Tabulated1D(pz, pdf, [m], [jj])
profiles.append(J_k)
data.compton_profiles['J'] = profiles
# Subshell photoelectric xs and atomic relaxation data
if ace.nxs[7] > 0:
data.atomic_relaxation = AtomicRelaxation.from_ace(ace)
# Get subshell designators
n_subshells = ace.nxs[7]
idx = ace.jxs[11]
designators = [int(i) for i in ace.xss[idx : idx+n_subshells]]
# Get energy grid for subshell photoionization
n_energy = ace.nxs[3]
idx = ace.jxs[1]
energy = np.exp(ace.xss[idx : idx+n_energy])*EV_PER_MEV
# Get cross section for each subshell
idx = ace.jxs[16]
for d in designators:
# Create photon reaction
mt = 533 + d
rx = PhotonReaction(mt)
data.reactions[mt] = rx
# Store cross section, determining threshold
xs = ace.xss[idx : idx+n_energy].copy()
nonzero = (xs != 0.0)
xs[nonzero] = np.exp(xs[nonzero])
threshold = np.where(xs > 0.0)[0][0]
rx.xs = Tabulated1D(energy[threshold:], xs[threshold:],
[n_energy - threshold], [5])
idx += n_energy
# Copy binding energy
shell = _SUBSHELLS[d]
e = data.atomic_relaxation.binding_energy[shell]
rx.subshell_binding_energy = e
else:
raise ValueError("ACE table {} does not have subshell data. Only "
"newer ACE photoatomic libraries are supported "
"(e.g., eprdata14).".format(ace.name))
# Add bremsstrahlung DCS data
data._add_bremsstrahlung()
return data
@classmethod
def from_endf(cls, photoatomic, relaxation=None):
"""Generate incident photon data from an ENDF evaluation
Parameters
----------
photoatomic : str or openmc.data.endf.Evaluation
ENDF photoatomic data evaluation to read from. If given as a string,
it is assumed to be the filename for the ENDF file.
relaxation : str or openmc.data.endf.Evaluation, optional
ENDF atomic relaxation data evaluation to read from. If given as a
string, it is assumed to be the filename for the ENDF file.
Returns
-------
openmc.data.IncidentPhoton
Photon interaction data
"""
if isinstance(photoatomic, Evaluation):
ev = photoatomic
else:
ev = Evaluation(photoatomic)
Z = ev.target['atomic_number']
data = cls(Z)
# Read each reaction
for mf, mt, nc, mod in ev.reaction_list:
if mf == 23:
data.reactions[mt] = PhotonReaction.from_endf(ev, mt)
# Add atomic relaxation data if it hasn't been added already
if relaxation is not None:
data.atomic_relaxation = AtomicRelaxation.from_endf(relaxation)
# If Compton profile data hasn't been loaded, do so
if not _COMPTON_PROFILES:
filename = os.path.join(os.path.dirname(__file__), 'compton_profiles.h5')
with h5py.File(filename, 'r') as f:
_COMPTON_PROFILES['pz'] = f['pz'][()]
for i in range(1, 101):
group = f['{:03}'.format(i)]
num_electrons = group['num_electrons'][()]
binding_energy = group['binding_energy'][()]*EV_PER_MEV
J = group['J'][()]
_COMPTON_PROFILES[i] = {'num_electrons': num_electrons,
'binding_energy': binding_energy,
'J': J}
# Add Compton profile data
pz = _COMPTON_PROFILES['pz']
profile = _COMPTON_PROFILES[Z]
data.compton_profiles['num_electrons'] = profile['num_electrons']
data.compton_profiles['binding_energy'] = profile['binding_energy']
data.compton_profiles['J'] = [Tabulated1D(pz, J_k) for J_k in profile['J']]
# Add bremsstrahlung DCS data
data._add_bremsstrahlung()
return data
@classmethod
def from_hdf5(cls, group_or_filename):
"""Generate photon reaction from an HDF5 group
Parameters
----------
group_or_filename : h5py.Group or str
HDF5 group containing interaction data. If given as a string, it is
assumed to be the filename for the HDF5 file, and the first group is
used to read from.
Returns
-------
openmc.data.IncidentPhoton
Photon interaction data
"""
if isinstance(group_or_filename, h5py.Group):
group = group_or_filename
need_to_close = False
else:
h5file = h5py.File(str(group_or_filename), 'r')
need_to_close = True
# Make sure version matches
if 'version' in h5file.attrs:
major, minor = h5file.attrs['version']
# For now all versions of HDF5 data can be read
else:
raise IOError(
'HDF5 data does not indicate a version. Your installation '
'of the OpenMC Python API expects version {}.x data.'
.format(HDF5_VERSION_MAJOR))
group = list(h5file.values())[0]
Z = group.attrs['Z']
data = cls(Z)
# Read energy grid
energy = group['energy'][()]
# Read cross section data
for mt, (name, key) in _REACTION_NAME.items():
if key in group:
rgroup = group[key]
elif key in group['subshells']:
rgroup = group['subshells'][key]
else:
continue
data.reactions[mt] = PhotonReaction.from_hdf5(rgroup, mt, energy)
# Check for necessary reactions
for mt in (502, 504, 522):
assert mt in data, "Reaction {} not found".format(mt)
# Read atomic relaxation
data.atomic_relaxation = AtomicRelaxation.from_hdf5(group['subshells'])
# Read Compton profiles
if 'compton_profiles' in group:
rgroup = group['compton_profiles']
profile = data.compton_profiles
profile['num_electrons'] = rgroup['num_electrons'][()]
profile['binding_energy'] = rgroup['binding_energy'][()]
# Get electron momentum values
pz = rgroup['pz'][()]
J = rgroup['J'][()]
if pz.size != J.shape[1]:
raise ValueError("'J' array shape is not consistent with the "
"'pz' array shape")
profile['J'] = [Tabulated1D(pz, Jk) for Jk in J]
# Read bremsstrahlung
if 'bremsstrahlung' in group:
rgroup = group['bremsstrahlung']
data.bremsstrahlung['I'] = rgroup.attrs['I']
for key in ('dcs', 'electron_energy', 'ionization_energy',
'num_electrons', 'photon_energy'):
data.bremsstrahlung[key] = rgroup[key][()]
# If HDF5 file was opened here, make sure it gets closed
if need_to_close:
h5file.close()
return data
def export_to_hdf5(self, path, mode='a', libver='earliest'):
"""Export incident photon data to an HDF5 file.
Parameters
----------
path : str
Path to write HDF5 file to
mode : {'r+', 'w', 'x', 'a'}
Mode that is used to open the HDF5 file. This is the second argument
to the :class:`h5py.File` constructor.
libver : {'earliest', 'latest'}
Compatibility mode for the HDF5 file. 'latest' will produce files
that are less backwards compatible but have performance benefits.
"""
with h5py.File(str(path), mode, libver=libver) as f:
# Write filetype and version
f.attrs['filetype'] = np.string_('data_photon')
if 'version' not in f.attrs:
f.attrs['version'] = np.array(HDF5_VERSION)
group = f.create_group(self.name)
group.attrs['Z'] = Z = self.atomic_number
# Determine union energy grid
union_grid = np.array([])
for rx in self:
union_grid = np.union1d(union_grid, rx.xs.x)
group.create_dataset('energy', data=union_grid)
# Write cross sections
shell_group = group.create_group('subshells')
designators = []
for mt, rx in self.reactions.items():
name, key = _REACTION_NAME[mt]
if mt in (502, 504, 515, 517, 522, 525):
sub_group = group.create_group(key)
elif mt >= 534 and mt <= 572:
# Subshell
designators.append(key)
sub_group = shell_group.create_group(key)
# Write atomic relaxation
if self.atomic_relaxation is not None:
if key in self.atomic_relaxation.subshells:
self.atomic_relaxation.to_hdf5(sub_group, key)
else:
continue
rx.to_hdf5(sub_group, union_grid, Z)
shell_group.attrs['designators'] = np.array(designators, dtype='S')
# Write Compton profiles
if self.compton_profiles:
compton_group = group.create_group('compton_profiles')
profile = self.compton_profiles
compton_group.create_dataset('num_electrons',
data=profile['num_electrons'])
compton_group.create_dataset('binding_energy',
data=profile['binding_energy'])
# Get electron momentum values
compton_group.create_dataset('pz', data=profile['J'][0].x)
# Create/write 2D array of profiles
J = np.array([Jk.y for Jk in profile['J']])
compton_group.create_dataset('J', data=J)
# Write bremsstrahlung
if self.bremsstrahlung:
brem_group = group.create_group('bremsstrahlung')
for key, value in self.bremsstrahlung.items():
if key == 'I':
brem_group.attrs[key] = value
else:
brem_group.create_dataset(key, data=value)
def _add_bremsstrahlung(self):
"""Add the data used in the thick-target bremsstrahlung approximation
"""
# Load bremsstrahlung data if it has not yet been loaded
if not _BREMSSTRAHLUNG:
# Add data used for density effect correction
filename = os.path.join(os.path.dirname(__file__), 'density_effect.h5')
with h5py.File(filename, 'r') as f:
for i in range(1, 101):
group = f['{:03}'.format(i)]
_BREMSSTRAHLUNG[i] = {
'I': group.attrs['I'],
'num_electrons': group['num_electrons'][()],
'ionization_energy': group['ionization_energy'][()]
}
filename = os.path.join(os.path.dirname(__file__), 'BREMX.DAT')
with open(filename, 'r') as fh:
brem = fh.read().split()
# Incident electron kinetic energy grid in eV
_BREMSSTRAHLUNG['electron_energy'] = np.logspace(3, 9, 200)
log_energy = np.log(_BREMSSTRAHLUNG['electron_energy'])
# Get number of tabulated electron and photon energy values
n = int(brem[37])
k = int(brem[38])
# Index in data
p = 39
# Get log of incident electron kinetic energy values, used for
# cubic spline interpolation in log energy. Units are in MeV, so
# convert to eV.
logx = np.log(np.fromiter(brem[p:p+n], float, n)*EV_PER_MEV)
p += n
# Get reduced photon energy values
_BREMSSTRAHLUNG['photon_energy'] = np.fromiter(brem[p:p+k], float, k)
p += k
for i in range(1, 101):
dcs = np.empty([len(log_energy), k])
# Get the scaled cross section values for each electron energy
# and reduced photon energy for this Z. Units are in mb, so
# convert to b.
y = np.reshape(np.fromiter(brem[p:p+n*k], float, n*k), (n, k))*1.0e-3
p += k*n
for j in range(k):
# Cubic spline interpolation in log energy and linear DCS
cs = CubicSpline(logx, y[:, j])
# Get scaled DCS values (barns) on new energy grid
dcs[:, j] = cs(log_energy)
_BREMSSTRAHLUNG[i]['dcs'] = dcs
# Add bremsstrahlung DCS data
self.bremsstrahlung['electron_energy'] = _BREMSSTRAHLUNG['electron_energy']
self.bremsstrahlung['photon_energy'] = _BREMSSTRAHLUNG['photon_energy']
self.bremsstrahlung.update(_BREMSSTRAHLUNG[self.atomic_number])
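# A minimal usage sketch (not part of the original module): assemble incident
# photon data from ENDF evaluations and export it with the methods defined
# above. Both filenames are hypothetical examples.
def _example_incident_photon_export():
    data = IncidentPhoton.from_endf('photoat-026_Fe_000.endf',
                                    relaxation='atom-026_Fe_000.endf')
    # Write cross sections, relaxation data, Compton profiles and
    # bremsstrahlung DCS data to a single HDF5 library file
    data.export_to_hdf5('Fe_photon.h5', mode='w')
    return data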
class PhotonReaction(EqualityMixin):
"""Photon-induced reaction
Parameters
----------
mt : int
The ENDF MT number for this reaction.
Attributes
----------
anomalous_real : openmc.data.Tabulated1D
Real part of the anomalous scattering factor
    anomalous_imag : openmc.data.Tabulated1D
        Imaginary part of the anomalous scattering factor
mt : int
The ENDF MT number for this reaction.
scattering_factor : openmc.data.Tabulated1D
Coherent or incoherent form factor.
xs : Callable
Cross section as a function of incident photon energy
"""
def __init__(self, mt):
self.mt = mt
self._xs = None
self._scattering_factor = None
self._anomalous_real = None
self._anomalous_imag = None
def __repr__(self):
if self.mt in _REACTION_NAME:
return "<Photon Reaction: MT={} {}>".format(
self.mt, _REACTION_NAME[self.mt][0])
else:
return "<Photon Reaction: MT={}>".format(self.mt)
@property
def anomalous_real(self):
return self._anomalous_real
@property
def anomalous_imag(self):
return self._anomalous_imag
@property
def scattering_factor(self):
return self._scattering_factor
@property
def xs(self):
return self._xs
@anomalous_real.setter
def anomalous_real(self, anomalous_real):
cv.check_type('real part of anomalous scattering factor',
anomalous_real, Callable)
self._anomalous_real = anomalous_real
@anomalous_imag.setter
def anomalous_imag(self, anomalous_imag):
cv.check_type('imaginary part of anomalous scattering factor',
anomalous_imag, Callable)
self._anomalous_imag = anomalous_imag
@scattering_factor.setter
def scattering_factor(self, scattering_factor):
cv.check_type('scattering factor', scattering_factor, Callable)
self._scattering_factor = scattering_factor
@xs.setter
def xs(self, xs):
cv.check_type('reaction cross section', xs, Callable)
self._xs = xs
@classmethod
def from_ace(cls, ace, mt):
"""Generate photon reaction from an ACE table
Parameters
----------
ace : openmc.data.ace.Table
ACE table to read from
mt : int
The MT value of the reaction to get data for
Returns
-------
openmc.data.PhotonReaction
Photon reaction data
"""
# Create instance
rx = cls(mt)
# Get energy grid (stored as logarithms)
n = ace.nxs[3]
idx = ace.jxs[1]
energy = np.exp(ace.xss[idx : idx+n])*EV_PER_MEV
# Get index for appropriate reaction
if mt == 502:
# Coherent scattering
idx = ace.jxs[1] + 2*n
elif mt == 504:
# Incoherent scattering
idx = ace.jxs[1] + n
elif mt == 515:
# Pair production
idx = ace.jxs[1] + 4*n
elif mt == 522:
# Photoelectric
idx = ace.jxs[1] + 3*n
elif mt == 525:
# Heating
idx = ace.jxs[5]
else:
raise ValueError('ACE photoatomic cross sections do not have '
'data for MT={}.'.format(mt))
# Store cross section
xs = ace.xss[idx : idx+n].copy()
if mt == 525:
# Get heating factors in [eV per collision]
xs *= EV_PER_MEV
else:
nonzero = (xs != 0.0)
xs[nonzero] = np.exp(xs[nonzero])
rx.xs = Tabulated1D(energy, xs, [n], [5])
# Get form factors for incoherent/coherent scattering
new_format = (ace.nxs[6] > 0)
if mt == 502:
idx = ace.jxs[3]
if new_format:
n = (ace.jxs[4] - ace.jxs[3]) // 3
x = ace.xss[idx : idx+n]
idx += n
else:
x = np.array([
0.0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.08, 0.1, 0.12,
0.15, 0.18, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55,
0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6,
1.7, 1.8, 1.9, 2.0, 2.2, 2.4, 2.6, 2.8, 3.0, 3.2, 3.4,
3.6, 3.8, 4.0, 4.2, 4.4, 4.6, 4.8, 5.0, 5.2, 5.4, 5.6,
5.8, 6.0])
n = x.size
ff = ace.xss[idx+n : idx+2*n]
rx.scattering_factor = Tabulated1D(x, ff)
elif mt == 504:
idx = ace.jxs[2]
if new_format:
n = (ace.jxs[3] - ace.jxs[2]) // 2
x = ace.xss[idx : idx+n]
idx += n
else:
x = np.array([
0.0, 0.005, 0.01, 0.05, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6,
0.7, 0.8, 0.9, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 8.0
])
n = x.size
ff = ace.xss[idx : idx+n]
rx.scattering_factor = Tabulated1D(x, ff)
return rx
@classmethod
def from_endf(cls, ev, mt):
"""Generate photon reaction from an ENDF evaluation
Parameters
----------
ev : openmc.data.endf.Evaluation
ENDF photo-atomic interaction data evaluation
mt : int
The MT value of the reaction to get data for
Returns
-------
openmc.data.PhotonReaction
Photon reaction data
"""
rx = cls(mt)
# Read photon cross section
if (23, mt) in ev.section:
file_obj = StringIO(ev.section[23, mt])
get_head_record(file_obj)
params, rx.xs = get_tab1_record(file_obj)
# Set subshell binding energy and/or fluorescence yield
if mt >= 534 and mt <= 599:
rx.subshell_binding_energy = params[0]
if mt >= 534 and mt <= 572:
rx.fluorescence_yield = params[1]
# Read form factors / scattering functions
if (27, mt) in ev.section:
file_obj = StringIO(ev.section[27, mt])
get_head_record(file_obj)
params, rx.scattering_factor = get_tab1_record(file_obj)
# Check for anomalous scattering factor
if mt == 502:
if (27, 506) in ev.section:
file_obj = StringIO(ev.section[27, 506])
get_head_record(file_obj)
params, rx.anomalous_real = get_tab1_record(file_obj)
if (27, 505) in ev.section:
file_obj = StringIO(ev.section[27, 505])
get_head_record(file_obj)
params, rx.anomalous_imag = get_tab1_record(file_obj)
return rx
@classmethod
def from_hdf5(cls, group, mt, energy):
"""Generate photon reaction from an HDF5 group
Parameters
----------
group : h5py.Group
HDF5 group to read from
mt : int
The MT value of the reaction to get data for
energy : Iterable of float
        array of energies at which cross sections are tabulated
Returns
-------
openmc.data.PhotonReaction
Photon reaction data
"""
# Create instance
rx = cls(mt)
# Cross sections
xs = group['xs'][()]
        # Replace zero elements with small non-zero values to enable log-log interpolation
xs[xs == 0.0] = np.exp(-500.0)
# Threshold
threshold_idx = 0
if 'threshold_idx' in group['xs'].attrs:
threshold_idx = group['xs'].attrs['threshold_idx']
# Store cross section
rx.xs = Tabulated1D(energy[threshold_idx:], xs, [len(xs)], [5])
# Check for anomalous scattering factor
if 'anomalous_real' in group:
rx.anomalous_real = Tabulated1D.from_hdf5(group['anomalous_real'])
if 'anomalous_imag' in group:
rx.anomalous_imag = Tabulated1D.from_hdf5(group['anomalous_imag'])
# Check for factors / scattering functions
if 'scattering_factor' in group:
rx.scattering_factor = Tabulated1D.from_hdf5(group['scattering_factor'])
return rx
def to_hdf5(self, group, energy, Z):
"""Write photon reaction to an HDF5 group
Parameters
----------
group : h5py.Group
HDF5 group to write to
energy : Iterable of float
        array of energies at which cross sections are tabulated
Z : int
atomic number
"""
# Write cross sections
if self.mt >= 534 and self.mt <= 572:
# Determine threshold
threshold = self.xs.x[0]
idx = np.searchsorted(energy, threshold, side='right') - 1
# Interpolate cross section onto union grid and write
photoionization = self.xs(energy[idx:])
group.create_dataset('xs', data=photoionization)
assert len(energy) == len(photoionization) + idx
group['xs'].attrs['threshold_idx'] = idx
else:
group.create_dataset('xs', data=self.xs(energy))
# Write scattering factor
if self.scattering_factor is not None:
if self.mt == 502:
# Create integrated form factor
ff = deepcopy(self.scattering_factor)
ff.x *= ff.x
ff.y *= ff.y/Z**2
int_ff = Tabulated1D(ff.x, ff.integral())
int_ff.to_hdf5(group, 'integrated_scattering_factor')
self.scattering_factor.to_hdf5(group, 'scattering_factor')
if self.anomalous_real is not None:
self.anomalous_real.to_hdf5(group, 'anomalous_real')
if self.anomalous_imag is not None:
self.anomalous_imag.to_hdf5(group, 'anomalous_imag')
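# A minimal usage sketch (not part of the original module): look up individual
# reactions on an IncidentPhoton instance and evaluate their cross sections.
# The ACE table name is a hypothetical example.
def _example_reaction_lookup():
    data = IncidentPhoton.from_ace('26000.14p')
    rx = data[502]                               # coherent scattering
    energies = np.array([1.0e3, 1.0e5, 1.0e7])   # photon energies [eV]
    # rx.xs is callable (a Tabulated1D) and can be evaluated on an energy array
    return rx.xs(energies)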
| mit |
flightgong/scikit-learn | sklearn/datasets/mlcomp.py | 5 | 3805 | # Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
    name_or_id : the integer id or the name (as given in the metadata) of the MLComp
dataset to load
`set_` : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
                  are stored; if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
        'filenames', the files holding the raw data to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
    Note on the lookup process: depending on the type of name_or_id, this
    function will choose between an integer id lookup and a metadata name
    lookup, by scanning the unzipped archives and their metadata files.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
    format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
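# A minimal usage sketch (not part of the original module). The dataset name
# '20news-18828' and the mlcomp_root path are assumptions; any dataset
# downloaded from mlcomp.org and unpacked under that folder would work.
def _example_load_mlcomp():
    news_train = load_mlcomp('20news-18828', set_='train',
                             mlcomp_root='~/data/mlcomp')
    # Bunch attributes documented above: filenames, target, target_names, DESCR
    return news_train.filenames[:5], news_train.target_names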
| bsd-3-clause |
fw1121/luigi | examples/pyspark_wc.py | 56 | 3361 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import luigi
from luigi.s3 import S3Target
from luigi.contrib.spark import SparkSubmitTask, PySparkTask
class InlinePySparkWordCount(PySparkTask):
"""
This task runs a :py:class:`luigi.contrib.spark.PySparkTask` task
over the target data in :py:meth:`wordcount.input` (a file in S3) and
writes the result into its :py:meth:`wordcount.output` target (a file in S3).
This class uses :py:meth:`luigi.contrib.spark.PySparkTask.main`.
Example luigi configuration::
[spark]
spark-submit: /usr/local/spark/bin/spark-submit
master: spark://spark.example.org:7077
# py-packages: numpy, pandas
"""
driver_memory = '2g'
executor_memory = '3g'
def input(self):
return S3Target("s3n://bucket.example.org/wordcount.input")
def output(self):
return S3Target('s3n://bucket.example.org/wordcount.output')
def main(self, sc, *args):
sc.textFile(self.input().path) \
.flatMap(lambda line: line.split()) \
.map(lambda word: (word, 1)) \
.reduceByKey(lambda a, b: a + b) \
.saveAsTextFile(self.output().path)
class PySparkWordCount(SparkSubmitTask):
"""
This task is the same as :py:class:`InlinePySparkWordCount` above but uses
an external python driver file specified in :py:meth:`app`
It runs a :py:class:`luigi.contrib.spark.SparkSubmitTask` task
over the target data in :py:meth:`wordcount.input` (a file in S3) and
writes the result into its :py:meth:`wordcount.output` target (a file in S3).
This class uses :py:meth:`luigi.contrib.spark.SparkSubmitTask.run`.
Example luigi configuration::
[spark]
spark-submit: /usr/local/spark/bin/spark-submit
master: spark://spark.example.org:7077
deploy-mode: client
"""
driver_memory = '2g'
executor_memory = '3g'
total_executor_cores = luigi.IntParameter(default=100)
name = "PySpark Word Count"
app = 'wordcount.py'
def app_options(self):
# These are passed to the Spark main args in the defined order.
return [self.input().path, self.output().path]
def input(self):
return S3Target("s3n://bucket.example.org/wordcount.input")
def output(self):
return S3Target('s3n://bucket.example.org/wordcount.output')
'''
// Corresponding example Spark Job, running Word count with Spark's Python API
// This file would have to be saved into wordcount.py
import sys
from pyspark import SparkContext
if __name__ == "__main__":
sc = SparkContext()
sc.textFile(sys.argv[1]) \
.flatMap(lambda line: line.split()) \
.map(lambda word: (word, 1)) \
.reduceByKey(lambda a, b: a + b) \
.saveAsTextFile(sys.argv[2])
'''
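# A hedged usage note (not part of the original example): with this file
# importable as examples.pyspark_wc, the inline task above can typically be
# launched with luigi's local scheduler, e.g.
#
#   luigi --module examples.pyspark_wc InlinePySparkWordCount --local-scheduler
#
# The exact invocation depends on how luigi is installed and configured.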
| apache-2.0 |
nelango/ViralityAnalysis | model/lib/pandas/core/api.py | 9 | 1318 |
# pylint: disable=W0614,W0401,W0611
import numpy as np
from pandas.core.algorithms import factorize, match, unique, value_counts
from pandas.core.common import isnull, notnull
from pandas.core.categorical import Categorical
from pandas.core.groupby import Grouper
from pandas.core.format import set_eng_float_format
from pandas.core.index import Index, CategoricalIndex, Int64Index, Float64Index, MultiIndex
from pandas.core.series import Series, TimeSeries
from pandas.core.frame import DataFrame
from pandas.core.panel import Panel, WidePanel
from pandas.core.panel4d import Panel4D
from pandas.core.groupby import groupby
from pandas.core.reshape import (pivot_simple as pivot, get_dummies,
lreshape, wide_to_long)
from pandas.core.indexing import IndexSlice
from pandas.tseries.offsets import DateOffset
from pandas.tseries.tools import to_datetime
from pandas.tseries.index import (DatetimeIndex, Timestamp,
date_range, bdate_range)
from pandas.tseries.tdi import TimedeltaIndex, Timedelta
from pandas.tseries.period import Period, PeriodIndex
# legacy
import pandas.core.datetools as datetools
from pandas.core.config import (get_option, set_option, reset_option,
describe_option, option_context, options)
| mit |
Leguark/pynoddy | pynoddy/Copy of history.py | 3 | 46835 | '''Noddy history file wrapper
Created on 24/03/2014
@author: Florian Wellmann
'''
import time # for header in model generation
import numpy as np
# import numpy as np
# import matplotlib.pyplot as plt
import events
class NoddyHistory():
"""Class container for Noddy history files"""
def __init__(self, history=None, **kwds):
"""Methods to analyse and change Noddy history files
**Arguments**:
- *history* = string : Name of Noddy history file
**Optional Keywords**:
- *url* = url : link to history file on web (e.g. to download
and open directly from Atlas of Structural Geophysics,
http://virtualexplorer.com.au/special/noddyatlas/index.html
Note: if both a (local) history is given and a URL, the local
file is opened!
"""
if history is None:
if kwds.has_key("url"):
self.load_history_from_url(kwds['url'])
self.determine_events()
else:
# generate a new history
self.create_new_history()
else:
# load existing history
self.load_history(history)
self.determine_events()
def info(self, **kwds):
"""Print out model information
**Optional keywords**:
- *events_only* = bool : only information on events
"""
events_only = kwds.get("events_only", False)
if not events_only:
# First: check if all information available
if not hasattr(self, 'extent_x'): self.get_extent()
if not hasattr(self, 'origin_x'): self.get_origin()
if not hasattr(self, 'cube_size'): self.get_cube_size()
if not hasattr(self, 'filename'): self.get_filename()
if not hasattr(self, 'date_saved'): self.get_date_saved()
print(60 * "*" + "\n\t\t\tModel Information\n" + 60 * "*")
print("\n")
if self.n_events == 0:
print("The model does not yet contain any events\n")
else:
print("This model consists of %d events:" % self.n_events)
for k,ev in self.events.items():
print("\t(%d) - %s" % (k,ev.event_type))
if not events_only:
print("The model extent is:")
print("\tx - %.1f m" % self.extent_x)
print("\ty - %.1f m" % self.extent_y)
print("\tz - %.1f m" % self.extent_z)
print("Number of cells in each direction:")
print("\tnx = %d" % (self.extent_x / self.cube_size))
print("\tny = %d" % (self.extent_y / self.cube_size))
print("\tnz = %d" % (self.extent_z / self.cube_size))
print("The model origin is located at: \n\t(%.1f, %.1f, %.1f)" % (self.origin_x,
self.origin_y,
self.origin_z))
print("The cubesize for model export is: \n\t%d m" % self.cube_size)
# and now some metadata
print("\n")
print(60 * "*" + "\n\t\t\tMeta Data\n" + 60 * "*")
print("\n")
print("The filename of the model is:\n\t%s" % self.filename)
print("It was last saved (if origin was a history file!) at:\n\t%s\n" % self.date_saved)
def get_origin(self):
"""Get coordinates of model origin and return and store in local variables
**Returns**: (origin_x, origin_y, origin_z)
"""
for i,line in enumerate(self.history_lines):
if "Origin X" in line:
self.origin_x = float(self.history_lines[i].split("=")[1])
self.origin_y = float(self.history_lines[i+1].split("=")[1])
self.origin_z = float(self.history_lines[i+2].split("=")[1])
break
return(self.origin_x, self.origin_y, self.origin_z)
def set_origin(self, origin_x, origin_y, origin_z):
"""Set coordinates of model origin and update local variables
**Arguments**:
- *origin_x* = float : x-location of model origin
- *origin_y* = float : y-location of model origin
- *origin_z* = float : z-location of model origin
"""
self.origin_x = origin_x
self.origin_y = origin_y
self.origin_z = origin_z
origin_x_line = " Origin X = %.2f\n" % origin_x
origin_y_line = " Origin Y = %.2f\n" % origin_y
origin_z_line = " Origin Z = %.2f\n" % origin_z
for i,line in enumerate(self.history_lines):
if "Origin X" in line:
self.history_lines[i] = origin_x_line
self.history_lines[i+1] = origin_y_line
self.history_lines[i+2] = origin_z_line
break
def get_extent(self):
"""Get model extent and return and store in local variables
**Returns**: (extent_x, extent_y, extent_z)
"""
for i,line in enumerate(self.history_lines):
if "Length X" in line:
self.extent_x = float(self.history_lines[i].split("=")[1])
self.extent_y = float(self.history_lines[i+1].split("=")[1])
self.extent_z = float(self.history_lines[i+2].split("=")[1])
break
return(self.extent_x, self.extent_y, self.extent_z)
def set_extent(self, extent_x, extent_y, extent_z):
"""Set model extent and update local variables
**Arguments**:
- *extent_x* = float : extent in x-direction
- *extent_y* = float : extent in y-direction
- *extent_z* = float : extent in z-direction
"""
self.extent_x = extent_x
self.extent_y = extent_y
self.extent_z = extent_z
extent_x_line = " Length X = %.2f\n" % extent_x
extent_y_line = " Length Y = %.2f\n" % extent_y
extent_z_line = " Length Z = %.2f\n" % extent_z
for i,line in enumerate(self.history_lines):
if "Length X" in line:
self.history_lines[i] = extent_x_line
self.history_lines[i+1] = extent_y_line
self.history_lines[i+2] = extent_z_line
break
def get_drillhole_data(self, x, y, **kwds):
"""Get geology values along 1-D profile at position x,y with a 1 m resolution
The following steps are performed:
1. creates a copy of the entire object,
2. sets values of origin, extent and geology cube size,
3. saves model to a temporary file,
4. runs Noddy on that file
5. opens and analyses output
6. deletes temporary files
Note: this method only works if write access to current directory
is enabled and noddy can be executed!
**Arguments**:
- *x* = float: x-position of drillhole
            - *y* = float: y-position of drillhole
**Optional Arguments**:
- *z_min* = float : minimum depth of drillhole (default: model range)
- *z_max* = float : maximum depth of drillhole (default: model range)
- *resolution* = float : resolution along profile (default: 1 m)
"""
# resolve keywords
resolution = kwds.get("resolution", 1)
self.get_extent()
self.get_origin()
z_min = kwds.get("z_min", self.origin_z)
z_max = kwds.get("z_max", self.extent_z)
# 1. create copy
import copy
tmp_his = copy.deepcopy(self)
# 2. set values
tmp_his.set_origin(x, y, z_min)
tmp_his.set_extent(resolution, resolution, z_max)
tmp_his.change_cube_size(resolution)
# 3. save temporary file
tmp_his_file = "tmp_1D_drillhole.his"
tmp_his.write_history(tmp_his_file)
tmp_out_file = "tmp_1d_out"
# 4. run noddy
import pynoddy
import pynoddy.output
pynoddy.compute_model(tmp_his_file, tmp_out_file)
# 5. open output
tmp_out = pynoddy.output.NoddyOutput(tmp_out_file)
        # 6. return geology values along the drillhole (temporary files are not removed here)
return tmp_out.block[0,0,:]
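    # A hedged usage sketch (not part of the original class): for a loaded
    # history, a synthetic drillhole could be extracted along z at a point
    # inside the model, e.g. (the history filename is hypothetical):
    #
    #     his = NoddyHistory("simple_two_faults.his")
    #     geology = his.get_drillhole_data(1000., 1000., resolution=10.)
    #
    # which returns the geology values from z_min to z_max at (x, y).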
def load_history(self, history):
"""Load Noddy history
**Arguments**:
- *history* = string : Name of Noddy history file
"""
self.history_lines = open(history, 'r').readlines()
def _get_footer_lines(self):
"""Get the footer lines from self.history_lines
The footer contains everything below events (all settings, etc.)"""
# get id of footer from history lines
for i,line in enumerate(self.history_lines):
if "#BlockOptions" in line:
break
print i
def load_history_from_url(self, url):
"""Directly load a Noddy history from a URL
This method is useful to load a model from the Structural Geophysics
Atlas on the pages of the Virtual Explorer.
See: http://virtualexplorer.com.au/special/noddyatlas/index.html
**Arguments**:
- *url* : url of history file
"""
import urllib2
response = urllib2.urlopen(url)
tmp_lines = response.read().split("\n")
self.history_lines = []
for line in tmp_lines:
# append EOL again for consistency
self.history_lines.append(line + "\n")
def determine_model_stratigraphy(self):
"""Determine stratigraphy of entire model from all events"""
self.model_stratigraphy = []
for e in np.sort(self.events.keys()):
if self.events[e].event_type == 'STRATIGRAPHY':
self.model_stratigraphy += self.events[e].layer_names
def determine_events(self):
"""Determine events and save line numbers
.. note:: Parsing of the history file is based on a fixed Noddy output order.
If this is, for some reason (e.g. in a changed version of Noddy) not the case, then
this parsing might fail!
"""
self._raw_events = []
for i,line in enumerate(self.history_lines):
if "No of Events" in line:
self.n_events = int(line.split("=")[1])
elif "Event #" in line:
event = {}
event['type'] = line.split('=')[1].rstrip()
event['num'] = int(line[7:9])
event['line_start'] = i
self._raw_events.append(event)
# finally: if the definition for BlockOptions starts, the event definition is over
elif "BlockOptions" in line:
last_event_stop = i-2
# now: find the line ends for the single event blocks
for i,event in enumerate(self._raw_events[1:]):
self._raw_events[i]['line_end'] = event['line_start']-1
# now adjust for last event
self._raw_events[-1]['line_end'] = last_event_stop
self.events = {} # idea: create events as dictionary so that it is easier
# to swap order later!
# now create proper event objects for these events
for e in self._raw_events:
event_lines = self.history_lines[e['line_start']:e['line_end']+1]
print e['type']
if 'FAULT' in e['type']:
ev = events.Fault(lines = event_lines)
# set specific aspects first
elif 'FOLD' in e['type']:
ev = events.Fold(lines = event_lines)
elif 'UNCONFORMITY' in e['type']:
ev = events.Unconformity(lines = event_lines)
elif 'STRATIGRAPHY' in e['type']:
ev = events.Stratigraphy(lines = event_lines)
elif 'TILT' in e['type']: # AK
ev = events.Tilt(lines = event_lines)
else: continue
# now set shared attributes (those defined in superclass Event)
order = e['num']
self.events[order] = ev
# determine overall begin and end of the history events
self.all_events_begin = self._raw_events[0]['line_start']
self.all_events_end = self._raw_events[-1]['line_end']
def get_cube_size(self):
"""Determine cube size for model export"""
for line in self.history_lines:
if 'Geophysics Cube Size' in line:
self.cube_size = float(line.split('=')[1].rstrip())
def get_filename(self):
"""Determine model filename from history file/ header"""
self.filename = self.history_lines[0].split('=')[1].rstrip()
def get_date_saved(self):
"""Determine the last savepoint of the file"""
self.date_saved = self.history_lines[1].split('=')[1].rstrip()
def change_cube_size(self, cube_size):
"""Change the model cube size (isotropic)
**Arguments**:
- *cube_size* = float : new model cube size
"""
# create local copy of history
lines_new = self.history_lines[:]
for i,line in enumerate(self.history_lines):
if 'Geophysics Cube Size' in line:
l = line.split('=')
l_new = '%7.2f\r\n' % cube_size
line_new = l[0] + "=" + l_new
lines_new[i] = line_new
# assign changed lines back to object
self.history_lines = lines_new[:]
def write_history_bak(self, filename):
"""Write history to new file
**Arguments**:
- *filename* = string : filename of new history file
.. hint:: Just love it how easy it is to 'write history' with Noddy ;-)
"""
# before saving: update all event properties (in case changes were made)
self.update_all_event_properties()
# First step: update history lines with events
all_event_lines = []
for event_id in sorted(self.events.keys()):
for line in self.events[event_id].event_lines:
all_event_lines.append(line)
# now substitute old with new lines:
self.history_lines[self.all_events_begin:self.all_events_end+1] = all_event_lines
f = open(filename, 'w')
for line in self.history_lines:
f.write(line)
f.close()
def swap_events(self, event_num_1, event_num_2):
"""Swap two geological events in the timeline
**Arguments**:
- *event_num_1/2* = int : number of events to be swapped ("order")
"""
# events have to be copied, otherwise only a reference is passed!
event_tmp = self.events[event_num_1]
self.events[event_num_1] = self.events[event_num_2]
self.events[event_num_2] = event_tmp
self.update_event_numbers()
def reorder_events(self, reorder_dict):
"""Reorder events accoring to assignment in reorder_dict
**Arguments**:
- *reorder_dict* = dict : for example {1 : 2, 2 : 3, 3 : 1}
"""
tmp_events = self.events.copy()
for key, value in reorder_dict.items():
tmp_events[value] = self.events[key]
self.events = tmp_events.copy()
self.update_event_numbers()
def update_event_numbers(self):
"""Update event numbers in 'Event #' line in noddy history file"""
for key, event in self.events.items():
event.set_event_number(key)
def update_all_event_properties(self):
"""Update properties of all events - in case changes were made"""
for event in self.events.values():
event.update_properties()
#
#class NewHistory():
# """Methods to create a Noddy model"""
#
def create_new_history(self):
"""Methods to create a Noddy model
"""
# set event counter
self.event_counter = 0
self.all_events_begin = 7 # default after header
self.all_events_end = 7
# initialise history lines
self.history_lines = []
self.events = {}
def get_ev_counter(self):
"""Event counter for implicit and continuous definition of events"""
self.event_counter += 1
return self.event_counter
def add_event(self, event_type, event_options, **kwds):
"""Add an event type to history
**Arguments**:
- *event_type* = string : type of event, legal options to date are:
'stratigraphy', 'fault', 'fold', 'unconformity'
- *event_options* = list : required options to create event (event dependent)
**Optional keywords**:
- *event_num* = int : event number (default: implicitly defined with increasing counter)
"""
event_num = kwds.get("event_num", self.get_ev_counter())
if event_type == 'stratigraphy':
ev = self._create_stratigraphy(event_options)
ev.event_type = 'STRATIGRAPHY'
elif event_type == 'fault':
ev = self._create_fault(event_options)
ev.event_type = 'FAULT'
elif event_type == 'tilt': # AK
ev = self._create_tilt(event_options)
ev.event_type = 'TILT'
elif event_type == 'unconformity': # AK
ev = self._create_unconformity(event_options)
ev.event_type = 'UNCONFORMITY'
else:
raise NameError('Event type %s not (yet) implemented' % event_type)
ev.set_event_number(event_num)
self.events[event_num] = ev
# update beginning and ending of events in history
self.all_events_end = self.all_events_end + len(ev.event_lines)
# add event to history lines, as well (for consistency with other methods)
        self.history_lines = self.history_lines[:self.all_events_begin] + \
                             ev.event_lines + \
                             self.history_lines[self.all_events_end:]
def _create_header(self):
"""Create model header, include actual date"""
t = time.localtime() # get current time
time_string = "%d/%d/%d %d:%d:%d" % (t.tm_mday,
t.tm_mon,
t.tm_year,
t.tm_hour,
t.tm_min,
t.tm_sec)
self.header_lines = """#Filename = """ + self.filename + """
#Date Saved = """ + time_string + """
FileType = 111
Version = 7.11
"""
def _create_stratigraphy(self, event_options):
"""Create a stratigraphy event
**Arguments**:
- *event_options* = list : list of required and optional settings for event
Options are:
'num_layers' = int : number of layers (required)
'layer_names' = list of strings : names for layers (default names otherwise)
'layer_thickness' = list of floats : thicknesses for all layers
"""
ev = events.Stratigraphy()
tmp_lines = [""]
tmp_lines.append("\tNum Layers\t= %d" % event_options['num_layers'])
for i in range(event_options['num_layers']):
"""Add stratigraphy layers"""
layer_name = event_options['layer_names'][i]
cum_thickness = np.cumsum(event_options['layer_thickness'])
layer_lines = _Templates().strati_layer
# now replace required variables
layer_lines = layer_lines.replace("$NAME$", layer_name)
layer_lines = layer_lines.replace("$HEIGHT$", "%.1f" % cum_thickness[i])
layer_lines = layer_lines.replace(" ", "\t")
# split lines and add to event lines list:
for layer_line in layer_lines.split("\n"):
tmp_lines.append(layer_line)
# append event name
tmp_lines.append("""\tName\t= Strat
""")
# event lines are defined in list:
tmp_lines_list = []
for line in tmp_lines:
tmp_lines_list.append(line + "\n")
ev.set_event_lines(tmp_lines_list)
ev.num_layers = event_options['num_layers']
return ev
def _create_fault(self, event_options):
"""Create a fault event
**Arguments**:
- *event_options* = list : list of required and optional settings for event;
Options are:
'name' = string : name of fault event
'pos' = (x,y,z) : position of reference point (floats)
.. note:: for convenience, it is possible to assign 'top' to z
for position at "surface"
'dip_dir' = [0,360] : dip direction of fault
'dip' = [0,90] : dip angle of fault
'slip' = float : slip along fault
"""
ev = events.Fault()
tmp_lines = [""]
fault_lines = _Templates.fault
# substitute text with according values
fault_lines = fault_lines.replace("$NAME$", event_options['name'])
fault_lines = fault_lines.replace("$POS_X$", "%.1f" % event_options['pos'][0])
fault_lines = fault_lines.replace("$POS_Y$", "%.1f" % event_options['pos'][1])
        if event_options['pos'][2] == 'top':
# recalculate z-value to be at top of model
z = self.zmax
fault_lines = fault_lines.replace("$POS_Z$", "%.1f" % z)
else:
fault_lines = fault_lines.replace("$POS_Z$", "%.1f" % event_options['pos'][2])
fault_lines = fault_lines.replace("$DIP_DIR$", "%.1f" % event_options['dip_dir'])
fault_lines = fault_lines.replace("$DIP$", "%.1f" % event_options['dip'])
fault_lines = fault_lines.replace("$SLIP$", "%.1f" % event_options['slip'])
# now split lines and add as list entries to event lines
# event lines are defined in list:
# split lines and add to event lines list:
for layer_line in fault_lines.split("\n"):
tmp_lines.append(layer_line)
tmp_lines_list = []
for line in tmp_lines:
tmp_lines_list.append(line + "\n")
ev.set_event_lines(tmp_lines_list)
return ev
# AK 2014-10
def _create_tilt(self, event_options):
"""Create a tilt event
**Arguments**:
- *event_options* = list : list of required and optional settings for event;
Options are:
'name' = string : name of fault event
'pos' = (x,y,z) : position of reference point (floats)
.. note:: for convenience, it is possible to assign 'top' to z
for position at "surface"
'rotation' = [0,360] : dip?
'plunge_direction' = [0,360] : strike of plunge, measured from x axis
'plunge' = float : ?
"""
ev = events.Tilt()
tmp_lines = [""]
tilt_lines = _Templates.tilt
# substitute text with according values
tilt_lines = tilt_lines.replace("$NAME$", event_options['name'])
tilt_lines = tilt_lines.replace("$POS_X$", "%.1f" % event_options['pos'][0])
tilt_lines = tilt_lines.replace("$POS_Y$", "%.1f" % event_options['pos'][1])
        if event_options['pos'][2] == 'top':
# recalculate z-value to be at top of model
z = self.zmax
tilt_lines = tilt_lines.replace("$POS_Z$", "%.1f" % z)
else:
tilt_lines = tilt_lines.replace("$POS_Z$", "%.1f" % event_options['pos'][2])
tilt_lines = tilt_lines.replace("$ROTATION$", "%.1f" % event_options['rotation'])
tilt_lines = tilt_lines.replace("$PLUNGE_DIRECTION$", "%.1f" % event_options['plunge_direction'])
tilt_lines = tilt_lines.replace("$PLUNGE$", "%.1f" % event_options['plunge'])
# now split lines and add as list entries to event lines
# event lines are defined in list:
# split lines and add to event lines list:
for tilt_line in tilt_lines.split("\n"):
tmp_lines.append(tilt_line)
tmp_lines_list = []
for line in tmp_lines:
tmp_lines_list.append(line + "\n")
ev.set_event_lines(tmp_lines_list)
return ev
# AK 2014-10
def _create_unconformity(self, event_options):
"""Create a unconformity event
**Arguments**:
- *event_options* = list : list of required and optional settings for event;
Options are:
'name' = string : name of unconformity event
'pos' = (x,y,z) : position of reference point (floats)
.. note:: for convenience, it is possible to assign 'top' to z
for position at "surface"
            'dip_direction' = [0,360] : dip direction of the unconformity plane
            'dip' = [0,90] : dip angle of the unconformity plane
            'num_layers' = int : number of layers above the unconformity
            'layer_names' = list of strings : names for these layers
            'layer_thickness' = list of floats : thicknesses for these layers
"""
ev = events.Unconformity()
tmp_lines = [""]
unconformity_lines = _Templates.unconformity
# substitute text with according values
unconformity_lines = unconformity_lines.replace("$NAME$", event_options['name'])
unconformity_lines = unconformity_lines.replace("$POS_X$", "%.1f" % event_options['pos'][0])
unconformity_lines = unconformity_lines.replace("$POS_Y$", "%.1f" % event_options['pos'][1])
        if event_options['pos'][2] == 'top':
# recalculate z-value to be at top of model
z = self.zmax
unconformity_lines = unconformity_lines.replace("$POS_Z$", "%.1f" % z)
else:
unconformity_lines = unconformity_lines.replace("$POS_Z$", "%.1f" % event_options['pos'][2])
unconformity_lines = unconformity_lines.replace("$DIP_DIRECTION$", "%.1f" % event_options['dip_direction'])
unconformity_lines = unconformity_lines.replace("$DIP$", "%.1f" % event_options['dip'])
# split lines and add to event lines list:
for unconformity_line in unconformity_lines.split("\n"):
tmp_lines.append(unconformity_line)
# unconformity has a stratigraphy block
tmp_lines.append("\tNum Layers\t= %d" % event_options['num_layers'])
for i in range(event_options['num_layers']):
"""Add stratigraphy layers"""
layer_name = event_options['layer_names'][i]
cum_thickness = np.cumsum(event_options['layer_thickness'])
layer_lines = _Templates().strati_layer
# now replace required variables
layer_lines = layer_lines.replace("$NAME$", layer_name)
layer_lines = layer_lines.replace("$HEIGHT$", "%.1f" % cum_thickness[i])
layer_lines = layer_lines.replace(" ", "\t")
# split lines and add to event lines list:
for layer_line in layer_lines.split("\n"):
tmp_lines.append(layer_line)
# append event name
tmp_lines.append("""\tName\t= Strat""")
tmp_lines_list = []
for line in tmp_lines:
tmp_lines_list.append(line + "\n")
ev.set_event_lines(tmp_lines_list)
return ev
def change_event_params(self, changes_dict):
"""Change multiple event parameters according to settings in changes_dict
**Arguments**:
- *changes_dict* = dictionary : entries define relative changes for (multiple) parameters
Per default, the values in the dictionary are added to the event parameters.
"""
# print changes_dict
for key,sub_dict in changes_dict.items():
for sub_key, val in sub_dict.items():
self.events[key].properties[sub_key] += val
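    # A hedged example (not part of the original class): changes_dict maps an
    # event number to a dictionary of property increments, e.g.
    #
    #     his.change_event_params({2: {'Slip': 100.0, 'Dip': -5.0}})
    #
    # The property names depend on the event type and are assumptions here.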
def write_history(self, filename):
"""Write history to new file
**Arguments**:
- *filename* = string : filename of new history file
.. hint:: Just love it how easy it is to 'write history' with Noddy ;-)
"""
# first: create header
self.filename = filename
self._create_header()
# initialise history lines
history_lines = []
# add header
for line in self.header_lines.split("\n"):
history_lines.append(line + "\n")
# add number of events
history_lines.append("No of Events\t= %d\n" % len(self.events))
# add events
for event_id in sorted(self.events.keys()):
for line in self.events[event_id].event_lines:
history_lines.append(line)
# add footer
for line in _Templates().footer.split("\n"):
line = line.replace(" ", "\t")
history_lines.append(line + "\n")
f = open(filename, 'w')
for line in history_lines:
f.write(line)
f.close()
# print history_lines
#
# # First step: update history lines with events
# all_event_lines = []
# for event_id in sorted(self.events.keys()):
# for line in self.events[event_id].event_lines:
# all_event_lines.append(line)
# # now substitute old with new lines:
# self.history_lines[self.all_events_begin:self.all_events_end+1] = all_event_lines
#
#
# f = open(filename, 'w')
# for line in self.history_lines:
# f.write(line)
# f.close()
#
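# A minimal usage sketch (not part of the original module): build a new
# history from scratch with the add_event options documented above and write
# it to disk. All layer names, thicknesses and fault parameters below are
# illustrative values only.
def _example_build_history():
    his = NoddyHistory()
    his.add_event('stratigraphy',
                  {'num_layers': 3,
                   'layer_names': ['layer 1', 'layer 2', 'layer 3'],
                   'layer_thickness': [1500., 500., 500.]})
    his.add_event('fault',
                  {'name': 'Fault_E',
                   'pos': (4000., 3500., 5000.),
                   'dip_dir': 90., 'dip': 60., 'slip': 1000.})
    his.write_history('two_events.his')
    return his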
#===============================================================================
# Templates for Noddy history file
#===============================================================================
class _Templates():
header = """#Filename = simple_two_faults.his
#Date Saved = 24/3/2014 14:21:0
FileType = 111
Version = 7.11"""
strati_layer = """ Unit Name = $NAME$
Height = $HEIGHT$
Apply Alterations = ON
Density = 4.00e+000
Anisotropic Field = 0
MagSusX = 1.60e-003
MagSusY = 1.60e-003
MagSusZ = 1.60e-003
MagSus Dip = 9.00e+001
MagSus DipDir = 9.00e+001
MagSus Pitch = 0.00e+000
Remanent Magnetization = 0
Inclination = 30.00
Angle with the Magn. North = 30.00
Strength = 1.60e-003
Color Name = Color 92
Red = 0
Green = 153
Blue = 48 """
fault = """ Geometry = Translation
Movement = Hanging Wall
X = $POS_X$
Y = $POS_Y$
Z = $POS_Z$
Dip Direction = $DIP_DIR$
Dip = $DIP$
Pitch = 90.00
Slip = $SLIP$
Rotation = 30.00
Amplitude = 2000.00
Radius = 1000.00
XAxis = 2000.00
YAxis = 2000.00
ZAxis = 2000.00
Cyl Index = 0.00
Profile Pitch = 90.00
Color Name = Custom Colour 8
Red = 0
Green = 0
Blue = 254
Fourier Series
Term A 0 = 0.00
Term B 0 = 0.00
Term A 1 = 0.00
Term B 1 = 1.00
Term A 2 = 0.00
Term B 2 = 0.00
Term A 3 = 0.00
Term B 3 = 0.00
Term A 4 = 0.00
Term B 4 = 0.00
Term A 5 = 0.00
Term B 5 = 0.00
Term A 6 = 0.00
Term B 6 = 0.00
Term A 7 = 0.00
Term B 7 = 0.00
Term A 8 = 0.00
Term B 8 = 0.00
Term A 9 = 0.00
Term B 9 = 0.00
Term A 10 = 0.00
Term B 10 = 0.00
Name = Fault Plane
Type = 1
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 6.280000
Min Y Scale = -1.000000
Max Y Scale = 1.000000
Scale Origin = 0.000000
Min Y Replace = -1.000000
Max Y Replace = 1.000000
Num Points = 21
Point X = 0
Point Y = 0
Point X = 31
Point Y = 30
Point X = 62
Point Y = 58
Point X = 94
Point Y = 80
Point X = 125
Point Y = 94
Point X = 157
Point Y = 99
Point X = 188
Point Y = 95
Point X = 219
Point Y = 81
Point X = 251
Point Y = 58
Point X = 282
Point Y = 31
Point X = 314
Point Y = 0
Point X = 345
Point Y = -31
Point X = 376
Point Y = -59
Point X = 408
Point Y = -81
Point X = 439
Point Y = -95
Point X = 471
Point Y = -100
Point X = 502
Point Y = -96
Point X = 533
Point Y = -82
Point X = 565
Point Y = -59
Point X = 596
Point Y = -32
Point X = 628
Point Y = -1
Alteration Type = NONE
Num Profiles = 12
Name = Density
Type = 2
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = 0.000000
Max Y Scale = 4.000000
Scale Origin = 1.000000
Min Y Replace = 0.000000
Max Y Replace = 10.000000
Num Points = 2
Point X = 0
Point Y = -50
Point X = 628
Point Y = -50
Name = Anisotropy
Type = 3
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -10.000000
Max Y Scale = 10.000000
Scale Origin = 0.000000
Min Y Replace = -10.000000
Max Y Replace = 10.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - X Axis (Sus)
Type = 4
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -5.000000
Max Y Scale = 5.000000
Scale Origin = 0.000000
Min Y Replace = 2.000000
Max Y Replace = 8.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - Y Axis (Sus)
Type = 5
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -5.000000
Max Y Scale = 5.000000
Scale Origin = 0.000000
Min Y Replace = 2.000000
Max Y Replace = 8.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - Z Axis (Sus)
Type = 6
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -5.000000
Max Y Scale = 5.000000
Scale Origin = 0.000000
Min Y Replace = 2.000000
Max Y Replace = 8.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - Dip (Sus)
Type = 7
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -180.000000
Max Y Scale = 180.000000
Scale Origin = 1.000000
Min Y Replace = -180.000000
Max Y Replace = 180.000000
Num Points = 2
Point X = 0
Point Y = 1
Point X = 628
Point Y = 1
Name = - Dip Dir (Sus)
Type = 8
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -360.000000
Max Y Scale = 360.000000
Scale Origin = 1.000000
Min Y Replace = -360.000000
Max Y Replace = 360.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - Pitch (Sus)
Type = 9
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -360.000000
Max Y Scale = 360.000000
Scale Origin = 1.000000
Min Y Replace = -360.000000
Max Y Replace = 360.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = Remanence
Type = 10
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -10.000000
Max Y Scale = 10.000000
Scale Origin = 0.000000
Min Y Replace = -10.000000
Max Y Replace = 10.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - Declination (Rem)
Type = 11
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -360.000000
Max Y Scale = 360.000000
Scale Origin = 1.000000
Min Y Replace = -360.000000
Max Y Replace = 360.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - Inclination (Rem)
Type = 12
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -360.000000
Max Y Scale = 360.000000
Scale Origin = 1.000000
Min Y Replace = -360.000000
Max Y Replace = 360.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - Intensity (Rem)
Type = 13
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -5.000000
Max Y Scale = 5.000000
Scale Origin = 0.000000
Min Y Replace = -5.000000
Max Y Replace = 5.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Surface Type = FLAT_SURFACE
Surface Filename =
Surface Directory = \\psf\Home
Surface XDim = 0.000000
Surface YDim = 0.000000
Surface ZDim = 0.000000
Name = $NAME$"""
# AK 2014-10
tilt = """X = $POS_X$
Y = $POS_Y$
Z = $POS_Z$
Rotation = $ROTATION$
Plunge Direction = $PLUNGE_DIRECTION$
Plunge = $PLUNGE$
Name = $NAME$"""
unconformity = """X = $POS_X$
Y = $POS_Y$
Z = $POS_Z$
Dip Direction = $DIP_DIRECTION$
Dip = $DIP$
Alteration Type = NONE
Num Profiles = 1
Name =
Type = 0
Join Type = LINES
Graph Length = 0.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = 0.000000
Max Y Scale = 0.000000
Scale Origin = 0.000000
Min Y Replace = 0.000000
Max Y Replace = 0.000000
Num Points = 0
Surface Type = FLAT_SURFACE
Surface Filename =
Surface Directory = /tmp_mnt/sci6/users/mark/Atlas/case
Surface XDim = 0.000000
Surface YDim = 0.000000
Surface ZDim = 0.000000"""
temp = """
Num Layers = 5
Unit Name = UC Base
Height = -32000
Apply Alterations = ON
Density = 3.50e+00
Anisotropic Field = 0
MagSusX = 1.50e-06
MagSusY = 1.60e-03
MagSusZ = 1.60e-03
MagSus Dip = 9.00e+01
MagSus DipDir = 9.00e+01
MagSus Pitch = 0.00e+00
Remanent Magnetization = 0
Inclination = 30.00
Angle with the Magn. North = 30.00
Strength = 1.60e-03
Color Name = Color 98
Red = 84
Green = 153
Blue = 0
Unit Name = UC Layer 1
Height = 5650
Apply Alterations = ON
Density = 3.50e+00
Anisotropic Field = 0
MagSusX = 1.50e-06
MagSusY = 1.60e-03
MagSusZ = 1.60e-03
MagSus Dip = 9.00e+01
MagSus DipDir = 9.00e+01
MagSus Pitch = 0.00e+00
Remanent Magnetization = 0
Inclination = 30.00
Angle with the Magn. North = 30.00
Strength = 1.60e-03
Color Name = Color 68
Red = 204
Green = 117
Blue = 0
Name = $NAME$"""
# everything below events
footer = """#BlockOptions
Number of Views = 1
Current View = 0
NAME = Default
Origin X = 0.00
Origin Y = 0.00
Origin Z = 5000.00
Length X = 10000.00
Length Y = 7000.00
Length Z = 5000.00
Geology Cube Size = 50.00
Geophysics Cube Size = 50.00
#GeologyOptions
Scale = 10.00
SectionDec = 90.00
WellDepth = 5000.00
WellAngleZ = 0.00
BoreholeX = 0.00
BoreholeX = 0.00
BoreholeX = 5000.00
BoreholeDecl = 90.00
BoreholeDip = 0.00
BoreholeLength = 5000.00
SectionX = 0.00
SectionY = 0.00
SectionZ = 5000.00
SectionDecl = 90.00
SectionLength = 10000.00
SectionHeight = 5000.00
topofile = FALSE
Topo Filename =
Topo Directory = .
Topo Scale = 1.00
Topo Offset = 0.00
Topo First Contour = 100.00
Topo Contour Interval = 100.00
Chair Diagram = FALSE
Chair_X = 5000.00
Chair_Y = 3500.00
Chair_Z = 2500.00
#GeophysicsOptions
GPSRange = 0
Declination = 0.00
Inclination = -67.00
Intensity = 63000.00
Field Type = FIXED
Field xPos = 0.00
Field yPos = 0.00
Field zPos = 5000.00
Inclination Ori = 0.00
Inclination Change = 0.00
Intensity Ori = 90.00
Intensity Change = 0.00
Declination Ori = 0.00
Declination Change = 0.00
Altitude = 80.00
Airborne= FALSE
Calculation Method = SPATIAL
Spectral Padding Type = RECLECTION_PADDING
Spectral Fence = 100
Spectral Percent = 100
Constant Boxing Depth = 0.00
Clever Boxing Ratio = 1.00
Deformable Remanence= FALSE
Deformable Anisotropy= TRUE
Vector Components= FALSE
Project Vectors= TRUE
Pad With Real Geology= FALSE
Draped Survey= FALSE
#3DOptions
Declination = 150.000000
Elevation = 30.000000
Scale = 1.000000
Offset X = 1.000000
Offset Y = 1.000000
Offset Z = 1.000000
Fill Type = 2
#ProjectOptions
Susceptibility Units = CGS
Geophysical Calculation = 2
Calculation Type = LOCAL_JOB
Length Scale = 0
Printing Scale = 1.000000
Image Scale = 10.000000
New Windows = FALSE
Background Red Component = 254
Background Green Component = 254
Background Blue Component = 254
Internet Address = 255.255.255.255
Account Name =
Noddy Path = ./noddy
Help Path = iexplore %h
Movie Frames Per Event = 3
Movie Play Speed = 10.00
Movie Type = 0
Gravity Clipping Type = RELATIVE_CLIPPING
Gravity Image Display Clip Min = 0.000000
Gravity Image Display Clip Max = 100.000000
Gravity Image Display Type = GREY
Gravity Image Display Num Contour = 25
Magnetics Clipping Type = RELATIVE_CLIPPING
Magnetics Image Display Clip Min = 0.000000
Magnetics Image Display Clip Max = 100.000000
Magnetics Image Display Type = GREY
Magnetics Image Display Num Contour = 25
False Easting = 0.000000
False Northing = 0.000000
#Window Positions
Num Windows = 16
Name = Block Diagram
X = 60
Y = 60
Width = 500
Height = 300
Name = Movie
X = 60
Y = 60
Width = -1
Height = -1
Name = Well Log
X = 60
Y = 60
Width = 400
Height = 430
Name = Section
X = 14
Y = 16
Width = 490
Height = -1
Name = Topography Map
X = 60
Y = 60
Width = 490
Height = 375
Name = 3D Topography Map
X = 60
Y = 60
Width = 490
Height = 375
Name = 3D Stratigraphy
X = 60
Y = 60
Width = 490
Height = 375
Name = Line Map
X = 60
Y = 60
Width = 490
Height = -1
Name = Profile - From Image
X = 60
Y = 60
Width = 490
Height = 600
Name = Sterographic Projections
X = 60
Y = 60
Width = 430
Height = 430
Name = Stratigraphic Column
X = 60
Y = 60
Width = 230
Height = 400
Name = Image
X = 30
Y = 30
Width = -1
Height = -1
Name = Contour
X = 30
Y = 30
Width = -1
Height = -1
Name = Toolbar
X = 10
Y = 0
Width = -1
Height = -1
Name = History
X = 229
Y = 160
Width = 762
Height = 898
Name = History
X = 229
Y = 160
Width = 762
Height = 898
#Icon Positions
Num Icons = 3
Row = 1
Column = 1
X Position = 1
Y Position = 1
Row = 1
Column = 2
X Position = 4
Y Position = 1
Row = 1
Column = 3
X Position = 7
Y Position = 1
Floating Menu Rows = 1
Floating Menu Cols = 24
End of Status Report"""
if __name__ == '__main__':
# some testing and debugging:
import os
os.chdir(r'/Users/Florian/git/pynoddy/sandbox')
H1 = NoddyHistory("../examples/simple_two_faults.his")
H1.swap_events(2, 3)
H1.write_history("test")
H2 = NoddyHistory("test")
H2.events[2].properties['Dip'] = 12
H2.write_history("test2")
| gpl-2.0 |
idlead/scikit-learn | sklearn/preprocessing/tests/test_imputation.py | 47 | 12381 |
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.preprocessing.imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix
def _check_statistics(X, X_true,
strategy, statistics, missing_values):
"""Utility function for testing imputation for a given strategy.
Test:
- along the two axes
- with dense and sparse arrays
Check that:
- the statistics (mean, median, mode) are correct
- the missing values are imputed correctly"""
err_msg = "Parameters: strategy = %s, missing_values = %s, " \
"axis = {0}, sparse = {1}" % (strategy, missing_values)
# Normal matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
X_trans = imputer.fit(X).transform(X.copy())
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, False))
assert_array_equal(X_trans, X_true, err_msg.format(0, False))
# Normal matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(X.transpose())
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform, X.copy().transpose())
else:
X_trans = imputer.transform(X.copy().transpose())
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, False))
# Sparse matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
imputer.fit(sparse.csc_matrix(X))
X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, True))
assert_array_equal(X_trans, X_true, err_msg.format(0, True))
# Sparse matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(sparse.csc_matrix(X.transpose()))
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform,
sparse.csc_matrix(X.copy().transpose()))
else:
X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, True))
def test_imputation_shape():
# Verify the shapes of the imputed matrix for different strategies.
X = np.random.randn(10, 2)
X[::2] = np.nan
for strategy in ['mean', 'median', 'most_frequent']:
imputer = Imputer(strategy=strategy)
X_imputed = imputer.fit_transform(X)
assert_equal(X_imputed.shape, (10, 2))
X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
assert_equal(X_imputed.shape, (10, 2))
def test_imputation_mean_median_only_zero():
# Test imputation using the mean and median strategies, when
# missing_values == 0.
X = np.array([
[np.nan, 0, 0, 0, 5],
[np.nan, 1, 0, np.nan, 3],
[np.nan, 2, 0, 0, 0],
[np.nan, 6, 0, 5, 13],
])
X_imputed_mean = np.array([
[3, 5],
[1, 3],
[2, 7],
[6, 13],
])
statistics_mean = [np.nan, 3, np.nan, np.nan, 7]
# Behaviour of median with NaN is undefined, e.g. different results in
# np.median and np.ma.median
X_for_median = X[:, [0, 1, 2, 4]]
X_imputed_median = np.array([
[2, 5],
[1, 3],
[2, 5],
[6, 13],
])
statistics_median = [np.nan, 2, np.nan, 5]
_check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0)
_check_statistics(X_for_median, X_imputed_median, "median",
statistics_median, 0)
def safe_median(arr, *args, **kwargs):
# np.median([]) raises a TypeError for numpy >= 1.10.1
length = arr.size if hasattr(arr, 'size') else len(arr)
return np.nan if length == 0 else np.median(arr, *args, **kwargs)
def safe_mean(arr, *args, **kwargs):
# np.mean([]) raises a RuntimeWarning for numpy >= 1.10.1
length = arr.size if hasattr(arr, 'size') else len(arr)
return np.nan if length == 0 else np.mean(arr, *args, **kwargs)
def test_imputation_mean_median():
# Test imputation using the mean and median strategies, when
# missing_values != 0.
rng = np.random.RandomState(0)
dim = 10
dec = 10
shape = (dim * dim, dim + dec)
zeros = np.zeros(shape[0])
values = np.arange(1, shape[0]+1)
values[4::2] = - values[4::2]
tests = [("mean", "NaN", lambda z, v, p: safe_mean(np.hstack((z, v)))),
("mean", 0, lambda z, v, p: np.mean(v)),
("median", "NaN", lambda z, v, p: safe_median(np.hstack((z, v)))),
("median", 0, lambda z, v, p: np.median(v))]
for strategy, test_missing_values, true_value_fun in tests:
X = np.empty(shape)
X_true = np.empty(shape)
true_statistics = np.empty(shape[1])
# Create a matrix X with columns
# - with only zeros,
# - with only missing values
# - with zeros, missing values and values
# And a matrix X_true containing all true values
for j in range(shape[1]):
nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
nb_missing_values = max(shape[0] + dec * dec
- (j + dec) * (j + dec), 0)
nb_values = shape[0] - nb_zeros - nb_missing_values
z = zeros[:nb_zeros]
p = np.repeat(test_missing_values, nb_missing_values)
v = values[rng.permutation(len(values))[:nb_values]]
true_statistics[j] = true_value_fun(z, v, p)
# Create the columns
X[:, j] = np.hstack((v, z, p))
if 0 == test_missing_values:
X_true[:, j] = np.hstack((v,
np.repeat(
true_statistics[j],
nb_missing_values + nb_zeros)))
else:
X_true[:, j] = np.hstack((v,
z,
np.repeat(true_statistics[j],
nb_missing_values)))
# Shuffle them the same way
np.random.RandomState(j).shuffle(X[:, j])
np.random.RandomState(j).shuffle(X_true[:, j])
# Mean doesn't support columns containing NaNs, median does
if strategy == "median":
cols_to_keep = ~np.isnan(X_true).any(axis=0)
else:
cols_to_keep = ~np.isnan(X_true).all(axis=0)
X_true = X_true[:, cols_to_keep]
_check_statistics(X, X_true, strategy,
true_statistics, test_missing_values)
def test_imputation_median_special_cases():
# Test median imputation with sparse boundary cases
X = np.array([
[0, np.nan, np.nan], # odd: implicit zero
[5, np.nan, np.nan], # odd: explicit nonzero
[0, 0, np.nan], # even: average two zeros
[-5, 0, np.nan], # even: avg zero and neg
[0, 5, np.nan], # even: avg zero and pos
[4, 5, np.nan], # even: avg nonzeros
[-4, -5, np.nan], # even: avg negatives
[-1, 2, np.nan], # even: crossing neg and pos
]).transpose()
X_imputed_median = np.array([
[0, 0, 0],
[5, 5, 5],
[0, 0, 0],
[-5, 0, -2.5],
[0, 5, 2.5],
[4, 5, 4.5],
[-4, -5, -4.5],
[-1, 2, .5],
]).transpose()
statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]
_check_statistics(X, X_imputed_median, "median",
statistics_median, 'NaN')
def test_imputation_most_frequent():
# Test imputation using the most-frequent strategy.
X = np.array([
[-1, -1, 0, 5],
[-1, 2, -1, 3],
[-1, 1, 3, -1],
[-1, 2, 3, 7],
])
X_true = np.array([
[2, 0, 5],
[2, 3, 3],
[1, 3, 3],
[2, 3, 7],
])
# scipy.stats.mode, used in Imputer, doesn't return the first most
# frequent as promised in the doc but the lowest most frequent. If this
# test fails after an update of scipy, Imputer will need to be updated
# to be consistent with the new (correct) behaviour
_check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
def test_imputation_pipeline_grid_search():
# Test imputation within a pipeline + gridsearch.
pipeline = Pipeline([('imputer', Imputer(missing_values=0)),
('tree', tree.DecisionTreeRegressor(random_state=0))])
parameters = {
'imputer__strategy': ["mean", "median", "most_frequent"],
'imputer__axis': [0, 1]
}
l = 100
X = sparse_random_matrix(l, l, density=0.10)
Y = sparse_random_matrix(l, 1, density=0.10).toarray()
gs = GridSearchCV(pipeline, parameters)
gs.fit(X, Y)
def test_imputation_pickle():
# Test for pickling imputers.
import pickle
l = 100
X = sparse_random_matrix(l, l, density=0.10)
for strategy in ["mean", "median", "most_frequent"]:
imputer = Imputer(missing_values=0, strategy=strategy)
imputer.fit(X)
imputer_pickled = pickle.loads(pickle.dumps(imputer))
assert_array_equal(imputer.transform(X.copy()),
imputer_pickled.transform(X.copy()),
"Fail to transform the data after pickling "
"(strategy = %s)" % (strategy))
def test_imputation_copy():
# Test imputation with copy
X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)
# copy=True, dense => copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_false(np.all(X == Xt))
# copy=True, sparse csr => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, dense => no copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=False)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_true(np.all(X == Xt))
# copy=False, sparse csr, axis=1 => no copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=0 => no copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=1 => copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=1, missing_values=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=0, strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
assert_false(sparse.issparse(Xt))
# Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
# made, even if copy=False.
| bsd-3-clause |
wwf5067/statsmodels | docs/sphinxext/ipython_directive.py | 30 | 27623 | # -*- coding: utf-8 -*-
"""Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as pure
python, by giving the argument ``python`` to the directive. The output looks
like an interactive ipython session.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives).
By default this directive assumes that your prompts are unchanged IPython ones,
but this can be customized. The configurable options that can be placed in
conf.py are
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
The string to represent the IPython output prompt in the generated ReST. The
default is 'Out[%d]:'. This expects that the line numbers are used
in the prompt.
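For example, a minimal configuration block in conf.py could look like this
(the values shown are only illustrative; any option left out falls back to
the defaults listed above)::

    ipython_savefig_dir = '_static'
    ipython_promptin = 'In [%d]:'
    ipython_promptout = 'Out[%d]:'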
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold, refactoring, cleanups, pure python addition
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import cStringIO
import os
import re
import sys
import tempfile
import ast
import time
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
import matplotlib
import sphinx
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.util.compat import Directive
matplotlib.use('Agg')
# Our own
from IPython import Config, InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
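For example, the part::

    # a comment
    In [1]: x = 2 + 2

is parsed (roughly) into::

    [(COMMENT, '# a comment'),
     (INPUT, (None, 'x = 2 + 2', ''))]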
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
#handle try/except blocks. only catch outer except
if re.match(continuation + '\sexcept:', nextline):
inputline += '\n' + nextline[Nc+1:]
else:
inputline += '\n' + nextline[Nc:]
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self):
self.cout = cStringIO.StringIO()
# Create config object for IPython
config = Config()
config.Global.display_banner = False
config.Global.exec_lines = ['import numpy as np',
'from pylab import *'
]
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize ipython, but don't start its mainloop
IP = InteractiveShell.instance(config=config, profile_dir=profile)
# io.stdout redirect must be done *after* instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
#print "input='%s'"%self.input
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
source_raw = splitter.source_raw_reset()[1]
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""Process data block for INPUT token."""
decorator, input, rest = data
image_file = None
image_directive = None
#print 'INPUT:', data # dbg
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = decorator=='@doctest' or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i==0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=True)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=True)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output)
elif is_semicolon: # get spacing right
ret.append('')
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, image_file,
image_directive)
#print 'OUTPUT', output # dbg
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, image_file):
"""Process data block for OUTPUT token."""
if is_doctest:
submitted = data.strip()
found = output
if found is not None:
found = found.strip()
# XXX - fperez: in 0.11, 'output' never comes with the prompt
# in it, just the actual output text. So I think all this code
# can be nuked...
# the above comment does not appear to be accurate... (minrk)
ind = found.find(output_prompt)
if ind<0:
e='output prompt="%s" does not match out line=%s' % \
(output_prompt, found)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
if found!=submitted:
e = ('doctest failure for input_lines="%s" with '
'found_output="%s" and submitted output="%s"' %
(input_lines, found, submitted) )
raise RuntimeError(e)
#print 'doctest PASSED for input_lines="%s" with found_output="%s" and submitted output="%s"'%(input_lines, found, submitted)
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = 'plt.gcf().savefig("%s")'%image_file
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin%lineno
output_prompt = self.promptout%lineno
image_file = None
image_directive = None
for token, data in block:
if token==COMMENT:
out_data = self.process_comment(data)
elif token==INPUT:
(out_data, input_lines, output, is_doctest, image_file,
image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token==OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
image_file)
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
if self._pyplot_imported:
return
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
def process_pure_python(self, content):
"""
content is a list of strings; it is the unedited directive content.
This runs it line by line in the InteractiveShell, prepending
prompts as needed and capturing stderr and stdout, then returns
the content as a list as if it were ipython code.
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
block = '\n'.join(content)
# remove blank lines
block = re.sub('\n+', '\n', block)
content = block.split('\n')
# if any figures, make sure you can handle them and no other figures exist
if re.search('^\s*@savefig', block, flags=re.MULTILINE):
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
# sub out the pseudo-decorators so we can parse
block = re.sub('@(?=[savefig|suppress|verbatim|doctest])', '#@', block)
# this is going to raise an error if there's problems
# in the python. if you want errors, make an ipython block
parsed_block = ast.parse(block)
in_lines = [i.lineno for i in parsed_block.body]
output = []
ct = 1
for lineno, line in enumerate(content):
line_stripped = line.strip('\n')
if lineno + 1 in in_lines: # this is an input line
modified = u"%s %s" % (fmtin % ct, line_stripped)
ct += 1
elif line.startswith('@'): # is it a decorator?
modified = line
else: # this is something else
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
modified = u'%s %s' % (continuation, line)
output.append(modified)
output = re.sub('#@(?=[savefig|suppress|verbatim|doctest])', '@',
'\n'.join(output)).split('\n')
# put blank lines after input lines
for i in in_lines[1:][::-1]:
output.insert(i-1, u'')
# fix the spacing for decorators
# might be a cleaner regex for
# \n@savefig name.png\n\n -> \n\n@savefig name.png\n
decpat1 = '(?<=@[savefig|suppress|verbatim|doctest])(?P<options>.+)\n\n'
output = re.sub(decpat1, '\g<options>\n','\n'.join(output))
decpat2 = '\n(?=@[savefig|suppress|verbatim|doctest])'
output = re.sub(decpat2, '\n\n', output).split('\n')
return output
class IpythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
final_argument_whitespace = True
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
}
shell = EmbeddedSphinxShell()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
confdir = self.state.document.settings.env.app.confdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path
if isinstance(savefig_dir, list):
savefig_dir = savefig_dir[0] # safe to assume only one path?
savefig_dir = os.path.join(confdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
return savefig_dir, source_dir, rgxin, rgxout, promptin, promptout
def setup(self):
# make a file in this directory, if there's already one
# if it's older than 5 minutes, delete it
# this needs a more robust solution
cur_dir = os.path.normpath(
os.path.join(self.state.document.settings.env.srcdir,
'..'))
tmp_file = os.path.join(cur_dir, 'seen_docs.temp')
if os.path.exists(tmp_file):
file_t = os.path.getmtime(tmp_file)
now_t = time.time()
if (now_t - file_t)/60. >= 5:
docs = []
os.remove(tmp_file)
else:
docs = open(tmp_file, 'r').read().split('\n')
if not self.state.document.current_source in docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
else: # haven't processed any docs yet
docs = []
# get config values
(savefig_dir, source_dir, rgxin,
rgxout, promptin, promptout) = self.get_config_options()
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
# write the filename to a tempfile because it's been "seen" now
if not self.state.document.current_source in docs:
fout = open(tmp_file, 'a')
fout.write(self.state.document.current_source+'\n')
fout.close()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python(content)
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython','']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
lines.extend([' %s'%line for line in row.split('\n')])
if figure is not None:
figures.append(figure)
#text = '\n'.join(lines)
#figs = '\n'.join(figures)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
#print lines
if len(lines)>2:
if debug:
print '\n'.join(lines)
else: #NOTE: this raises some errors, what's it for?
#print 'INSERTING %d lines'%len(lines)
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
text = '\n'.join(lines)
txtnode = nodes.literal_block(text, text)
txtnode['language'] = 'ipython'
#imgnode = nodes.image(figs)
# cleanup
#self.teardown() # this gets called on _every_ exit from a block
return []#, imgnode]
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IpythonDirective)
app.add_config_value('ipython_savefig_dir', None, True)
app.add_config_value('ipython_rgxin',
re.compile('In \[(\d+)\]:\s?(.*)\s*'), True)
app.add_config_value('ipython_rgxout',
re.compile('Out\[(\d+)\]:\s?(.*)\s*'), True)
app.add_config_value('ipython_promptin', 'In [%d]:', True)
app.add_config_value('ipython_promptout', 'Out[%d]:', True)
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
.....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
""",
]
# skip local-file depending first example:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
ipython_directive('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
print 'All OK? Check figures in _static/'
| bsd-3-clause |
konraddysput/BioDocumentAnalysis | vocabularytester/similarity.py | 1 | 2444 | from abc import ABCMeta, abstractmethod
import pandas as pd
from typing import List, Dict
import numpy as np
from scipy import spatial
class SimRegression(metaclass=ABCMeta):
@abstractmethod
def calculate_similarity(self, vector_a: np.ndarray, vector_b: np.ndarray) -> float:
...
class EuclideanSimilarity(SimRegression):
def calculate_similarity(self, vector_a: np.ndarray, vector_b: np.ndarray) -> float:
return spatial.distance.euclidean(vector_a, vector_b)
class NormalizedEuclideanSimilarity(EuclideanSimilarity):
@staticmethod
def _normalize(vector: np.ndarray):
return vector / np.sqrt(vector.dot(vector))
def calculate_similarity(self, vector_a: np.ndarray, vector_b: np.ndarray) -> float:
return spatial.distance.euclidean(self._normalize(vector_a), self._normalize(vector_b))
class CosineSimilarity(SimRegression):
def calculate_similarity(self, vector_a: np.ndarray, vector_b: np.ndarray) -> float:
return spatial.distance.cosine(vector_a, vector_b)
class CosineSigmoidSimilarity(SimRegression):
def __init__(self, a: float, c: float):
self._a = a
self._c = c
def _sigmoid(self, x: float) -> float:
return 1 / (1 + 2.71828182846 ** (-self._a * (x - self._c)))
def calculate_similarity(self, vector_a: np.ndarray, vector_b: np.ndarray) -> float:
return self._sigmoid(spatial.distance.cosine(vector_a, vector_b))
class SimilarityCalculator:
def __init__(self, vocabulary_path: str, vocabulary_length: int, similarity_function: SimRegression):
words: List[str] = pd.read_csv(vocabulary_path, sep=' ', quoting=3, header=None, usecols=(0,),
na_filter=False).values.squeeze()
self._dictionary: Dict[str, int] = dict(zip(words, range(len(words))))
self._vectors: np.ndarray = pd.read_csv(vocabulary_path, sep=' ', quoting=3, header=None,
usecols=range(1, vocabulary_length + 1), na_filter=False,
dtype=np.float32).values
self._similarity = similarity_function
def calculate_similarity(self, first_word: str, second_word: str) -> float:
return self._similarity.calculate_similarity(self._vectors[self._dictionary[first_word]],
self._vectors[self._dictionary[second_word]])
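# Example usage (illustrative sketch; the vectors file, its dimensionality and
# the word pair below are placeholders, not shipped with this module):
#
#     calc = SimilarityCalculator('vectors.txt', 100, CosineSimilarity())
#     print(calc.calculate_similarity('protein', 'cell'))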
| mit |
theislab/scanpy_usage | 170503_zheng17/html/compile_profiling_info.py | 1 | 6040 | import re
from glob import glob
import pandas as pd
import numpy as np
from matplotlib import pyplot as pl
from natsort import natsorted
import seaborn as sns
from matplotlib import rcParams
# dictionary to init dataframe that stores all the information
df = {}
df['toolkit'] = []
df['n cells'] = []
df['step'] = []
df['total memory (GB)'] = []
df['memory_change'] = []
df['CPU time (min)'] = []
# match html by R output
for filename in natsorted(glob('zheng17*cellranger_R*.html')):
n_cells = int(re.findall('[0-9.]+', filename)[2])
textfile = open(filename, 'r')
filetext = textfile.read()
textfile.close()
# memory
matches = re.findall('[0-9.]+ .B<', filetext)
mem_tot = []
mem_change = [0]
for im, m in enumerate(matches):
num = float(re.findall('[0-9.]+', m)[0])
if 'MB' in m: num /= 1000
if im == 0: mem_tot += [num]
else:
if im % 2 == 1:
mem_tot += [num]
else: mem_change += [num]
df['total memory (GB)'] += mem_tot
df['memory_change'] += mem_change
# cpu time
matches = re.findall('elapsed:.+ [0-9.]+', filetext)
df['CPU time (min)'] += [0]
df['CPU time (min)'] += [float(re.findall('[0-9.]+', m)[0])/60 for m in matches]
# update step and all other fields
df['memory_change'] += [np.nan, np.nan]
df['total memory (GB)'] += [np.nan, np.nan]
df['CPU time (min)'] += [np.nan, np.nan]
# type
df['toolkit'] += ['Cell Ranger (R)' for i in range(7)]
# general
df['step'] += ['init', 'load', 'Preprocessing', 'PCA', 'tSNE', 'diffmap', 'DPT']
df['n cells'] += [n_cells for i in range(7)]
def extract_minutes(string):
match_min = re.findall('[0-9.]+ *min', string)
match_s = re.findall('[0-9.]+ *s', string)
match_ms = re.findall('[0-9.]+ *ms', string)
min = float(match_min[0].replace('min', '').strip()) if match_min else 0
min += float(match_s[0].replace('s', '').strip())/60 if match_s else 0
min += float(match_ms[0].replace('ms', '').strip())/60/1000 if match_ms else 0
return min
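# example (for illustration): extract_minutes('Wall time: 1min 30s') -> 1.5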
def extract_memory(string):
l = string.split(' GB, difference ')
return float(l[0]), float(l[1].replace(' GB', ''))
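# example (for illustration): extract_memory('1.23 GB, difference +0.45 GB') -> (1.23, 0.45)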
# parse the html files produced by the Scanpy (Python) runs
for filename in natsorted(glob('zheng17*cellranger_Py*.html')):
n_cells = int(re.findall('[0-9.]+', filename)[2])
print(n_cells)
textfile = open(filename, 'r')
filetext = textfile.read()
textfile.close()
# cpu time
matches = re.findall('Wall time:.+', filetext)
df['CPU time (min)'] += [0]
df['CPU time (min)'] += [extract_minutes(m) for m in matches]
# memory
matches = re.findall('[0-9.]+ GB, difference [+\-0-9.]+ GB', filetext)
df['total memory (GB)'] += [extract_memory(m)[0] for m in matches]
df['memory_change'] += [extract_memory(m)[1] for m in matches]
# type
df['toolkit'] += ['Scanpy (Py)' for i in range(7)]
# general
df['step'] += ['init', 'load', 'Preprocessing', 'PCA', 'tSNE', 'diffmap', 'DPT']
df['n cells'] += [n_cells for i in range(7)]
df = pd.DataFrame(df)
df_single = df.loc[df['toolkit'] != 'Cell Ranger (R)']
df_single = df_single.loc[df['step'] != 'init']
df_single = df_single.loc[df['step'] != 'load']
df_single = df_single.loc[df['step'] != 'Preprocessing']
df_single = df_single.loc[df['step'] != 'PCA']
df_single = df_single.loc[df['step'] != 'tSNE']
# remove uninteresting steps
df = df.loc[df['step'] != 'init']
df = df.loc[df['step'] != 'load']
df = df.loc[df['step'] != 'DPT']
df = df.loc[df['step'] != 'diffmap']
g = sns.FacetGrid(df, col='step', hue='toolkit', sharey=False, legend_out=True)
g = g.map(pl.scatter, 'n cells', 'total memory (GB)')
pl.subplots_adjust(top=0.82, right=0.82)
pl.legend(bbox_to_anchor=(1.04, 0.5), loc=2, borderaxespad=0.)
g.fig.suptitle('Process memory after step')
pl.savefig('figs/memory.png', dpi=400)
# g = sns.FacetGrid(df, col='step', hue='toolkit', sharey=False)
# g = g.map(pl.scatter, 'n cells', 'memory_change')
# pl.subplots_adjust(top=0.82, right=0.82)
# pl.legend(bbox_to_anchor=(1.04, 0.5), loc=2, borderaxespad=0.)
# g.fig.suptitle('changed memory during step (GB)')
g = sns.FacetGrid(df, col='step', hue='toolkit', sharey=False)
g = g.map(pl.scatter, 'n cells', 'CPU time (min)')
pl.subplots_adjust(top=0.82, right=0.82)
pl.legend(bbox_to_anchor=(1.04, 0.5), loc=2, borderaxespad=0.)
g.fig.suptitle('CPU time of step')
pl.savefig('figs/cpu_time.png', dpi=400)
# compute Speedup
df1 = pd.DataFrame()
df1['step'] = df['step'][df['toolkit'] == 'Scanpy (Py)'].values
df1['n cells'] = df['n cells'][df['toolkit'] == 'Scanpy (Py)'].values
df1['Speedup'] = df['CPU time (min)'][df['toolkit'] == 'Cell Ranger (R)'].values / df['CPU time (min)'][df['toolkit'] == 'Scanpy (Py)'].values
df1['memory ratio'] = 1 / df['total memory (GB)'][df['toolkit'] == 'Cell Ranger (R)'].values * df['total memory (GB)'][df['toolkit'] == 'Scanpy (Py)'].values
g = sns.FacetGrid(df1, col='step', sharey=False)
g = g.map(pl.scatter, 'n cells', 'Speedup', color='grey')
pl.subplots_adjust(top=0.82, right=0.82)
g.fig.suptitle('Speedup Scanpy vs. Cell Ranger (Zheng el al., 2017)')
pl.savefig('figs/speedup.png', dpi=400)
pl.savefig('figs/speedup.pdf')
g = sns.FacetGrid(df1, hue='step')
g = g.map(pl.plot, 'n cells', 'Speedup', marker='o').add_legend(title=False, handlelength=0.3, frameon=True)
pl.subplots_adjust(top=0.82, right=0.82)
g.fig.suptitle('Speedup Scanpy vs. Cell Ranger')
pl.savefig('figs/speedup_single_panel.pdf')
g = sns.FacetGrid(df1, col='step', sharey=False)
g = g.map(pl.scatter, 'n cells', 'memory ratio', color='grey')
pl.subplots_adjust(top=0.82, right=0.82)
g.fig.suptitle('Memory ratio Scanpy vs. Cell Ranger (Zheng el al., 2017)')
pl.savefig('figs/memory_ratio.png', dpi=400)
# scaling DPT and diffmap
g = sns.FacetGrid(df_single, col='step', sharey=False)
g = g.map(pl.scatter, 'n cells', 'CPU time (min)')
pl.subplots_adjust(top=0.82, right=0.82)
g.fig.suptitle('CPU time Diffmap and DPT')
pl.savefig('figs/cpu_time_dpt.png', dpi=400)
| bsd-3-clause |
spxtr/test-infra | mungegithub/issue_labeler/simple_app.py | 6 | 4947 | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
from logging.handlers import RotatingFileHandler
# pylint: disable=import-error
import numpy as np
from flask import Flask, request
from sklearn.feature_extraction import FeatureHasher
from sklearn.externals import joblib
from sklearn.linear_model import SGDClassifier
from nltk.tokenize import RegexpTokenizer
from nltk.stem.porter import PorterStemmer
# pylint: enable=import-error
APP = Flask(__name__)
# parameters
TEAM_FN = './models/trained_teams_model.pkl'
COMPONENT_FN = './models/trained_components_model.pkl'
LOG_FILE = '/tmp/issue-labeler.log'
LOG_SIZE = 1024*1024*100
NUM_FEATURES = 262144
MY_LOSS = 'hinge'
MY_ALPHA = .1
MY_PENALTY = 'l2'
MY_HASHER = FeatureHasher(input_type='string', n_features=NUM_FEATURES, non_negative=True)
MY_STEMMER = PorterStemmer()
TOKENIZER = RegexpTokenizer(r'\w+')
STOPWORDS = []
try:
if not STOPWORDS:
STOPWORDS_FILENAME = './stopwords.txt'
with open(STOPWORDS_FILENAME, 'r') as fp:
STOPWORDS = list([word.strip() for word in fp])
except: # pylint:disable=bare-except
# don't remove any stopwords
STOPWORDS = []
@APP.errorhandler(500)
def internal_error(exception):
return str(exception), 500
@APP.route("/", methods=['POST'])
def get_labels():
"""
The request should contain 2 form-urlencoded parameters
1) title : title of the issue
2) body: body of the issue
It returns a team/<label> and a component/<label>
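Example request (illustrative; host and port depend on deployment,
Flask's development server defaults to port 5000):
    curl -X POST -d 'title=some issue title' \
        -d 'body=some issue body' http://localhost:5000/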
"""
title = request.form.get('title', '')
body = request.form.get('body', '')
tokens = tokenize_stem_stop(" ".join([title, body]))
team_mod = joblib.load(TEAM_FN)
comp_mod = joblib.load(COMPONENT_FN)
vec = MY_HASHER.transform([tokens])
tlabel = team_mod.predict(vec)[0]
clabel = comp_mod.predict(vec)[0]
return ",".join([tlabel, clabel])
def tokenize_stem_stop(input_string):
input_string = input_string.encode('utf-8')
cur_title_body = TOKENIZER.tokenize(input_string.decode('utf-8').lower())
return [MY_STEMMER.stem(x) for x in cur_title_body if x not in STOPWORDS]
@APP.route("/update_models", methods=['PUT'])
def update_model(): # pylint: disable=too-many-locals
"""
data should contain three fields
titles: list of titles
bodies: list of bodies
labels: list of lists of labels
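Example payload (illustrative):
    {"titles": ["issue title 1", "issue title 2"],
     "bodies": ["issue body 1", "issue body 2"],
     "labels": [["team/node", "component/kubelet"], ["team/ux"]]}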
"""
data = request.json
titles = data.get('titles')
bodies = data.get('bodies')
labels = data.get('labels')
t_tokens = []
c_tokens = []
team_labels = []
component_labels = []
for (title, body, label_list) in zip(titles, bodies, labels):
t_label = [x for x in label_list if x.startswith('team')]
c_label = [x for x in label_list if x.startswith('component')]
tokens = tokenize_stem_stop(" ".join([title, body]))
if t_label:
team_labels += t_label
t_tokens += [tokens]
if c_label:
component_labels += c_label
c_tokens += [tokens]
t_vec = MY_HASHER.transform(t_tokens)
c_vec = MY_HASHER.transform(c_tokens)
if team_labels:
if os.path.isfile(TEAM_FN):
team_model = joblib.load(TEAM_FN)
team_model.partial_fit(t_vec, np.array(team_labels))
else:
# no team model stored so build a new one
team_model = SGDClassifier(loss=MY_LOSS, penalty=MY_PENALTY, alpha=MY_ALPHA)
team_model.fit(t_vec, np.array(team_labels))
if component_labels:
if os.path.isfile(COMPONENT_FN):
component_model = joblib.load(COMPONENT_FN)
component_model.partial_fit(c_vec, np.array(component_labels))
else:
# no comp model stored so build a new one
component_model = SGDClassifier(loss=MY_LOSS, penalty=MY_PENALTY, alpha=MY_ALPHA)
component_model.fit(c_vec, np.array(component_labels))
joblib.dump(team_model, TEAM_FN)
joblib.dump(component_model, COMPONENT_FN)
return ""
def configure_logger():
log_format = '%(asctime)-20s %(levelname)-10s %(message)s'
file_handler = RotatingFileHandler(LOG_FILE, maxBytes=LOG_SIZE, backupCount=3)
formatter = logging.Formatter(log_format)
file_handler.setFormatter(formatter)
APP.logger.addHandler(file_handler)
if __name__ == "__main__":
configure_logger()
APP.run(host="0.0.0.0")
| apache-2.0 |
VEVO/hidi | hidi/inout.py | 1 | 2636 | import numpy as np
import pandas as pd
from hidi.transform import Transform
class ReadTransform(Transform):
"""
Read input csv data from disk.
Input data should be a csv file formatted with three
columns: :code:`link_id`, :code:`item_id`, and
:code:`score`. If score is not provided, it will be
defaulted to one. :code:`link_id` represents the
"user" and :code:`item_id` represents the "item" in the context
of traditional collaborative filtering.
:param infiles:
Array of paths to csv documents to be loaded
and concatenated into one DataFrame. Each csv
document must have a :code:`link_id` and a
:code:`item_id` column. An optional
:code:`score` column may also be supplied.
:type infiles: array
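Example csv input (illustrative)::

    link_id,item_id,score
    user-1,item-a,1.0
    user-1,item-b,2.0
    user-2,item-a,1.0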
"""
def __init__(self, infiles, **kwargs):
self._inputs = infiles
def _normalize(self, df):
if 'score' not in df.columns:
df['score'] = np.ones(df.shape[0])
return df[['link_id', 'item_id', 'score']]
def transform(self, **kwargs):
"""
Read in files from the :code:`infiles` array given
upon instantiation.
:rtype: pandas.DataFrame
"""
dfs = [pd.read_csv(inp) for inp in self._inputs]
dfs = [self._normalize(df) for df in dfs]
return pd.concat(dfs), kwargs
class WriteTransform(Transform):
"""
Write output to disk in csv or json formats.
:param outfile: A string that is a path to the desired
output on the file system.
:type outfile: str
:param file_format: A string that is a file extension,
either :code:`json` or :code:`csv`.
:type file_format: str
"""
def __init__(self, outfile, file_format='csv',
enc=None, link_key='link_id'):
self.outfile = outfile
self.file_format = file_format
self.link_key = link_key
self.encoding = enc
def transform(self, df, **kwargs):
"""
Write a DataFrame to a file.
:param df: The Pandas DataFrame to be written to a
file
:type df: pandas.DataFrame
:rtype: pandas.DataFrame
"""
if self.file_format == 'csv':
df.to_csv(self.outfile, encoding=self.encoding)
else:
with open(self.outfile, 'w+') as f:
import json
for row in df.iterrows():
f.write(json.dumps({
self.link_key: row[0],
'factors': row[1].tolist()
}))
f.write('\n')
return df, kwargs
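# --- Illustrative usage sketch (not part of the original module) ------------
# Chains the two transforms defined above. The file names are hypothetical;
# each input csv is expected to carry `link_id`, `item_id` and (optionally)
# `score` columns, as documented in ReadTransform.
def _example_pipeline():
    reader = ReadTransform(['interactions_a.csv', 'interactions_b.csv'])
    df, kwargs = reader.transform()
    writer = WriteTransform('combined_interactions.csv', file_format='csv')
    return writer.transform(df, **kwargs)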
| apache-2.0 |
sommerc/cellcognition_explorer_cedl | cedl_cmd/cedl.py | 1 | 18798 | #!/usr/bin/env python
'''
CellCognition Explorer - deep learning command-line extension
'''
import os
import sys
import cellh5
import h5py
import numpy
from numpy.lib.recfunctions import merge_arrays
import pandas
import logging
logger = logging.getLogger(__name__)
from autoencoders import Autoencoder, AdaGradTrainer, NestorovTrainer
import argparse
version = (1, 0, 1)
class BaseReader(object):
@staticmethod
def normalize_galleries(gals):
return gals.astype(numpy.float32) / 255.
def write_galleriy_file(self, gals):
output_file_name = self.get_output_file_name()
norm_gals = self.normalize_galleries(gals[:self.pargs.nsamples, ...])
with h5py.File(output_file_name, 'w') as h:
h.create_dataset('galleries', data=norm_gals)
def load_galleriy_file(self):
output_file_name = self.get_output_file_name()
with h5py.File(output_file_name, 'r') as h:
res = h['galleries'].value[:, None, ...]
return res
def get_output_file_name(self):
return '%s_galleries_%s_%dx%d_%dk.h5' % (self.name, self.pargs.neg_condition, self.pargs.im_size,
self.pargs.im_size, self.pargs.nsamples / 1000)
def iter_pos(self):
raise NotImplementedError()
class Cellh5Reader(BaseReader):
def __init__(self, name, pargs):
self.pargs = pargs
self.cellh5_input = pargs.cellh5_input
self.cellh5_mapping = pargs.cellh5_mapping
self.im_size = pargs.im_size
self.name = name
logger.debug("Check input files")
if not os.path.isfile(self.cellh5_input): sys.exit("Error: Cellh5 file does not exist")
if not os.path.isfile(self.cellh5_mapping): sys.exit("Error: Mapping file does not exist")
def read_training_images(self):
cr = self.cr = cellh5.CH5MappedFile(self.cellh5_input, 'r', cached=False)
logger.debug(" Read plate mapping")
cr.read_mapping(self.cellh5_mapping)
neg_mapping = cr.mapping[cr.mapping["Group"] == self.pargs.neg_condition]
result_galleries = []
cnt = 0
logger.debug(" Reading galleries from negative control conditions:")
while True:
rn = numpy.random.randint(len(neg_mapping))
plate, well, site, gene = neg_mapping.iloc[rn][["Plate", "Well", "Site", "Gene Symbol"]]
ch5_pos = cr.get_position(well, site)
n_cells = len(ch5_pos.get_object_idx())
logger.debug(' {} {} {} {} {}'.format(plate, well, site, gene, n_cells))
if n_cells == 0:
continue
nr = numpy.random.randint(1, n_cells)
gals = ch5_pos.get_gallery_image(range(nr), size=self.im_size).T.reshape((nr, self.im_size,self.im_size))
result_galleries.append(gals)
cnt += nr
if cnt > self.pargs.nsamples:
break
return numpy.concatenate(result_galleries)
def extract_galleries(self, pos, object_='primary__primary'):
n_cells = len(pos.get_object_idx())
cell_idx = range(n_cells)
im_size = self.pargs.im_size
img = None
if len(cell_idx) > 0:
img = pos.get_gallery_image(cell_idx, object_=('primary__primary'), size=im_size)
img = img.reshape((im_size, n_cells,im_size,1)).transpose((1,3,0,2))
return img
def extract_geometry(self, pos, object_='primary__primary'):
n_cells = len(pos.get_object_idx())
if n_cells > 0:
cell_idx = range(n_cells)
centers = pos.get_center(cell_idx, object_=object_)
bbox = pos.get_feature_table(object_, 'bounding_box')[cell_idx]
orient = pos.get_feature_table(object_, 'orientation')[cell_idx]
meta = numpy.zeros(n_cells, dtype=[('file','|S128' ), ('treatment','|S128'), ('label', int )])
return merge_arrays((meta, centers, orient, bbox), asrecarray=True, flatten = True, usemask = False)
def iter_pos(self):
self.cr = cr = cellh5.CH5MappedFile(self.cellh5_input, 'r', cached=False)
cr.read_mapping(self.cellh5_mapping)
for i, row in cr.mapping.iterrows():
yield row, cr.get_position(row.Well, row.Site)
def close(self):
try:
self.cr.close()
except Exception as e:
logger.warn("Warn: Error closing cellh5 input file\n" + str(e))
class Ch5Encoder(object):
def __init__(self, ae, reader, writer):
self.ae = ae
self.reader = reader
self.writer = writer
def encode(self, data):
return self.ae.encode(data)
def write(self, content):
logger.debug(' Write content')
def run(self):
for meta, pos in self.reader.iter_pos():
logger.debug(' {} {} {}'.format(meta.Plate, meta.Well, meta.Site))
ncell = pos.get_object_count()
if ncell == 0:
continue
imgs = self.reader.extract_galleries(pos)
geo = self.reader.extract_geometry(pos)
geo["file"] = self.reader.pargs.cellh5_input[:128]
geo["treatment"] = meta["Gene Symbol"] if "Gene Symbol" in meta else "blub"
geo["label"] = 0
features = self.encode(self.reader.normalize_galleries(imgs))
rec_features = features.view(dtype=numpy.dtype([("ch1-deep_learning_feature_{}".format(dl), 'float32') for dl in xrange(features.shape[1])]))
self.writer.write_bbox(geo)
self.writer.write_contours(geo, bb_size=self.reader.im_size)
self.writer.write_features(rec_features)
self.writer.write_galleries(imgs)
image_width = pos['image']['channel'].shape[3]
image_height = pos['image']['channel'].shape[4]
self.writer.write_feature_groups()
self.writer.gallery.dset.attrs["colors"] = ["#FFFFFF"]
self.writer.gallery.dset.attrs["image_size"] = (image_width, image_height)
class StandardOutputWriter(object):
BBOX_DTYPE = [('file', 'S128'), ('treatment', 'S128'), ('label', '<i4'),
('x', '<i4'), ('y', '<i4'),
('angle', '<f8'), ('eccentricity', '<f8'),
('left', '<i4'), ('right', '<i4'), ('top', '<i4'), ('bottom', '<i4')]
FEATURE_DTYPE = [] # created dynamically
CONTOUR_DTYPE = h5py.special_dtype(vlen=numpy.uint16)
GALLERY_DTYPE = numpy.uint8
def __init__(self, filename, im_size):
self._fh = h5py.File(filename, 'w')
self._fh.attrs["application"] = "CellExplorer_deep_learning_cmd-line-tool-{}.{}.{}".format(*version)
self._fh.attrs["training_data"] = ["data",]
self.data_grp = self._fh.create_group("data")
self.bbox = self.create_writer("bbox", self.data_grp, self.BBOX_DTYPE)
self.contour_grp = self.data_grp.create_group("contours")
self.contour_grp.attrs["channels"] = ["Channel_1"]
self.contours = self.create_writer("Channel_1", self.contour_grp, self.CONTOUR_DTYPE, kind="a", shape=('x', 2), grow_dim=0)
self.gallery = self.create_writer("gallery", self.data_grp, self.GALLERY_DTYPE, kind="a", shape=(im_size, im_size,1,'x'), grow_dim=3)
# the feature writer cannot be created yet: FEATURE_DTYPE is only filled in dynamically
self.features = None
def create_writer(self, name, grp, dtype, kind='c', shape=None, grow_dim=0):
if kind == "c":
return Hdf5IncrementalCompoundWriter(name, grp, dtype)
elif kind == 'a':
return Hdf5IncrementalArrayWriter(name, grp, dtype, shape, grow_dim)
raise AttributeError("HDF5 Writer not supported. Choose Compound or Array, c or a.")
def write_bbox(self, data):
self.bbox.inc_write(data)
def write_contours(self, bbox, bb_size):
sh = bb_size/2
x = numpy.c_[bbox['x']-sh, bbox["x"]+sh, bbox["x"]+sh, bbox["x"]-sh]
y = numpy.c_[bbox['y']-sh, bbox["y"]-sh, bbox["y"]+sh, bbox["y"]+sh]
self.contours.resize_for(x)
# manual
self.contours.dset[self.contours.offset:self.contours.offset+len(x), 0] = x
self.contours.dset[self.contours.offset:self.contours.offset+len(y), 1] = y
self.contours.offset+=len(x)
def write_galleries(self, gals):
self.gallery.inc_write(gals.transpose())
def write_features(self, data):
if self.features is None:
self.features = self.create_writer("features", self.data_grp, self.FEATURE_DTYPE)
self.features.inc_write(data)
def write_feature_groups(self):
fg = self.data_grp.create_dataset("feature_groups", shape=(len(self.FEATURE_DTYPE),), dtype=[('feature', '|S64'), ('Simple1', '|S64')])
fg['feature'] = zip(*self.FEATURE_DTYPE)[0]
fg['Simple1'] = "Simple1"
def close(self):
self.bbox.finalize()
self.contours.finalize()
self.gallery.finalize()
self.features.finalize()
try:
self._fh.close()
except Exception as e:
logger.warn("Error: Problem closing file handle: {}".format(str(e)))
class Hdf5IncrementalCompoundWriter(object):
init_size = 1000
def __init__(self, object_name, obj_grp, dtype):
self.obj_grp = obj_grp
self.dtype = dtype
self.offset = 0
self.object_name = object_name
self.dset = self.obj_grp.create_dataset(self.object_name, shape=(self.init_size,), dtype=self.dtype, maxshape=(None,))
def inc_write(self, data):
if len(data) + self.offset > len(self.dset) :
# resize
self.dset.resize((len(data) + self.offset,))
if len(data.shape) == 2:
data = data[:,0]
self.dset[self.offset:self.offset+len(data)] = data
self.offset+=len(data)
def finalize(self):
self.dset.resize(self.offset, axis=0)
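# --- Illustrative sketch (not part of the original module) ------------------
# Exercises the incremental compound writer defined above on an in-memory
# HDF5 file; the dtype and chunk contents are arbitrary examples.
def _example_compound_writer():
    fh = h5py.File('example_compound.h5', 'w', driver='core', backing_store=False)
    writer = Hdf5IncrementalCompoundWriter('cells', fh, [('x', '<i4'), ('y', '<i4')])
    for _ in range(3):
        writer.inc_write(numpy.zeros(500, dtype=writer.dtype))
    writer.finalize()  # trims the pre-allocated dataset to the 1500 rows written
    return fh['cells'].shape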
class Hdf5IncrementalArrayWriter(object):
init_size = 1000
def __init__(self, object_name, obj_grp, dtype, shape, grow_dim=0):
self.obj_grp = obj_grp
self.dtype = dtype
self.offset = 0
self.object_name = object_name
self.grow_dim = grow_dim
init_shape = list(shape)
init_shape[grow_dim] = self.init_size
maxshape = list(shape)
maxshape[grow_dim] = None
self.dset = self.obj_grp.create_dataset(self.object_name, shape=init_shape, dtype=self.dtype, maxshape=maxshape)
def resize_for(self, data):
if data.shape[self.grow_dim] + self.offset > self.dset.shape[self.grow_dim] :
# resize
new_shape = list(self.dset.shape)
new_shape[self.grow_dim] = self.dset.shape[self.grow_dim] + data.shape[self.grow_dim]
self.dset.resize(new_shape)
def finalize(self):
final_shape = list(self.dset.shape)
final_shape[self.grow_dim] = self.offset
self.dset.resize(final_shape)
def inc_write(self, data):
self.resize_for(data)
index = [slice(None, None, None), ] * data.ndim
index[self.grow_dim] = slice(self.offset, self.offset + data.shape[self.grow_dim], None)
self.dset[tuple(index)] = data
self.offset+=data.shape[self.grow_dim]
def get_model_name(ch5_input, model_arch):
return "{}_{}".format(os.path.splitext(os.path.basename(ch5_input))[0], model_arch)
def train(args):
name = get_model_name(args.cellh5_input, args.ae_arch)
logger.info("Training: '{}'".format(name))
logger.info("Init reader")
cr = Cellh5Reader(name, args)
logger.info("Open cellh5 file")
galleries = cr.read_training_images()
logger.info("Write images to output file")
cr.write_galleriy_file(galleries)
logger.info("Load images from output file")
galleries = cr.load_galleriy_file()
logger.info("Init deep learning network")
ae = Autoencoder(cr.name, (1, cr.pargs.im_size, cr.pargs.im_size), denoising=cr.pargs.corruption)
logger.info("Set deep learning network architecture to: '{}'".format(cr.pargs.ae_arch))
arch = cr.pargs.ae_arch.replace("_", " ")
ae.init_from_string(arch)
logger.info("Configure trainer: '{}'".format(cr.pargs.learner))
if cr.pargs.learner == "nesterov":
trainer = NestorovTrainer(epochs=cr.pargs.epochs,
batchsize=cr.pargs.batchsize,
learning_rate=cr.pargs.learning_rate,
momentum=cr.pargs.momentum
)
else:
trainer = AdaGradTrainer(epochs=cr.pargs.epochs,
batchsize=cr.pargs.batchsize,
learning_rate=cr.pargs.learning_rate,
)
logger.info("Training: (can take hours...)")
trainer(ae).fit(galleries, None)
logger.info("Save deep learning network as '{}' (use this for encode)".format(name))
ae.save()
def predict(args):
logger.info("Init reader")
cr = Cellh5Reader(args.name, args)
logger.info("Read autoencoder model")
try:
ae = Autoencoder.load(args.name)
except Exception as e:
logging.error("Error loading autoencoder {}".format(str(e)))
sys.exit("Error: Cellh5 file does not exist")
output_file = "{}.hdf".format(args.name)
logger.info("Init output writer -> {}".format(output_file))
wr = StandardOutputWriter(output_file, args.im_size)
wr.FEATURE_DTYPE = [("ch1-deep_learning_feature_{}".format(dl), 'float32') for dl in xrange(ae.get_code_size())]
logger.info("Encode: (can take hours...)")
encoder = Ch5Encoder(ae, cr, wr)
encoder.run()
wr.close()
cr.close()
logger.info("Output file created. Open CellCognition Explorer GUI and open '{}'".format(output_file))
def main(args):
logging.basicConfig(level=args.loglevel or logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
if args.action == "train":
train(args)
elif args.action == "encode":
predict(args)
logger.debug(" --- The End ---")
if __name__ == '__main__':
logger.info("test")
parser = argparse.ArgumentParser(prog="CellExplorer deep learning command line extension", version="{}.{}.{}".format(*version), formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--verbose', action='store_true', help='verbose output', dest='loglevel')
parser.add_argument('--im_size', '-is', type=int, help='Size of the squared, cropped input images (in pixel)', default=60)
subparsers = parser.add_subparsers()
train_parser = subparsers.add_parser('train', help='Train a deep learning autoencoder from pre-processed image data stored in cellh5.',formatter_class=argparse.ArgumentDefaultsHelpFormatter)
train_parser.set_defaults(action="train")
train_parser.add_argument('cellh5_input', help='The cellh5 file as input for training an autoencoder.')
train_parser.add_argument('cellh5_mapping', help='Position mapping table (.txt file) with a "Group" Column to '
'indicate negative control conditions.' )
train_parser.add_argument('--autoencoder_architecture', '-ae', help='String describing the encoding layers of the autoencoder.\n'
'Three layer types are supported:'
'"cF.SA": convolutional layer with F filters of size SxS and activation A (A=r or A=s), '
'"pP" max-pooling layer with pooling size of PxP\n, and '
'"dN.D" fully-connected dense layer with output size N and additional drop-out layer with probability 0.D. '
'Deep autoencoders can be constructed by concatenating layers with "_"'
, default='c16.5r_p2_c32.3r_p2_d256.1r_d64.0s', dest='ae_arch')
train_parser.add_argument('--learner', '-l', choices=['nesterov', 'adagrad'], help='SGD variant. Choose between Nesterov momentum and AdaGrad updates.', default='adagrad')
train_parser.add_argument('--learning_rate', '-lr', type=float, help='Learning rate', default=0.05)
train_parser.add_argument('--momentum', '-m', type=float, help='Momentum for Nesterov updates', default=0.9)
train_parser.add_argument('--batchsize', '-bs' , type=int, help='Mini-batch size', default=128)
train_parser.add_argument('--corruption', '-c' , type=float, help='Initial corruption level of the denoising autoencoder', default=0.0)
train_parser.add_argument('--epochs', '-e' , type=int, help='Number of training epochs', default=16)
train_parser.add_argument('--nsamples', '-n' , type=int, help='The number of instances randomly sampled from negative control conditions', default=1000)
train_parser.add_argument('--neg_indicator', '-nd' , type=str, help='The token, which indicates a negative control condition in the mapping file', default='neg', dest='neg_condition')
predict_parser = subparsers.add_parser('encode', help='Encode image data (in cellh5) using a previously trained autoencoder. Result can be viewed and further analyzed using the CellExplorer GUI.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
predict_parser.set_defaults(action="encode")
predict_parser.add_argument('name', help='Name of the trained network (generated with option "train"' )
predict_parser.add_argument('cellh5_input', help='The cellh5 file as input for feature generation using an autoencoder.')
predict_parser.add_argument('cellh5_mapping', help='Position mapping table (.txt file), which contains all positions to be processed' )
main(parser.parse_args())
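# --- Illustrative command lines (not part of the original script) -----------
# Hypothetical invocations of the two sub-commands defined above; the file
# names are placeholders.
#
# Train an autoencoder on galleries sampled from negative control positions:
#   python cedl.py train plate_01.ch5 plate_01_mapping.txt \
#       -ae c16.5r_p2_c32.3r_p2_d256.1r_d64.0s -l adagrad -e 16 -n 10000
#
# Encode all positions with the trained network ("train" logs the model name
# to use here) and write a CellCognition Explorer file:
#   python cedl.py encode plate_01_c16.5r_p2_c32.3r_p2_d256.1r_d64.0s \
#       plate_01.ch5 plate_01_mapping.txt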
| gpl-3.0 |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/io/tests/test_packers.py | 1 | 30648 | import nose
import warnings
import os
import datetime
import numpy as np
import sys
from distutils.version import LooseVersion
from pandas import compat
from pandas.compat import u
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, period_range, Index, Categorical)
from pandas.core.common import PerformanceWarning
from pandas.io.packers import to_msgpack, read_msgpack
import pandas.util.testing as tm
from pandas.util.testing import (ensure_clean,
assert_categorical_equal,
assert_frame_equal,
assert_index_equal,
assert_series_equal,
patch)
from pandas.tests.test_panel import assert_panel_equal
import pandas
from pandas import Timestamp, NaT, tslib
nan = np.nan
try:
import blosc # NOQA
except ImportError:
_BLOSC_INSTALLED = False
else:
_BLOSC_INSTALLED = True
try:
import zlib # NOQA
except ImportError:
_ZLIB_INSTALLED = False
else:
_ZLIB_INSTALLED = True
_multiprocess_can_split_ = False
def check_arbitrary(a, b):
if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
assert(len(a) == len(b))
for a_, b_ in zip(a, b):
check_arbitrary(a_, b_)
elif isinstance(a, Panel):
assert_panel_equal(a, b)
elif isinstance(a, DataFrame):
assert_frame_equal(a, b)
elif isinstance(a, Series):
assert_series_equal(a, b)
elif isinstance(a, Index):
assert_index_equal(a, b)
else:
assert(a == b)
class TestPackers(tm.TestCase):
def setUp(self):
self.path = '__%s__.msg' % tm.rands(10)
def tearDown(self):
pass
def encode_decode(self, x, compress=None, **kwargs):
with ensure_clean(self.path) as p:
to_msgpack(p, x, compress=compress, **kwargs)
return read_msgpack(p, **kwargs)
class TestAPI(TestPackers):
def test_string_io(self):
df = DataFrame(np.random.randn(10, 2))
s = df.to_msgpack(None)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(compat.BytesIO(s))
tm.assert_frame_equal(result, df)
s = to_msgpack(None, df)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
with ensure_clean(self.path) as p:
s = df.to_msgpack()
fh = open(p, 'wb')
fh.write(s)
fh.close()
result = read_msgpack(p)
tm.assert_frame_equal(result, df)
def test_iterator_with_string_io(self):
dfs = [DataFrame(np.random.randn(10, 2)) for i in range(5)]
s = to_msgpack(None, *dfs)
for i, result in enumerate(read_msgpack(s, iterator=True)):
tm.assert_frame_equal(result, dfs[i])
def test_invalid_arg(self):
# GH10369
class A(object):
def __init__(self):
self.read = 0
tm.assertRaises(ValueError, read_msgpack, path_or_buf=None)
tm.assertRaises(ValueError, read_msgpack, path_or_buf={})
tm.assertRaises(ValueError, read_msgpack, path_or_buf=A())
class TestNumpy(TestPackers):
def test_numpy_scalar_float(self):
x = np.float32(np.random.rand())
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_scalar_complex(self):
x = np.complex64(np.random.rand() + 1j * np.random.rand())
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_scalar_float(self):
x = np.random.rand()
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_scalar_complex(self):
x = np.random.rand() + 1j * np.random.rand()
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_list_numpy_float(self):
x = [np.float32(np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_list_numpy_float_complex(self):
if not hasattr(np, 'complex128'):
raise nose.SkipTest('numpy cant handle complex128')
x = [np.float32(np.random.rand()) for i in range(5)] + \
[np.complex128(np.random.rand() + 1j * np.random.rand())
for i in range(5)]
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_list_float(self):
x = [np.random.rand() for i in range(5)]
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_list_float_complex(self):
x = [np.random.rand() for i in range(5)] + \
[(np.random.rand() + 1j * np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_dict_float(self):
x = {'foo': 1.0, 'bar': 2.0}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_dict_complex(self):
x = {'foo': 1.0 + 1.0j, 'bar': 2.0 + 2.0j}
x_rec = self.encode_decode(x)
self.assertEqual(x, x_rec)
for key in x:
self.assertEqual(type(x[key]), type(x_rec[key]))
def test_dict_numpy_float(self):
x = {'foo': np.float32(1.0), 'bar': np.float32(2.0)}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_dict_numpy_complex(self):
x = {'foo': np.complex128(1.0 + 1.0j),
'bar': np.complex128(2.0 + 2.0j)}
x_rec = self.encode_decode(x)
self.assertEqual(x, x_rec)
for key in x:
self.assertEqual(type(x[key]), type(x_rec[key]))
def test_numpy_array_float(self):
# run multiple times
for n in range(10):
x = np.random.rand(10)
for dtype in ['float32', 'float64']:
x = x.astype(dtype)
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_array_complex(self):
x = (np.random.rand(5) + 1j * np.random.rand(5)).astype(np.complex128)
x_rec = self.encode_decode(x)
self.assertTrue(all(map(lambda x, y: x == y, x, x_rec)) and
x.dtype == x_rec.dtype)
def test_list_mixed(self):
x = [1.0, np.float32(3.5), np.complex128(4.25), u('foo')]
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
class TestBasic(TestPackers):
def test_timestamp(self):
for i in [Timestamp(
'20130101'), Timestamp('20130101', tz='US/Eastern'),
Timestamp('201301010501')]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
def test_nat(self):
nat_rec = self.encode_decode(NaT)
self.assertIs(NaT, nat_rec)
def test_datetimes(self):
# fails under 2.6/win32 (np.datetime64 seems broken)
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('2.6 with np.datetime64 is broken')
for i in [datetime.datetime(2013, 1, 1),
datetime.datetime(2013, 1, 1, 5, 1),
datetime.date(2013, 1, 1),
np.datetime64(datetime.datetime(2013, 1, 5, 2, 15))]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
def test_timedeltas(self):
for i in [datetime.timedelta(days=1),
datetime.timedelta(days=1, seconds=10),
np.timedelta64(1000000)]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
class TestIndex(TestPackers):
def setUp(self):
super(TestIndex, self).setUp()
self.d = {
'string': tm.makeStringIndex(100),
'date': tm.makeDateIndex(100),
'int': tm.makeIntIndex(100),
'rng': tm.makeRangeIndex(100),
'float': tm.makeFloatIndex(100),
'empty': Index([]),
'tuple': Index(zip(['foo', 'bar', 'baz'], [1, 2, 3])),
'period': Index(period_range('2012-1-1', freq='M', periods=3)),
'date2': Index(date_range('2013-01-1', periods=10)),
'bdate': Index(bdate_range('2013-01-02', periods=10)),
}
self.mi = {
'reg': MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'),
('foo', 'two'),
('qux', 'one'), ('qux', 'two')],
names=['first', 'second']),
}
def test_basic_index(self):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
self.assertTrue(i.equals(i_rec))
# datetime with no freq (GH5506)
i = Index([Timestamp('20130101'), Timestamp('20130103')])
i_rec = self.encode_decode(i)
self.assertTrue(i.equals(i_rec))
# datetime with timezone
i = Index([Timestamp('20130101 9:00:00'), Timestamp(
'20130103 11:00:00')]).tz_localize('US/Eastern')
i_rec = self.encode_decode(i)
self.assertTrue(i.equals(i_rec))
def test_multi_index(self):
for s, i in self.mi.items():
i_rec = self.encode_decode(i)
self.assertTrue(i.equals(i_rec))
def test_unicode(self):
i = tm.makeUnicodeIndex(100)
i_rec = self.encode_decode(i)
self.assertTrue(i.equals(i_rec))
class TestSeries(TestPackers):
def setUp(self):
super(TestSeries, self).setUp()
self.d = {}
s = tm.makeStringSeries()
s.name = 'string'
self.d['string'] = s
s = tm.makeObjectSeries()
s.name = 'object'
self.d['object'] = s
s = Series(tslib.iNaT, dtype='M8[ns]', index=range(5))
self.d['date'] = s
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
'F': [Timestamp('20130102', tz='US/Eastern')] * 2 +
[Timestamp('20130603', tz='CET')] * 3,
'G': [Timestamp('20130102', tz='US/Eastern')] * 5,
}
self.d['float'] = Series(data['A'])
self.d['int'] = Series(data['B'])
self.d['mixed'] = Series(data['E'])
self.d['dt_tz_mixed'] = Series(data['F'])
self.d['dt_tz'] = Series(data['G'])
def test_basic(self):
# run multiple times here
for n in range(10):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
assert_series_equal(i, i_rec)
class TestCategorical(TestPackers):
def setUp(self):
super(TestCategorical, self).setUp()
self.d = {}
self.d['plain_str'] = Categorical(['a', 'b', 'c', 'd', 'e'])
self.d['plain_str_ordered'] = Categorical(['a', 'b', 'c', 'd', 'e'],
ordered=True)
self.d['plain_int'] = Categorical([5, 6, 7, 8])
self.d['plain_int_ordered'] = Categorical([5, 6, 7, 8], ordered=True)
def test_basic(self):
# run multiple times here
for n in range(10):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
assert_categorical_equal(i, i_rec)
class TestNDFrame(TestPackers):
def setUp(self):
super(TestNDFrame, self).setUp()
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
'F': [Timestamp('20130102', tz='US/Eastern')] * 5,
'G': [Timestamp('20130603', tz='CET')] * 5,
'H': Categorical(['a', 'b', 'c', 'd', 'e']),
'I': Categorical(['a', 'b', 'c', 'd', 'e'], ordered=True),
}
self.frame = {
'float': DataFrame(dict(A=data['A'], B=Series(data['A']) + 1)),
'int': DataFrame(dict(A=data['B'], B=Series(data['B']) + 1)),
'mixed': DataFrame(data)}
self.panel = {
'float': Panel(dict(ItemA=self.frame['float'],
ItemB=self.frame['float'] + 1))}
def test_basic_frame(self):
for s, i in self.frame.items():
i_rec = self.encode_decode(i)
assert_frame_equal(i, i_rec)
def test_basic_panel(self):
for s, i in self.panel.items():
i_rec = self.encode_decode(i)
assert_panel_equal(i, i_rec)
def test_multi(self):
i_rec = self.encode_decode(self.frame)
for k in self.frame.keys():
assert_frame_equal(self.frame[k], i_rec[k])
l = tuple([self.frame['float'], self.frame['float'].A,
self.frame['float'].B, None])
l_rec = self.encode_decode(l)
check_arbitrary(l, l_rec)
# this is an oddity in that packed lists will be returned as tuples
l = [self.frame['float'], self.frame['float']
.A, self.frame['float'].B, None]
l_rec = self.encode_decode(l)
self.assertIsInstance(l_rec, tuple)
check_arbitrary(l, l_rec)
def test_iterator(self):
l = [self.frame['float'], self.frame['float']
.A, self.frame['float'].B, None]
with ensure_clean(self.path) as path:
to_msgpack(path, *l)
for i, packed in enumerate(read_msgpack(path, iterator=True)):
check_arbitrary(packed, l[i])
def tests_datetimeindex_freq_issue(self):
# GH 5947
# inferring freq on the datetimeindex
df = DataFrame([1, 2, 3], index=date_range('1/1/2013', '1/3/2013'))
result = self.encode_decode(df)
assert_frame_equal(result, df)
df = DataFrame([1, 2], index=date_range('1/1/2013', '1/2/2013'))
result = self.encode_decode(df)
assert_frame_equal(result, df)
def test_dataframe_duplicate_column_names(self):
# GH 9618
expected_1 = DataFrame(columns=['a', 'a'])
expected_2 = DataFrame(columns=[1] * 100)
expected_2.loc[0] = np.random.randn(100)
expected_3 = DataFrame(columns=[1, 1])
expected_3.loc[0] = ['abc', np.nan]
result_1 = self.encode_decode(expected_1)
result_2 = self.encode_decode(expected_2)
result_3 = self.encode_decode(expected_3)
assert_frame_equal(result_1, expected_1)
assert_frame_equal(result_2, expected_2)
assert_frame_equal(result_3, expected_3)
class TestSparse(TestPackers):
def _check_roundtrip(self, obj, comparator, **kwargs):
# currently these are not implemented
# i_rec = self.encode_decode(obj)
# comparator(obj, i_rec, **kwargs)
self.assertRaises(NotImplementedError, self.encode_decode, obj)
def test_sparse_series(self):
s = tm.makeStringSeries()
s[3:5] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_series_equal,
check_series_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_series_equal,
check_series_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_series_equal,
check_series_type=True)
def test_sparse_frame(self):
s = tm.makeDataFrame()
s.ix[3:5, 1:3] = np.nan
s.ix[8:10, -2] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_frame_equal,
check_frame_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_frame_equal,
check_frame_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_frame_equal,
check_frame_type=True)
def test_sparse_panel(self):
with warnings.catch_warnings(record=True):
items = ['x', 'y', 'z']
p = Panel(dict((i, tm.makeDataFrame().ix[:2, :2]) for i in items))
sp = p.to_sparse()
self._check_roundtrip(sp, tm.assert_panel_equal,
check_panel_type=True)
sp2 = p.to_sparse(kind='integer')
self._check_roundtrip(sp2, tm.assert_panel_equal,
check_panel_type=True)
sp3 = p.to_sparse(fill_value=0)
self._check_roundtrip(sp3, tm.assert_panel_equal,
check_panel_type=True)
class TestCompression(TestPackers):
"""See https://github.com/pydata/pandas/pull/9783
"""
def setUp(self):
try:
from sqlalchemy import create_engine
self._create_sql_engine = create_engine
except ImportError:
self._SQLALCHEMY_INSTALLED = False
else:
self._SQLALCHEMY_INSTALLED = True
super(TestCompression, self).setUp()
data = {
'A': np.arange(1000, dtype=np.float64),
'B': np.arange(1000, dtype=np.int32),
'C': list(100 * 'abcdefghij'),
'D': date_range(datetime.datetime(2015, 4, 1), periods=1000),
'E': [datetime.timedelta(days=x) for x in range(1000)],
}
self.frame = {
'float': DataFrame(dict((k, data[k]) for k in ['A', 'A'])),
'int': DataFrame(dict((k, data[k]) for k in ['B', 'B'])),
'mixed': DataFrame(data),
}
def test_plain(self):
i_rec = self.encode_decode(self.frame)
for k in self.frame.keys():
assert_frame_equal(self.frame[k], i_rec[k])
def _test_compression(self, compress):
i_rec = self.encode_decode(self.frame, compress=compress)
for k in self.frame.keys():
value = i_rec[k]
expected = self.frame[k]
assert_frame_equal(value, expected)
# make sure that we can write to the new frames
for block in value._data.blocks:
self.assertTrue(block.values.flags.writeable)
def test_compression_zlib(self):
if not _ZLIB_INSTALLED:
raise nose.SkipTest('no zlib')
self._test_compression('zlib')
def test_compression_blosc(self):
if not _BLOSC_INSTALLED:
raise nose.SkipTest('no blosc')
self._test_compression('blosc')
def _test_compression_warns_when_decompress_caches(self, compress):
not_garbage = []
control = [] # copied data
compress_module = globals()[compress]
real_decompress = compress_module.decompress
def decompress(ob):
"""mock decompress function that delegates to the real
decompress but caches the result and a copy of the result.
"""
res = real_decompress(ob)
not_garbage.append(res) # hold a reference to this bytes object
control.append(bytearray(res)) # copy the data here to check later
return res
# types mapped to values to add in place.
rhs = {
np.dtype('float64'): 1.0,
np.dtype('int32'): 1,
np.dtype('object'): 'a',
np.dtype('datetime64[ns]'): np.timedelta64(1, 'ns'),
np.dtype('timedelta64[ns]'): np.timedelta64(1, 'ns'),
}
with patch(compress_module, 'decompress', decompress), \
tm.assert_produces_warning(PerformanceWarning) as ws:
i_rec = self.encode_decode(self.frame, compress=compress)
for k in self.frame.keys():
value = i_rec[k]
expected = self.frame[k]
assert_frame_equal(value, expected)
# make sure that we can write to the new frames even though
# we needed to copy the data
for block in value._data.blocks:
self.assertTrue(block.values.flags.writeable)
# mutate the data in some way
block.values[0] += rhs[block.dtype]
for w in ws:
# check the messages from our warnings
self.assertEqual(
str(w.message),
'copying data after decompressing; this may mean that'
' decompress is caching its result',
)
for buf, control_buf in zip(not_garbage, control):
# make sure none of our mutations above affected the
# original buffers
self.assertEqual(buf, control_buf)
def test_compression_warns_when_decompress_caches_zlib(self):
if not _ZLIB_INSTALLED:
raise nose.SkipTest('no zlib')
self._test_compression_warns_when_decompress_caches('zlib')
def test_compression_warns_when_decompress_caches_blosc(self):
if not _BLOSC_INSTALLED:
raise nose.SkipTest('no blosc')
self._test_compression_warns_when_decompress_caches('blosc')
def _test_small_strings_no_warn(self, compress):
empty = np.array([], dtype='uint8')
with tm.assert_produces_warning(None):
empty_unpacked = self.encode_decode(empty, compress=compress)
np.testing.assert_array_equal(empty_unpacked, empty)
self.assertTrue(empty_unpacked.flags.writeable)
char = np.array([ord(b'a')], dtype='uint8')
with tm.assert_produces_warning(None):
char_unpacked = self.encode_decode(char, compress=compress)
np.testing.assert_array_equal(char_unpacked, char)
self.assertTrue(char_unpacked.flags.writeable)
# if this test fails I am sorry because the interpreter is now in a
# bad state where b'a' points to 98 == ord(b'b').
char_unpacked[0] = ord(b'b')
# we compare the ord of bytes b'a' with unicode u'a' because they should
# always be the same (unless we were able to mutate the shared
# character singleton, in which case ord(b'a') == ord(b'b')).
self.assertEqual(ord(b'a'), ord(u'a'))
np.testing.assert_array_equal(
char_unpacked,
np.array([ord(b'b')], dtype='uint8'),
)
def test_small_strings_no_warn_zlib(self):
if not _ZLIB_INSTALLED:
raise nose.SkipTest('no zlib')
self._test_small_strings_no_warn('zlib')
def test_small_strings_no_warn_blosc(self):
if not _BLOSC_INSTALLED:
raise nose.SkipTest('no blosc')
self._test_small_strings_no_warn('blosc')
def test_readonly_axis_blosc(self):
# GH11880
if not _BLOSC_INSTALLED:
raise nose.SkipTest('no blosc')
df1 = DataFrame({'A': list('abcd')})
df2 = DataFrame(df1, index=[1., 2., 3., 4.])
self.assertTrue(1 in self.encode_decode(df1['A'], compress='blosc'))
self.assertTrue(1. in self.encode_decode(df2['A'], compress='blosc'))
def test_readonly_axis_zlib(self):
# GH11880
df1 = DataFrame({'A': list('abcd')})
df2 = DataFrame(df1, index=[1., 2., 3., 4.])
self.assertTrue(1 in self.encode_decode(df1['A'], compress='zlib'))
self.assertTrue(1. in self.encode_decode(df2['A'], compress='zlib'))
def test_readonly_axis_blosc_to_sql(self):
# GH11880
if not _BLOSC_INSTALLED:
raise nose.SkipTest('no blosc')
if not self._SQLALCHEMY_INSTALLED:
raise nose.SkipTest('no sqlalchemy')
expected = DataFrame({'A': list('abcd')})
df = self.encode_decode(expected, compress='blosc')
eng = self._create_sql_engine("sqlite:///:memory:")
df.to_sql('test', eng, if_exists='append')
result = pandas.read_sql_table('test', eng, index_col='index')
result.index.names = [None]
assert_frame_equal(expected, result)
def test_readonly_axis_zlib_to_sql(self):
# GH11880
if not _ZLIB_INSTALLED:
raise nose.SkipTest('no zlib')
if not self._SQLALCHEMY_INSTALLED:
raise nose.SkipTest('no sqlalchemy')
expected = DataFrame({'A': list('abcd')})
df = self.encode_decode(expected, compress='zlib')
eng = self._create_sql_engine("sqlite:///:memory:")
df.to_sql('test', eng, if_exists='append')
result = pandas.read_sql_table('test', eng, index_col='index')
result.index.names = [None]
assert_frame_equal(expected, result)
class TestEncoding(TestPackers):
def setUp(self):
super(TestEncoding, self).setUp()
data = {
'A': [compat.u('\u2019')] * 1000,
'B': np.arange(1000, dtype=np.int32),
'C': list(100 * 'abcdefghij'),
'D': date_range(datetime.datetime(2015, 4, 1), periods=1000),
'E': [datetime.timedelta(days=x) for x in range(1000)],
'G': [400] * 1000
}
self.frame = {
'float': DataFrame(dict((k, data[k]) for k in ['A', 'A'])),
'int': DataFrame(dict((k, data[k]) for k in ['B', 'B'])),
'mixed': DataFrame(data),
}
self.utf_encodings = ['utf8', 'utf16', 'utf32']
def test_utf(self):
# GH10581
for encoding in self.utf_encodings:
for frame in compat.itervalues(self.frame):
result = self.encode_decode(frame, encoding=encoding)
assert_frame_equal(result, frame)
def test_default_encoding(self):
for frame in compat.itervalues(self.frame):
result = frame.to_msgpack()
expected = frame.to_msgpack(encoding='utf8')
self.assertEqual(result, expected)
result = self.encode_decode(frame)
assert_frame_equal(result, frame)
class TestMsgpack():
"""
How to add msgpack tests:
1. Install pandas version intended to output the msgpack.
TestPackers
2. Execute "generate_legacy_storage_files.py" to create the msgpack.
$ python generate_legacy_storage_files.py <output_dir> msgpack
3. Move the created msgpack file to the "data/legacy_msgpack/<version>" directory.
NOTE: TestMsgpack can't be a subclass of tm.TestCase because it uses test generators.
http://stackoverflow.com/questions/6689537/nose-test-generators-inside-class
"""
def setUp(self):
from pandas.io.tests.generate_legacy_storage_files import (
create_msgpack_data, create_data)
self.data = create_msgpack_data()
self.all_data = create_data()
self.path = u('__%s__.msgpack' % tm.rands(10))
self.minimum_structure = {'series': ['float', 'int', 'mixed',
'ts', 'mi', 'dup'],
'frame': ['float', 'int', 'mixed', 'mi'],
'panel': ['float'],
'index': ['int', 'date', 'period'],
'mi': ['reg2']}
def check_min_structure(self, data):
for typ, v in self.minimum_structure.items():
assert typ in data, '"{0}" not found in unpacked data'.format(typ)
for kind in v:
assert kind in data[
typ], '"{0}" not found in data["{1}"]'.format(kind, typ)
def compare(self, vf, version):
# GH12277 encoding default used to be latin-1, now utf-8
if LooseVersion(version) < '0.18.0':
data = read_msgpack(vf, encoding='latin-1')
else:
data = read_msgpack(vf)
self.check_min_structure(data)
for typ, dv in data.items():
assert typ in self.all_data, ('unpacked data contains '
'extra key "{0}"'
.format(typ))
for dt, result in dv.items():
assert dt in self.all_data[typ], ('data["{0}"] contains extra '
'key "{1}"'.format(typ, dt))
try:
expected = self.data[typ][dt]
except KeyError:
continue
# use a specific comparator
# if available
comparator = getattr(
self, "compare_{typ}_{dt}".format(typ=typ, dt=dt), None)
if comparator is not None:
comparator(result, expected, typ, version)
else:
check_arbitrary(result, expected)
return data
def compare_series_dt_tz(self, result, expected, typ, version):
# 8260
# dtype is object < 0.17.0
if LooseVersion(version) < '0.17.0':
expected = expected.astype(object)
tm.assert_series_equal(result, expected)
else:
tm.assert_series_equal(result, expected)
def compare_frame_dt_mixed_tzs(self, result, expected, typ, version):
# 8260
# dtype is object < 0.17.0
if LooseVersion(version) < '0.17.0':
expected = expected.astype(object)
tm.assert_frame_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
def read_msgpacks(self, version):
pth = tm.get_data_path('legacy_msgpack/{0}'.format(str(version)))
n = 0
for f in os.listdir(pth):
# GH12142 0.17 files packed in P2 can't be read in P3
if (compat.PY3 and
version.startswith('0.17.') and
f.split('.')[-4][-1] == '2'):
continue
vf = os.path.join(pth, f)
try:
self.compare(vf, version)
except ImportError:
# blosc not installed
continue
n += 1
assert n > 0, 'Msgpack files are not tested'
def test_msgpack(self):
msgpack_path = tm.get_data_path('legacy_msgpack')
n = 0
for v in os.listdir(msgpack_path):
pth = os.path.join(msgpack_path, v)
if os.path.isdir(pth):
yield self.read_msgpacks, v
n += 1
assert n > 0, 'Msgpack files are not tested'
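# --- Illustrative sketch (not part of the original test module) -------------
# The basic round-trip exercised throughout these tests, written out as a
# standalone helper for reference; the DataFrame contents are arbitrary.
def _roundtrip_example():
    df = DataFrame({'A': np.arange(5, dtype=np.float64), 'B': list('abcde')})
    packed = df.to_msgpack()          # serialize to a bytes object
    restored = read_msgpack(packed)   # deserialize back into a DataFrame
    assert_frame_equal(restored, df)
    return restored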
| mit |
aabadie/scikit-learn | sklearn/manifold/setup.py | 24 | 1279 | import os
from os.path import join
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("manifold", parent_package, top_path)
libraries = []
if os.name == 'posix':
libraries.append('m')
config.add_extension("_utils",
sources=["_utils.c"],
include_dirs=[numpy.get_include()],
libraries=libraries,
extra_compile_args=["-O3"])
cblas_libs, blas_info = get_blas_info()
eca = blas_info.pop('extra_compile_args', [])
eca.append("-O4")
config.add_extension("_barnes_hut_tsne",
libraries=cblas_libs,
sources=["_barnes_hut_tsne.c"],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=eca, **blas_info)
config.add_subpackage('tests')
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
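# --- Illustrative usage (not part of the original file) ---------------------
# This setup script is normally driven by scikit-learn's top-level build, but
# the extensions declared above can also be compiled in place with, e.g.:
#   python setup.py build_ext --inplace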
| bsd-3-clause |
songeater/SONGSHTR | sampler.py | 1 | 5535 | '''
https://github.com/MattVitelli/GRUV
'''
from __future__ import print_function
from keras.models import Sequential, load_model, Model
from keras.layers import Dense, Lambda, Dropout, TimeDistributed, LSTM, Input
from keras import backend as K
from keras import objectives
from keras.optimizers import RMSprop
from keras.utils import generic_utils
from keras.utils.data_utils import get_file
from keras.callbacks import LearningRateScheduler
from sklearn.model_selection import train_test_split
import numpy as np
import random
import sys
import soundfile
import soundfunc as sf
import vae_model
import argparse
'''
----------
FUNCTIONS
----------
'''
parser = argparse.ArgumentParser(description='sample the music..')
parser.add_argument('-s', default="10_0", metavar='base', type=str,
help='suffix of model to be sampled from', required=False, dest='suffix')
parser.add_argument('-o', type=float, default=2, required=False,
help='Std Dev on sampling', dest='out_sd')
parser.add_argument('-l', type=int, default=800, required=False,
help='length of sequence', dest='outlen')
args = parser.parse_args()
'''
----------
INPUT VARIABLES
----------
'''
config = sf.get_config()
blocksize = config['blocksize']
compressed_blocksize = config['compressed_blocksize']
seqlen = config['seqlen']
win_edge = config['win_edge']
out_step = config['out_step']
sound_file = config['sound_file']
save_file = config['save_file']
batchsize = 1
domain = config['domain']
load_file_suffix= args.suffix
out_sd = args.out_sd
outlen = args.outlen
output_file = load_file_suffix+"_outsd="+str(int(out_sd*100))+"_"+domain+"_"+sound_file
load_file = save_file+load_file_suffix
sig, samplerate = soundfile.read(sound_file)
print("sample rate: ", samplerate)
print("sig shape: ", sig.shape)
print("flattened sig shape:", sig.shape)
sig_blocks = []
ang_blocks = []
'''
----------
PREPROCESS DATA
----------
'''
print("Dividing into blocks of size :", blocksize)
print("Converting to frequency domain...")
if domain == "rfft":
sig_blocks, ang_blocks = sf.conv_to_rfft(sig, blocksize, win_edge)
elif domain == "dct":
sig_blocks = sf.conv_to_dct(sig, blocksize, win_edge, compressed_blocksize)
print("Number of blocks:", len(sig_blocks))
print("Shape of blocks:", np.asarray(sig_blocks[0:len(sig_blocks)-1]).shape)
X_Train = np.zeros((batchsize, seqlen, compressed_blocksize))
y_Train = np.zeros((batchsize, compressed_blocksize))
'''
----------
BUILD MODEL
----------
'''
print('Build model...')
model = vae_model.build_model(out_sd, batchsize, seqlen, compressed_blocksize)
print(model.summary())
'''
----------
LOAD MODEL WEIGHTS
----------
'''
print('Loading model weights from...', load_file)
model.load_weights(load_file)
'''
----------
SEED THE MODEL
----------
'''
rand_start = np.random.randint(0,len(sig_blocks)-15*seqlen)
#"warming up" the stateful model
for feed in range (0,15):
print(feed)
seed_sample = sig_blocks[rand_start+seqlen*feed:rand_start+seqlen*(feed+1)]
#print(np.asarray(seed_sample).shape)
assert len(seed_sample)==seqlen
for i in range(0, len(seed_sample)):
x = np.zeros((batchsize, seqlen, compressed_blocksize))
if i == seqlen: break
x[0, i] = seed_sample[i]
model.predict(x, batch_size=batchsize, verbose=0)
print('Generating sample:')
x = np.zeros((batchsize, seqlen, compressed_blocksize))
if len(seed_sample)>seqlen: seed_sample=seed_sample[-seqlen:]
generated = []
last_seed = seed_sample
if domain=="rfft":
for i in range(outlen): #keep this even
x = np.zeros((batchsize, seqlen, compressed_blocksize))
for t in range(0, len(seed_sample)):
if t == seqlen: break
x[0, t] = seed_sample[t]
preds = model.predict_on_batch(x)[0]
print("preds shape:", np.asarray(preds).shape)
fft_pred = sf.conv_from_rfft(preds)
print("fft_pred shape:", np.asarray(fft_pred).shape)
generated.append(fft_pred)
print("generated shape:",np.asarray(generated).shape)
print("seed sample shape:",np.asarray(seed_sample).shape)
seed_sample=seed_sample[1:]
seed_sample.append(preds)
print("----")
new_gen = np.concatenate(sf.concat_sound_blocks(generated, win_edge))
print("new-gen shape:", np.asarray(new_gen).shape)
soundfile.write(output_file, new_gen, samplerate)
elif domain == "dct":
for i in range(outlen): #keep this even
x = np.zeros((batchsize, seqlen, compressed_blocksize))
for t in range(0, len(seed_sample)):
if t == seqlen: break
x[0, t] = seed_sample[t]
preds = model.predict_on_batch(x)[0]
print("preds shape:", np.asarray(preds).shape)
fft_pred = sf.conv_from_dct(preds, len(preds), blocksize)
print("fft_pred shape:", np.asarray(fft_pred).shape)
generated.append(fft_pred[0])
print("generated shape:",np.asarray(generated).shape)
print("seed sample shape:",np.asarray(seed_sample).shape)
seed_sample=seed_sample[1:]
seed_sample.append(preds)
print("----")
new_gen = np.concatenate(sf.concat_sound_blocks_mdct(generated, win_edge))
print("new-gen shape:", np.asarray(new_gen).shape)
soundfile.write(output_file, new_gen, samplerate)
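# --- Illustrative command line (not part of the original script) ------------
# A hypothetical invocation of the sampler defined above: load the weights
# saved with suffix "10_0", sample with an output std-dev of 2.0 and
# generate 800 blocks:
#   python sampler.py -s 10_0 -o 2.0 -l 800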
| agpl-3.0 |
ptrendx/mxnet | example/rcnn/symdata/vis.py | 11 | 1559 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
def vis_detection(im_orig, detections, class_names, thresh=0.7):
"""visualize [cls, conf, x1, y1, x2, y2]"""
import matplotlib.pyplot as plt
import random
plt.imshow(im_orig)
colors = [(random.random(), random.random(), random.random()) for _ in class_names]
for [cls, conf, x1, y1, x2, y2] in detections:
cls = int(cls)
if cls > 0 and conf > thresh:
rect = plt.Rectangle((x1, y1), x2 - x1, y2 - y1,
fill=False, edgecolor=colors[cls], linewidth=3.5)
plt.gca().add_patch(rect)
plt.gca().text(x1, y1 - 2, '{:s} {:.3f}'.format(class_names[cls], conf),
bbox=dict(facecolor=colors[cls], alpha=0.5), fontsize=12, color='white')
plt.show()
| apache-2.0 |
ShawnMurd/MetPy | examples/plots/surface_declarative.py | 6 | 2177 | # Copyright (c) 2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
=========================================
Surface Analysis using Declarative Syntax
=========================================
The MetPy declarative syntax allows for a simplified interface to creating common
meteorological analyses including surface observation plots.
"""
########################################
from datetime import datetime, timedelta
import cartopy.crs as ccrs
import pandas as pd
from metpy.cbook import get_test_data
import metpy.plots as mpplots
########################################
# **Getting the data**
#
# In this example, data is originally from the Iowa State ASOS archive
# (https://mesonet.agron.iastate.edu/request/download.phtml) downloaded through a separate
# Python script. The data are pre-processed to determine sky cover and weather symbols from
# text output.
data = pd.read_csv(get_test_data('SFC_obs.csv', as_file_obj=False),
infer_datetime_format=True, parse_dates=['valid'])
########################################
# **Plotting the data**
#
# Use the declarative plotting interface to plot surface observations over the state of
# Georgia.
# Plotting the Observations using a 15 minute time window for surface observations
obs = mpplots.PlotObs()
obs.data = data
obs.time = datetime(1993, 3, 12, 13)
obs.time_window = timedelta(minutes=15)
obs.level = None
obs.fields = ['tmpf', 'dwpf', 'emsl', 'cloud_cover', 'wxsym']
obs.locations = ['NW', 'SW', 'NE', 'C', 'W']
obs.colors = ['red', 'green', 'black', 'black', 'blue']
obs.formats = [None, None, lambda v: format(10 * v, '.0f')[-3:], 'sky_cover',
'current_weather']
obs.vector_field = ('uwind', 'vwind')
obs.reduce_points = 1
# Add map features for the particular panel
panel = mpplots.MapPanel()
panel.layout = (1, 1, 1)
panel.area = 'ga'
panel.projection = ccrs.PlateCarree()
panel.layers = ['coastline', 'borders', 'states']
panel.plots = [obs]
# Collecting panels for complete figure
pc = mpplots.PanelContainer()
pc.size = (10, 10)
pc.panels = [panel]
# Showing the results
pc.show()
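########################################
# If the installed MetPy version provides PanelContainer.save, the figure can
# be written to disk instead of (or in addition to) being shown interactively;
# the file name below is an arbitrary example.
#
# pc.save('surface_analysis_ga.png')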
| bsd-3-clause |
chrisburr/scikit-learn | examples/mixture/plot_gmm_classifier.py | 22 | 4015 | """
==================
GMM classification
==================
Demonstration of Gaussian mixture models for classification.
See :ref:`gmm` for more information on the estimator.
Plots predicted labels on both training and held out test data using a
variety of GMM classifiers on the iris dataset.
Compares GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.model_selection import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
colors = ['navy', 'turquoise', 'darkorange']
def make_ellipses(gmm, ax):
for n, color in enumerate(colors):
v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2])
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v *= 9
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(n_folds=4)
# Only take the first fold.
train_index, test_index = next(iter(skf.split(iris.data, iris.target)))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
classifiers = dict((covar_type, GMM(n_components=n_classes,
covariance_type=covar_type, init_params='wc', n_iter=20))
for covar_type in ['spherical', 'diag', 'tied', 'full'])
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * n_classifiers / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, classifier) in enumerate(classifiers.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)
for i in xrange(n_classes)])
# Train the other parameters using the EM algorithm.
classifier.fit(X_train)
h = plt.subplot(2, n_classifiers / 2, index + 1)
make_ellipses(classifier, h)
for n, color in enumerate(colors):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], s=0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate(colors):
data = X_test[y_test == n]
print(color)
plt.scatter(data[:, 0], data[:, 1], marker='x', color=color)
y_train_pred = classifier.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = classifier.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(loc='lower right', prop=dict(size=12))
plt.show()
| bsd-3-clause |
larsoner/mne-python | mne/source_estimate.py | 2 | 127983 | # Authors: Alexandre Gramfort <[email protected]>
# Matti Hämäläinen <[email protected]>
# Martin Luessi <[email protected]>
# Mads Jensen <[email protected]>
#
# License: BSD (3-clause)
import contextlib
import copy
import os.path as op
from types import GeneratorType
import numpy as np
from scipy import linalg, sparse
from scipy.sparse import coo_matrix, block_diag as sparse_block_diag
from .baseline import rescale
from .cov import Covariance
from .evoked import _get_peak
from .filter import resample
from .io.constants import FIFF
from .surface import (read_surface, _get_ico_surface, mesh_edges,
_project_onto_surface)
from .source_space import (_ensure_src, _get_morph_src_reordering,
_ensure_src_subject, SourceSpaces, _get_src_nn,
_import_nibabel, _get_mri_info_data,
_get_atlas_values, _check_volume_labels,
read_freesurfer_lut)
from .transforms import _get_trans, apply_trans
from .utils import (get_subjects_dir, _check_subject, logger, verbose, _pl,
_time_mask, warn, copy_function_doc_to_method_doc,
fill_doc, _check_option, _validate_type, _check_src_normal,
_check_stc_units, _check_pandas_installed,
_check_pandas_index_arguments, _convert_times, _ensure_int,
_build_data_frame, _check_time_format, _check_path_like,
sizeof_fmt, object_size)
from .viz import (plot_source_estimates, plot_vector_source_estimates,
plot_volume_source_estimates)
from .io.base import TimeMixin
from .io.meas_info import Info
from .externals.h5io import read_hdf5, write_hdf5
def _read_stc(filename):
"""Aux Function."""
with open(filename, 'rb') as fid:
buf = fid.read()
stc = dict()
offset = 0
num_bytes = 4
# read tmin in ms
stc['tmin'] = float(np.frombuffer(buf, dtype=">f4", count=1,
offset=offset))
stc['tmin'] /= 1000.0
offset += num_bytes
# read sampling rate in ms
stc['tstep'] = float(np.frombuffer(buf, dtype=">f4", count=1,
offset=offset))
stc['tstep'] /= 1000.0
offset += num_bytes
# read number of vertices/sources
vertices_n = int(np.frombuffer(buf, dtype=">u4", count=1, offset=offset))
offset += num_bytes
# read the source vector
stc['vertices'] = np.frombuffer(buf, dtype=">u4", count=vertices_n,
offset=offset)
offset += num_bytes * vertices_n
# read the number of timepts
data_n = int(np.frombuffer(buf, dtype=">u4", count=1, offset=offset))
offset += num_bytes
if (vertices_n and # vertices_n can be 0 (empty stc)
((len(buf) // 4 - 4 - vertices_n) % (data_n * vertices_n)) != 0):
raise ValueError('incorrect stc file size')
# read the data matrix
stc['data'] = np.frombuffer(buf, dtype=">f4", count=vertices_n * data_n,
offset=offset)
stc['data'] = stc['data'].reshape([data_n, vertices_n]).T
return stc
def _write_stc(filename, tmin, tstep, vertices, data):
"""Write an STC file.
Parameters
----------
filename : string
The name of the STC file.
tmin : float
The first time point of the data in seconds.
tstep : float
Time between frames in seconds.
vertices : array of integers
Vertex indices (0 based).
data : 2D array
The data matrix (nvert * ntime).
"""
fid = open(filename, 'wb')
# write start time in ms
fid.write(np.array(1000 * tmin, dtype='>f4').tobytes())
# write sampling rate in ms
fid.write(np.array(1000 * tstep, dtype='>f4').tobytes())
# write number of vertices
fid.write(np.array(vertices.shape[0], dtype='>u4').tobytes())
# write the vertex indices
fid.write(np.array(vertices, dtype='>u4').tobytes())
# write the number of timepts
fid.write(np.array(data.shape[1], dtype='>u4').tobytes())
#
# write the data
#
fid.write(np.array(data.T, dtype='>f4').tobytes())
# close the file
fid.close()
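# --- Illustrative sketch (not part of the original module) ------------------
# Round-trips a small random source estimate through the private STC helpers
# defined above; the file name and array shapes are arbitrary examples.
def _example_stc_roundtrip(fname='example-lh.stc'):
    vertices = np.array([0, 5, 10, 42], dtype=np.uint32)
    data = np.random.RandomState(0).randn(4, 25)  # nvert x ntime
    _write_stc(fname, tmin=0., tstep=0.001, vertices=vertices, data=data)
    stc = _read_stc(fname)
    np.testing.assert_allclose(stc['data'], data, rtol=1e-6, atol=1e-7)
    return stc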
def _read_3(fid):
"""Read 3 byte integer from file."""
data = np.fromfile(fid, dtype=np.uint8, count=3).astype(np.int32)
out = np.left_shift(data[0], 16) + np.left_shift(data[1], 8) + data[2]
return out
def _read_w(filename):
"""Read a w file.
w files contain activations or source reconstructions for a single time
point.
Parameters
----------
filename : str
The name of the w file.
Returns
-------
data : dict
The w structure. It has the following keys:
vertices : vertex indices (0 based)
data : the data array, shape (nvert,)
"""
with open(filename, 'rb', buffering=0) as fid: # buffering=0 for np bug
# skip first 2 bytes
fid.read(2)
# read number of vertices/sources (3 byte integer)
vertices_n = int(_read_3(fid))
vertices = np.zeros((vertices_n), dtype=np.int32)
data = np.zeros((vertices_n), dtype=np.float32)
# read the vertices and data
for i in range(vertices_n):
vertices[i] = _read_3(fid)
data[i] = np.fromfile(fid, dtype='>f4', count=1)[0]
w = dict()
w['vertices'] = vertices
w['data'] = data
return w
def _write_3(fid, val):
"""Write 3 byte integer to file."""
f_bytes = np.zeros((3), dtype=np.uint8)
f_bytes[0] = (val >> 16) & 255
f_bytes[1] = (val >> 8) & 255
f_bytes[2] = val & 255
fid.write(f_bytes.tobytes())
def _write_w(filename, vertices, data):
"""Write a w file.
w files contain activations or source reconstructions for a single time
point.
Parameters
----------
filename : str
The name of the w file.
vertices : array of int
Vertex indices (0 based).
data : 1D array
The data array, shape (nvert,).
"""
assert (len(vertices) == len(data))
fid = open(filename, 'wb')
# write 2 zero bytes
fid.write(np.zeros((2), dtype=np.uint8).tobytes())
# write number of vertices/sources (3 byte integer)
vertices_n = len(vertices)
_write_3(fid, vertices_n)
# write the vertices and data
for i in range(vertices_n):
_write_3(fid, vertices[i])
# XXX: without float() endianness is wrong, not sure why
fid.write(np.array(float(data[i]), dtype='>f4').tobytes())
# close the file
fid.close()
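# A minimal sketch (illustrative only): round-trip a single-time-point .w
# file through ``_write_w``/``_read_w`` above, which store the vertex count
# and indices as big-endian 3-byte integers via ``_write_3``/``_read_3``.
def _example_w_roundtrip():
    import tempfile
    vertices = np.array([1, 5, 9])
    data = np.array([0.5, -1.25, 2.0], dtype=np.float32)
    with tempfile.TemporaryDirectory() as tmp:
        fname = op.join(tmp, 'example-lh.w')
        _write_w(fname, vertices=vertices, data=data)
        w = _read_w(fname)
    np.testing.assert_array_equal(w['vertices'], vertices)
    np.testing.assert_allclose(w['data'], data)
    return w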
def read_source_estimate(fname, subject=None):
"""Read a source estimate object.
Parameters
----------
fname : str
Path to (a) source-estimate file(s).
subject : str | None
Name of the subject the source estimate(s) is (are) from.
It is good practice to set this attribute to avoid combining
incompatible labels and SourceEstimates (e.g., ones from other
subjects). Note that due to file specification limitations, the
subject name isn't saved to or loaded from files written to disk.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate | VolSourceEstimate | MixedSourceEstimate
The source estimate object loaded from file.
Notes
-----
- for volume source estimates, ``fname`` should provide the path to a
single file named '*-vl.stc' or '*-vol.stc'
- for surface source estimates, ``fname`` should either provide the
path to the file corresponding to a single hemisphere ('*-lh.stc',
'*-rh.stc') or only specify the asterisk part in these patterns. In any
case, the function expects files for both hemispheres with names
following this pattern.
- for vector surface source estimates, only HDF5 files are supported.
- for mixed source estimates, only HDF5 files are supported.
- for single time point .w files, ``fname`` should follow the same
pattern as for surface estimates, except that files are named
'*-lh.w' and '*-rh.w'.
""" # noqa: E501
fname_arg = fname
_validate_type(fname, 'path-like', 'fname')
fname = str(fname)
# make sure corresponding file(s) can be found
ftype = None
if op.exists(fname):
if fname.endswith('-vl.stc') or fname.endswith('-vol.stc') or \
fname.endswith('-vl.w') or fname.endswith('-vol.w'):
ftype = 'volume'
elif fname.endswith('.stc'):
ftype = 'surface'
if fname.endswith(('-lh.stc', '-rh.stc')):
fname = fname[:-7]
else:
err = ("Invalid .stc filename: %r; needs to end with "
"hemisphere tag ('...-lh.stc' or '...-rh.stc')"
% fname)
raise IOError(err)
elif fname.endswith('.w'):
ftype = 'w'
if fname.endswith(('-lh.w', '-rh.w')):
fname = fname[:-5]
else:
err = ("Invalid .w filename: %r; needs to end with "
"hemisphere tag ('...-lh.w' or '...-rh.w')"
% fname)
raise IOError(err)
elif fname.endswith('.h5'):
ftype = 'h5'
fname = fname[:-3]
else:
raise RuntimeError('Unknown extension for file %s' % fname_arg)
if ftype != 'volume':
stc_exist = [op.exists(f)
for f in [fname + '-rh.stc', fname + '-lh.stc']]
w_exist = [op.exists(f)
for f in [fname + '-rh.w', fname + '-lh.w']]
if all(stc_exist) and ftype != 'w':
ftype = 'surface'
elif all(w_exist):
ftype = 'w'
elif op.exists(fname + '.h5'):
ftype = 'h5'
elif op.exists(fname + '-stc.h5'):
ftype = 'h5'
fname += '-stc'
elif any(stc_exist) or any(w_exist):
raise IOError("Hemisphere missing for %r" % fname_arg)
else:
raise IOError("SourceEstimate File(s) not found for: %r"
% fname_arg)
# read the files
if ftype == 'volume': # volume source space
if fname.endswith('.stc'):
kwargs = _read_stc(fname)
elif fname.endswith('.w'):
kwargs = _read_w(fname)
kwargs['data'] = kwargs['data'][:, np.newaxis]
kwargs['tmin'] = 0.0
kwargs['tstep'] = 0.0
else:
raise IOError('Volume source estimate must end with .stc or .w')
kwargs['vertices'] = [kwargs['vertices']]
elif ftype == 'surface': # stc file with surface source spaces
lh = _read_stc(fname + '-lh.stc')
rh = _read_stc(fname + '-rh.stc')
assert lh['tmin'] == rh['tmin']
assert lh['tstep'] == rh['tstep']
kwargs = lh.copy()
kwargs['data'] = np.r_[lh['data'], rh['data']]
kwargs['vertices'] = [lh['vertices'], rh['vertices']]
elif ftype == 'w': # w file with surface source spaces
lh = _read_w(fname + '-lh.w')
rh = _read_w(fname + '-rh.w')
kwargs = lh.copy()
kwargs['data'] = np.atleast_2d(np.r_[lh['data'], rh['data']]).T
kwargs['vertices'] = [lh['vertices'], rh['vertices']]
# w files only have a single time point
kwargs['tmin'] = 0.0
kwargs['tstep'] = 1.0
ftype = 'surface'
elif ftype == 'h5':
kwargs = read_hdf5(fname + '.h5', title='mnepython')
ftype = kwargs.pop('src_type', 'surface')
if isinstance(kwargs['vertices'], np.ndarray):
kwargs['vertices'] = [kwargs['vertices']]
if ftype != 'volume':
# Make sure the vertices are ordered
vertices = kwargs['vertices']
if any(np.any(np.diff(v.astype(int)) <= 0) for v in vertices):
sidx = [np.argsort(verts) for verts in vertices]
vertices = [verts[idx] for verts, idx in zip(vertices, sidx)]
data = kwargs['data'][np.r_[sidx[0], len(sidx[0]) + sidx[1]]]
kwargs['vertices'] = vertices
kwargs['data'] = data
if 'subject' not in kwargs:
kwargs['subject'] = subject
if subject is not None and subject != kwargs['subject']:
raise RuntimeError('provided subject name "%s" does not match '
'subject name from the file "%s"'
% (subject, kwargs['subject']))
if ftype in ('volume', 'discrete'):
klass = VolVectorSourceEstimate
elif ftype == 'mixed':
klass = MixedVectorSourceEstimate
else:
assert ftype == 'surface'
klass = VectorSourceEstimate
if kwargs['data'].ndim < 3:
klass = klass._scalar_class
return klass(**kwargs)
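# A self-contained usage sketch for read_source_estimate (not part of the
# API): write a tiny pair of hemisphere STC files with the low-level writer
# above and load them back as a single SourceEstimate. File names and
# values are arbitrary.
def _example_read_surface_stc():
    import tempfile
    rng = np.random.RandomState(42)
    verts = [np.arange(4), np.arange(3)]
    with tempfile.TemporaryDirectory() as tmp:
        stem = op.join(tmp, 'example')
        for hemi, v in zip(('lh', 'rh'), verts):
            _write_stc('%s-%s.stc' % (stem, hemi), tmin=0., tstep=0.001,
                       vertices=v, data=rng.randn(len(v), 10))
        stc = read_source_estimate(stem, subject='sample')
    assert stc.data.shape == (7, 10)  # lh and rh stacked along axis 0
    return stc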
def _get_src_type(src, vertices, warn_text=None):
src_type = None
if src is None:
if warn_text is None:
warn("src should not be None for a robust guess of stc type.")
else:
warn(warn_text)
if isinstance(vertices, list) and len(vertices) == 2:
src_type = 'surface'
elif isinstance(vertices, np.ndarray) or isinstance(vertices, list) \
and len(vertices) == 1:
src_type = 'volume'
elif isinstance(vertices, list) and len(vertices) > 2:
src_type = 'mixed'
else:
src_type = src.kind
assert src_type in ('surface', 'volume', 'mixed', 'discrete')
return src_type
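# A small sketch of the vertices-based guess above: with ``src=None`` the
# number of vertex arrays decides the type (``warn_text`` only customizes
# the warning that is emitted because no source space was given).
def _example_guess_src_type():
    assert _get_src_type(None, [np.arange(5), np.arange(4)],
                         warn_text='guessing from vertices') == 'surface'
    assert _get_src_type(None, [np.arange(10)],
                         warn_text='guessing from vertices') == 'volume'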
def _make_stc(data, vertices, src_type=None, tmin=None, tstep=None,
subject=None, vector=False, source_nn=None, warn_text=None):
"""Generate a surface, vector-surface, volume or mixed source estimate."""
def guess_src_type():
return _get_src_type(src=None, vertices=vertices, warn_text=warn_text)
src_type = guess_src_type() if src_type is None else src_type
if vector and src_type == 'surface' and source_nn is None:
raise RuntimeError('No source vectors supplied.')
# infer Klass from src_type
if src_type == 'surface':
Klass = VectorSourceEstimate if vector else SourceEstimate
elif src_type in ('volume', 'discrete'):
Klass = VolVectorSourceEstimate if vector else VolSourceEstimate
elif src_type == 'mixed':
Klass = MixedVectorSourceEstimate if vector else MixedSourceEstimate
else:
raise ValueError('vertices has to be either a list with one or more '
'arrays or an array')
# Rotate back for vector source estimates
if vector:
n_vertices = sum(len(v) for v in vertices)
assert data.shape[0] in (n_vertices, n_vertices * 3)
if len(data) == n_vertices:
assert src_type == 'surface' # should only be possible for this
assert source_nn.shape == (n_vertices, 3)
data = data[:, np.newaxis] * source_nn[:, :, np.newaxis]
else:
data = data.reshape((-1, 3, data.shape[-1]))
assert source_nn.shape in ((n_vertices, 3, 3),
(n_vertices * 3, 3))
# This will be an identity transform for volumes, but let's keep
# the code simple and general and just do the matrix mult
data = np.matmul(
np.transpose(source_nn.reshape(n_vertices, 3, 3),
axes=[0, 2, 1]), data)
return Klass(
data=data, vertices=vertices, tmin=tmin, tstep=tstep, subject=subject
)
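# A minimal sketch of building a vector surface estimate with ``_make_stc``:
# a fixed-orientation time course per vertex is expanded along the supplied
# normals (``source_nn``). All values below are arbitrary.
def _example_make_vector_stc():
    rng = np.random.RandomState(0)
    vertices = [np.arange(2), np.arange(3)]
    n_vertices = sum(len(v) for v in vertices)
    data = rng.randn(n_vertices, 4)  # (n_sources, n_times)
    source_nn = np.tile([0., 0., 1.], (n_vertices, 1))  # unit normals
    stc = _make_stc(data, vertices, src_type='surface', tmin=0., tstep=0.01,
                    vector=True, source_nn=source_nn)
    assert stc.data.shape == (n_vertices, 3, 4)  # (sources, xyz, times)
    return stc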
def _verify_source_estimate_compat(a, b):
"""Make sure two SourceEstimates are compatible for arith. operations."""
compat = False
if type(a) != type(b):
raise ValueError('Cannot combine %s and %s.' % (type(a), type(b)))
if len(a.vertices) == len(b.vertices):
if all(np.array_equal(av, vv)
for av, vv in zip(a.vertices, b.vertices)):
compat = True
if not compat:
raise ValueError('Cannot combine source estimates that do not have '
'the same vertices. Consider using stc.expand().')
if a.subject != b.subject:
raise ValueError('source estimates do not have the same subject '
'names, %r and %r' % (a.subject, b.subject))
class _BaseSourceEstimate(TimeMixin):
_data_ndim = 2
@verbose
def __init__(self, data, vertices, tmin, tstep,
subject=None, verbose=None): # noqa: D102
assert hasattr(self, '_data_ndim'), self.__class__.__name__
assert hasattr(self, '_src_type'), self.__class__.__name__
assert hasattr(self, '_src_count'), self.__class__.__name__
kernel, sens_data = None, None
if isinstance(data, tuple):
if len(data) != 2:
raise ValueError('If data is a tuple it has to be length 2')
kernel, sens_data = data
data = None
if kernel.shape[1] != sens_data.shape[0]:
raise ValueError('kernel (%s) and sens_data (%s) have invalid '
'dimensions'
% (kernel.shape, sens_data.shape))
if sens_data.ndim != 2:
raise ValueError('The sensor data must have 2 dimensions, got '
'%s' % (sens_data.ndim,))
_validate_type(vertices, list, 'vertices')
if self._src_count is not None:
if len(vertices) != self._src_count:
raise ValueError('vertices must be a list with %d entries, '
'got %s' % (self._src_count, len(vertices)))
vertices = [np.array(v, np.int64) for v in vertices] # makes copy
if any(np.any(np.diff(v) <= 0) for v in vertices):
raise ValueError('Vertices must be ordered in increasing order.')
n_src = sum([len(v) for v in vertices])
# safeguard the user against doing something silly
if data is not None:
if data.ndim not in (self._data_ndim, self._data_ndim - 1):
raise ValueError('Data (shape %s) must have %s dimensions for '
'%s' % (data.shape, self._data_ndim,
self.__class__.__name__))
if data.shape[0] != n_src:
raise ValueError(
f'Number of vertices ({n_src}) and stc.data.shape[0] '
f'({data.shape[0]}) must match')
if self._data_ndim == 3:
if data.shape[1] != 3:
raise ValueError(
'Data for VectorSourceEstimate must have '
'shape[1] == 3, got shape %s' % (data.shape,))
if data.ndim == self._data_ndim - 1: # allow upbroadcasting
data = data[..., np.newaxis]
self._data = data
self._tmin = tmin
self._tstep = tstep
self.vertices = vertices
self.verbose = verbose
self._kernel = kernel
self._sens_data = sens_data
self._kernel_removed = False
self._times = None
self._update_times()
self.subject = _check_subject(None, subject, False)
def __repr__(self): # noqa: D105
s = "%d vertices" % (sum(len(v) for v in self.vertices),)
if self.subject is not None:
s += ", subject : %s" % self.subject
s += ", tmin : %s (ms)" % (1e3 * self.tmin)
s += ", tmax : %s (ms)" % (1e3 * self.times[-1])
s += ", tstep : %s (ms)" % (1e3 * self.tstep)
s += ", data shape : %s" % (self.shape,)
sz = sum(object_size(x) for x in (self.vertices + [self.data]))
s += f", ~{sizeof_fmt(sz)}"
return "<%s | %s>" % (type(self).__name__, s)
@fill_doc
def get_peak(self, tmin=None, tmax=None, mode='abs',
vert_as_index=False, time_as_index=False):
"""Get location and latency of peak amplitude.
Parameters
----------
%(get_peak_parameters)s
Returns
-------
pos : int
The vertex exhibiting the maximum response, either ID or index.
latency : float
The latency in seconds.
"""
stc = self.magnitude() if self._data_ndim == 3 else self
if self._n_vertices == 0:
raise RuntimeError('Cannot find peaks with no vertices')
vert_idx, time_idx, _ = _get_peak(
stc.data, self.times, tmin, tmax, mode)
if not vert_as_index:
vert_idx = np.concatenate(self.vertices)[vert_idx]
if not time_as_index:
time_idx = self.times[time_idx]
return vert_idx, time_idx
@verbose
def extract_label_time_course(self, labels, src, mode='auto',
allow_empty=False, verbose=None):
"""Extract label time courses for lists of labels.
This function will extract one time course for each label. The way the
time courses are extracted depends on the mode parameter.
Parameters
----------
%(eltc_labels)s
%(eltc_src)s
%(eltc_mode)s
%(eltc_allow_empty)s
%(verbose_meth)s
Returns
-------
%(eltc_returns)s
See Also
--------
extract_label_time_course : Extract time courses for multiple STCs.
Notes
-----
%(eltc_mode_notes)s
"""
return extract_label_time_course(
self, labels, src, mode=mode, return_generator=False,
allow_empty=allow_empty, verbose=verbose)
@verbose
def apply_baseline(self, baseline=(None, 0), *, verbose=None):
"""Baseline correct source estimate data.
Parameters
----------
%(baseline_stc)s
Defaults to ``(None, 0)``, i.e. beginning of the data until
time point zero.
%(verbose_meth)s
Returns
-------
stc : instance of SourceEstimate
The baseline-corrected source estimate object.
Notes
-----
Baseline correction can be done multiple times.
"""
self.data = rescale(self.data, self.times, baseline, copy=False)
return self
@verbose
def save(self, fname, ftype='h5', verbose=None):
"""Save the full source estimate to an HDF5 file.
Parameters
----------
fname : str
The file name to write the source estimate to, should end in
'-stc.h5'.
ftype : str
File format to use. Currently, the only allowed value is "h5".
%(verbose_meth)s
"""
_validate_type(fname, 'path-like', 'fname')
fname = str(fname)
if ftype != 'h5':
raise ValueError('%s objects can only be written as HDF5 files.'
% (self.__class__.__name__,))
if not fname.endswith('.h5'):
fname += '-stc.h5'
write_hdf5(fname,
dict(vertices=self.vertices, data=self.data,
tmin=self.tmin, tstep=self.tstep, subject=self.subject,
src_type=self._src_type),
title='mnepython', overwrite=True)
@copy_function_doc_to_method_doc(plot_source_estimates)
def plot(self, subject=None, surface='inflated', hemi='lh',
colormap='auto', time_label='auto', smoothing_steps=10,
transparent=True, alpha=1.0, time_viewer='auto',
subjects_dir=None,
figure=None, views='auto', colorbar=True, clim='auto',
cortex="classic", size=800, background="black",
foreground=None, initial_time=None, time_unit='s',
backend='auto', spacing='oct6', title=None, show_traces='auto',
src=None, volume_options=1., view_layout='vertical',
add_data_kwargs=None, verbose=None):
brain = plot_source_estimates(
self, subject, surface=surface, hemi=hemi, colormap=colormap,
time_label=time_label, smoothing_steps=smoothing_steps,
transparent=transparent, alpha=alpha, time_viewer=time_viewer,
subjects_dir=subjects_dir, figure=figure, views=views,
colorbar=colorbar, clim=clim, cortex=cortex, size=size,
background=background, foreground=foreground,
initial_time=initial_time, time_unit=time_unit, backend=backend,
spacing=spacing, title=title, show_traces=show_traces,
src=src, volume_options=volume_options, view_layout=view_layout,
add_data_kwargs=add_data_kwargs, verbose=verbose)
return brain
@property
def sfreq(self):
"""Sample rate of the data."""
return 1. / self.tstep
@property
def _n_vertices(self):
return sum(len(v) for v in self.vertices)
def _remove_kernel_sens_data_(self):
"""Remove kernel and sensor space data and compute self._data."""
if self._kernel is not None or self._sens_data is not None:
self._kernel_removed = True
self._data = np.dot(self._kernel, self._sens_data)
self._kernel = None
self._sens_data = None
@fill_doc
def crop(self, tmin=None, tmax=None, include_tmax=True):
"""Restrict SourceEstimate to a time interval.
Parameters
----------
tmin : float | None
The first time point in seconds. If None, the first time point available is used.
tmax : float | None
The last time point in seconds. If None, the last time point available is used.
%(include_tmax)s
Returns
-------
stc : instance of SourceEstimate
The cropped source estimate.
"""
mask = _time_mask(self.times, tmin, tmax, sfreq=self.sfreq,
include_tmax=include_tmax)
self.tmin = self.times[np.where(mask)[0][0]]
if self._kernel is not None and self._sens_data is not None:
self._sens_data = self._sens_data[..., mask]
else:
self.data = self.data[..., mask]
return self # return self for chaining methods
@verbose
def resample(self, sfreq, npad='auto', window='boxcar', n_jobs=1,
verbose=None):
"""Resample data.
If appropriate, an anti-aliasing filter is applied before resampling.
See :ref:`resampling-and-decimating` for more information.
Parameters
----------
sfreq : float
New sample rate to use.
npad : int | str
Amount to pad the start and end of the data.
Can also be "auto" to use a padding that will result in
a power-of-two size (can be much faster).
window : str | tuple
Window to use in resampling. See :func:`scipy.signal.resample`.
%(n_jobs)s
%(verbose_meth)s
Returns
-------
stc : instance of SourceEstimate
The resampled source estimate.
Notes
-----
For some data, it may be more accurate to use npad=0 to reduce
artifacts. This is dataset dependent -- check your data!
Note that the sample rate of the original data is inferred from tstep.
"""
# resampling in sensor instead of source space gives a somewhat
# different result, so we don't allow it
self._remove_kernel_sens_data_()
o_sfreq = 1.0 / self.tstep
data = self.data
if data.dtype == np.float32:
data = data.astype(np.float64)
self.data = resample(data, sfreq, o_sfreq, npad, n_jobs=n_jobs)
# adjust indirectly affected variables
self.tstep = 1.0 / sfreq
return self
@property
def data(self):
"""Numpy array of source estimate data."""
if self._data is None:
# compute the solution the first time the data is accessed and
# remove the kernel and sensor data
self._remove_kernel_sens_data_()
return self._data
@data.setter
def data(self, value):
value = np.asarray(value)
if self._data is not None and value.ndim != self._data.ndim:
raise ValueError('Data array should have %d dimensions.' %
self._data.ndim)
n_verts = sum(len(v) for v in self.vertices)
if value.shape[0] != n_verts:
raise ValueError('The first dimension of the data array must '
'match the number of vertices (%d != %d)' %
(value.shape[0], n_verts))
self._data = value
self._update_times()
@property
def shape(self):
"""Shape of the data."""
if self._data is not None:
return self._data.shape
return (self._kernel.shape[0], self._sens_data.shape[1])
@property
def tmin(self):
"""The first timestamp."""
return self._tmin
@tmin.setter
def tmin(self, value):
self._tmin = float(value)
self._update_times()
@property
def tstep(self):
"""The change in time between two consecutive samples (1 / sfreq)."""
return self._tstep
@tstep.setter
def tstep(self, value):
if value <= 0:
raise ValueError('.tstep must be greater than 0.')
self._tstep = float(value)
self._update_times()
@property
def times(self):
"""A timestamp for each sample."""
return self._times
@times.setter
def times(self, value):
raise ValueError('You cannot write to the .times attribute directly. '
'This property automatically updates whenever '
'.tmin, .tstep or .data changes.')
def _update_times(self):
"""Update the times attribute after changing tmin, tmax, or tstep."""
self._times = self.tmin + (self.tstep * np.arange(self.shape[-1]))
self._times.flags.writeable = False
def __add__(self, a):
"""Add source estimates."""
stc = self.copy()
stc += a
return stc
def __iadd__(self, a): # noqa: D105
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self.data += a.data
else:
self.data += a
return self
def mean(self):
"""Make a summary stc file with mean over time points.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The modified stc.
"""
out = self.sum()
out /= len(self.times)
return out
def sum(self):
"""Make a summary stc file with sum over time points.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The modified stc.
"""
data = self.data
tmax = self.tmin + self.tstep * data.shape[-1]
tmin = (self.tmin + tmax) / 2.
tstep = tmax - self.tmin
sum_stc = self.__class__(self.data.sum(axis=-1, keepdims=True),
vertices=self.vertices, tmin=tmin,
tstep=tstep, subject=self.subject)
return sum_stc
def __sub__(self, a):
"""Subtract source estimates."""
stc = self.copy()
stc -= a
return stc
def __isub__(self, a): # noqa: D105
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self.data -= a.data
else:
self.data -= a
return self
def __truediv__(self, a): # noqa: D105
return self.__div__(a)
def __div__(self, a): # noqa: D105
"""Divide source estimates."""
stc = self.copy()
stc /= a
return stc
def __itruediv__(self, a): # noqa: D105
return self.__idiv__(a)
def __idiv__(self, a): # noqa: D105
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self.data /= a.data
else:
self.data /= a
return self
def __mul__(self, a):
"""Multiply source estimates."""
stc = self.copy()
stc *= a
return stc
def __imul__(self, a): # noqa: D105
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self.data *= a.data
else:
self.data *= a
return self
def __pow__(self, a): # noqa: D105
stc = self.copy()
stc **= a
return stc
def __ipow__(self, a): # noqa: D105
self._remove_kernel_sens_data_()
self.data **= a
return self
def __radd__(self, a): # noqa: D105
return self + a
def __rsub__(self, a): # noqa: D105
return self - a
def __rmul__(self, a): # noqa: D105
return self * a
def __rdiv__(self, a): # noqa: D105
return self / a
def __neg__(self): # noqa: D105
"""Negate the source estimate."""
stc = self.copy()
stc._remove_kernel_sens_data_()
stc.data *= -1
return stc
def __pos__(self): # noqa: D105
return self
def __abs__(self):
"""Compute the absolute value of the data.
Returns
-------
stc : instance of _BaseSourceEstimate
A version of the source estimate, where the data attribute is set
to abs(self.data).
"""
stc = self.copy()
stc._remove_kernel_sens_data_()
stc._data = abs(stc._data)
return stc
def sqrt(self):
"""Take the square root.
Returns
-------
stc : instance of SourceEstimate
A copy of the SourceEstimate with sqrt(data).
"""
return self ** (0.5)
def copy(self):
"""Return copy of source estimate instance.
Returns
-------
stc : instance of SourceEstimate
A copy of the source estimate.
"""
return copy.deepcopy(self)
def bin(self, width, tstart=None, tstop=None, func=np.mean):
"""Return a source estimate object with data summarized over time bins.
The data are grouped into time bins of ``width`` seconds. This method is
intended for visualization only. No filter is applied to the data before
binning, making the method inappropriate as a tool for downsampling data.
Parameters
----------
width : scalar
Width of the individual bins in seconds.
tstart : scalar | None
Time point where the first bin starts. The default is the first
time point of the stc.
tstop : scalar | None
Last possible time point contained in a bin (if the last bin would
be shorter than width it is dropped). The default is the last time
point of the stc.
func : callable
Function that is applied to summarize the data. Needs to accept a
numpy.array as first input and an ``axis`` keyword argument.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The binned source estimate.
"""
if tstart is None:
tstart = self.tmin
if tstop is None:
tstop = self.times[-1]
times = np.arange(tstart, tstop + self.tstep, width)
nt = len(times) - 1
data = np.empty(self.shape[:-1] + (nt,), dtype=self.data.dtype)
for i in range(nt):
idx = (self.times >= times[i]) & (self.times < times[i + 1])
data[..., i] = func(self.data[..., idx], axis=-1)
tmin = times[0] + width / 2.
stc = self.copy()
stc._data = data
stc.tmin = tmin
stc.tstep = width
return stc
def transform_data(self, func, idx=None, tmin_idx=None, tmax_idx=None):
"""Get data after a linear (time) transform has been applied.
The transform is applied to each source time course independently.
Parameters
----------
func : callable
The transform to be applied, including parameters (see, e.g.,
:func:`functools.partial`). The first parameter of the function is
the input data. The first return value is the transformed data,
remaining outputs are ignored. The first dimension of the
transformed data has to be the same as the first dimension of the
input data.
idx : array | None
Indices of source time courses for which to compute transform.
If None, all time courses are used.
tmin_idx : int | None
Index of first time point to include. If None, the index of the
first time point is used.
tmax_idx : int | None
Index of the first time point not to include. If None, time points
up to (and including) the last time point are included.
Returns
-------
data_t : ndarray
The transformed data.
Notes
-----
Applying transforms can be significantly faster if the
SourceEstimate object was created using ``(kernel, sens_data)`` for
the ``data`` parameter, as the transform is then applied in sensor
space. Inverse methods, e.g., ``apply_inverse_epochs`` or
``apply_lcmv_epochs``, do this automatically (if possible).
"""
if idx is None:
# use all time courses by default
idx = slice(None, None)
if self._kernel is None and self._sens_data is None:
if self._kernel_removed:
warn('Performance can be improved by not accessing the data '
'attribute before calling this method.')
# transform source space data directly
data_t = func(self.data[idx, ..., tmin_idx:tmax_idx])
if isinstance(data_t, tuple):
# use only first return value
data_t = data_t[0]
else:
# apply transform in sensor space
sens_data_t = func(self._sens_data[:, tmin_idx:tmax_idx])
if isinstance(sens_data_t, tuple):
# use only first return value
sens_data_t = sens_data_t[0]
# apply inverse
data_shape = sens_data_t.shape
if len(data_shape) > 2:
# flatten the last dimensions
sens_data_t = sens_data_t.reshape(data_shape[0],
np.prod(data_shape[1:]))
data_t = np.dot(self._kernel[idx, :], sens_data_t)
# restore original shape if necessary
if len(data_shape) > 2:
data_t = data_t.reshape(data_t.shape[0], *data_shape[1:])
return data_t
def transform(self, func, idx=None, tmin=None, tmax=None, copy=False):
"""Apply linear transform.
The transform is applied to each source time course independently.
Parameters
----------
func : callable
The transform to be applied, including parameters (see, e.g.,
:func:`functools.partial`). The first parameter of the function is
the input data. The first two dimensions of the transformed data
should be (i) vertices and (ii) time. See Notes for details.
idx : array | None
Indices of source time courses for which to compute transform.
If None, all time courses are used.
tmin : float | int | None
First time point to include (ms). If None, self.tmin is used.
tmax : float | int | None
Last time point to include (ms). If None, self.tmax is used.
copy : bool
If True, return a new instance of SourceEstimate instead of
modifying the input inplace.
Returns
-------
stcs : SourceEstimate | VectorSourceEstimate | list
The transformed stc or, in the case of transforms which yield
N-dimensional output (where N > 2), a list of stcs. For a list,
copy must be True.
Notes
-----
Transforms which yield 3D
output (e.g. time-frequency transforms) are valid, so long as the
first two dimensions are vertices and time. In this case, the
copy parameter must be True and a list of
SourceEstimates, rather than a single instance of SourceEstimate,
will be returned, one for each index of the 3rd dimension of the
transformed data. In the case of transforms yielding 2D output
(e.g. filtering), the user has the option of modifying the input
inplace (copy = False) or returning a new instance of
SourceEstimate (copy = True) with the transformed data.
Applying transforms can be significantly faster if the
SourceEstimate object was created using ``(kernel, sens_data)`` for
the ``data`` parameter, as the transform is then applied in sensor
space. Inverse methods, e.g., ``apply_inverse_epochs`` or
``apply_lcmv_epochs``, do this automatically (if possible).
"""
# min and max data indices to include
times = 1000. * self.times
t_idx = np.where(_time_mask(times, tmin, tmax, sfreq=self.sfreq))[0]
if tmin is None:
tmin_idx = None
else:
tmin_idx = t_idx[0]
if tmax is None:
tmax_idx = None
else:
# +1, because upper boundary needs to include the last sample
tmax_idx = t_idx[-1] + 1
data_t = self.transform_data(func, idx=idx, tmin_idx=tmin_idx,
tmax_idx=tmax_idx)
# account for change in n_vertices
if idx is not None:
idx_lh = idx[idx < len(self.lh_vertno)]
idx_rh = idx[idx >= len(self.lh_vertno)] - len(self.lh_vertno)
verts_lh = self.lh_vertno[idx_lh]
verts_rh = self.rh_vertno[idx_rh]
else:
verts_lh = self.lh_vertno
verts_rh = self.rh_vertno
verts = [verts_lh, verts_rh]
tmin_idx = 0 if tmin_idx is None else tmin_idx
tmin = self.times[tmin_idx]
if data_t.ndim > 2:
# return list of stcs if transformed data has dimensionality > 2
if copy:
stcs = [SourceEstimate(data_t[:, :, a], verts, tmin,
self.tstep, self.subject)
for a in range(data_t.shape[-1])]
else:
raise ValueError('copy must be True if transformed data has '
'more than 2 dimensions')
else:
# return new or overwritten stc
stcs = self if not copy else self.copy()
stcs.vertices = verts
stcs.data = data_t
stcs.tmin = tmin
return stcs
@fill_doc
def to_data_frame(self, index=None, scalings=None,
long_format=False, time_format='ms'):
"""Export data in tabular structure as a pandas DataFrame.
Vertices are converted to columns in the DataFrame. By default,
an additional column "time" is added, unless ``index='time'``
(in which case time values form the DataFrame's index).
Parameters
----------
%(df_index_evk)s
Defaults to ``None``.
%(df_scalings)s
%(df_longform_stc)s
%(df_time_format)s
.. versionadded:: 0.20
Returns
-------
%(df_return)s
"""
# check pandas once here, instead of in each private utils function
pd = _check_pandas_installed() # noqa
# arg checking
valid_index_args = ['time', 'subject']
valid_time_formats = ['ms', 'timedelta']
index = _check_pandas_index_arguments(index, valid_index_args)
time_format = _check_time_format(time_format, valid_time_formats)
# get data
data = self.data.T
times = self.times
# prepare extra columns / multiindex
mindex = list()
default_index = ['time']
if self.subject is not None:
default_index = ['subject', 'time']
mindex.append(('subject', np.repeat(self.subject, data.shape[0])))
times = _convert_times(self, times, time_format)
mindex.append(('time', times))
# triage surface vs volume source estimates
col_names = list()
kinds = ['VOL'] * len(self.vertices)
if isinstance(self, (_BaseSurfaceSourceEstimate,
_BaseMixedSourceEstimate)):
kinds[:2] = ['LH', 'RH']
for ii, (kind, vertno) in enumerate(zip(kinds, self.vertices)):
col_names.extend(['{}_{}'.format(kind, vert) for vert in vertno])
# build DataFrame
df = _build_data_frame(self, data, None, long_format, mindex, index,
default_index=default_index,
col_names=col_names, col_kind='source')
return df
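# A minimal sketch of the ``(kernel, sens_data)`` form accepted by the
# constructor above (not a recommended workflow, just an illustration):
# the source-space data are only materialized as ``kernel @ sens_data``
# when ``.data`` is first accessed. Shapes are arbitrary.
def _example_kernel_stc():
    rng = np.random.RandomState(0)
    vertices = [np.arange(3), np.arange(2)]
    kernel = rng.randn(5, 6)      # (n_vertices, n_sensors)
    sens_data = rng.randn(6, 10)  # (n_sensors, n_times)
    stc = SourceEstimate((kernel, sens_data), vertices, tmin=0., tstep=0.001)
    assert stc.shape == (5, 10)   # known without computing the product
    np.testing.assert_allclose(stc.data, np.dot(kernel, sens_data))
    return stc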
def _center_of_mass(vertices, values, hemi, surf, subject, subjects_dir,
restrict_vertices):
"""Find the center of mass on a surface."""
if (values == 0).all() or (values < 0).any():
raise ValueError('All values must be non-negative and at least one '
'must be non-zero, cannot compute COM')
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
surf = read_surface(op.join(subjects_dir, subject, 'surf',
hemi + '.' + surf))
if restrict_vertices is True:
restrict_vertices = vertices
elif restrict_vertices is False:
restrict_vertices = np.arange(surf[0].shape[0])
elif isinstance(restrict_vertices, SourceSpaces):
idx = 1 if restrict_vertices.kind == 'surface' and hemi == 'rh' else 0
restrict_vertices = restrict_vertices[idx]['vertno']
else:
restrict_vertices = np.array(restrict_vertices, int)
pos = surf[0][vertices, :].T
c_o_m = np.sum(pos * values, axis=1) / np.sum(values)
vertex = np.argmin(np.sqrt(np.mean((surf[0][restrict_vertices, :] -
c_o_m) ** 2, axis=1)))
vertex = restrict_vertices[vertex]
return vertex
@fill_doc
class _BaseSurfaceSourceEstimate(_BaseSourceEstimate):
"""Abstract base class for surface source estimates.
Parameters
----------
data : array
The data in source space.
vertices : list of array, shape (2,)
Vertex numbers corresponding to the data. The first element of the list
contains vertices of left hemisphere and the second element contains
vertices of right hemisphere.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : list of array, shape (2,)
Vertex numbers corresponding to the data. The first element of the list
contains vertices of left hemisphere and the second element contains
vertices of right hemisphere.
data : array
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
"""
_src_type = 'surface'
_src_count = 2
@property
def lh_data(self):
"""Left hemisphere data."""
return self.data[:len(self.lh_vertno)]
@property
def rh_data(self):
"""Right hemisphere data."""
return self.data[len(self.lh_vertno):]
@property
def lh_vertno(self):
"""Left hemisphere vertno."""
return self.vertices[0]
@property
def rh_vertno(self):
"""Right hemisphere vertno."""
return self.vertices[1]
def _hemilabel_stc(self, label):
if label.hemi == 'lh':
stc_vertices = self.vertices[0]
else:
stc_vertices = self.vertices[1]
# find index of the Label's vertices
idx = np.nonzero(np.in1d(stc_vertices, label.vertices))[0]
# find output vertices
vertices = stc_vertices[idx]
# find data
if label.hemi == 'rh':
values = self.data[idx + len(self.vertices[0])]
else:
values = self.data[idx]
return vertices, values
def in_label(self, label):
"""Get a source estimate object restricted to a label.
SourceEstimate contains the time course of
activation of all sources inside the label.
Parameters
----------
label : Label | BiHemiLabel
The label (as created for example by mne.read_label). If the label
does not match any sources in the SourceEstimate, a ValueError is
raised.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The source estimate restricted to the given label.
"""
# make sure label and stc are compatible
from .label import Label, BiHemiLabel
_validate_type(label, (Label, BiHemiLabel), 'label')
if label.subject is not None and self.subject is not None \
and label.subject != self.subject:
raise RuntimeError('label and stc must have same subject names, '
'currently "%s" and "%s"' % (label.subject,
self.subject))
if label.hemi == 'both':
lh_vert, lh_val = self._hemilabel_stc(label.lh)
rh_vert, rh_val = self._hemilabel_stc(label.rh)
vertices = [lh_vert, rh_vert]
values = np.vstack((lh_val, rh_val))
elif label.hemi == 'lh':
lh_vert, values = self._hemilabel_stc(label)
vertices = [lh_vert, np.array([], int)]
else:
assert label.hemi == 'rh'
rh_vert, values = self._hemilabel_stc(label)
vertices = [np.array([], int), rh_vert]
if sum([len(v) for v in vertices]) == 0:
raise ValueError('No vertices match the label in the stc file')
label_stc = self.__class__(values, vertices=vertices, tmin=self.tmin,
tstep=self.tstep, subject=self.subject)
return label_stc
def expand(self, vertices):
"""Expand SourceEstimate to include more vertices.
This will add rows to stc.data (zero-filled) and modify stc.vertices
to include all vertices in stc.vertices and the input vertices.
Parameters
----------
vertices : list of array
New vertices to add. Can also contain old values.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The modified stc (note: method operates inplace).
"""
if not isinstance(vertices, list):
raise TypeError('vertices must be a list')
if not len(self.vertices) == len(vertices):
raise ValueError('vertices must have the same length as '
'stc.vertices')
# can no longer use kernel and sensor data
self._remove_kernel_sens_data_()
inserters = list()
offsets = [0]
for vi, (v_old, v_new) in enumerate(zip(self.vertices, vertices)):
v_new = np.setdiff1d(v_new, v_old)
inds = np.searchsorted(v_old, v_new)
# newer numpy might overwrite inds after np.insert, copy here
inserters += [inds.copy()]
offsets += [len(v_old)]
self.vertices[vi] = np.insert(v_old, inds, v_new)
inds = [ii + offset for ii, offset in zip(inserters, offsets[:-1])]
inds = np.concatenate(inds)
new_data = np.zeros((len(inds),) + self.data.shape[1:])
self.data = np.insert(self.data, inds, new_data, axis=0)
return self
@verbose
def to_original_src(self, src_orig, subject_orig=None,
subjects_dir=None, verbose=None):
"""Get a source estimate from morphed source to the original subject.
Parameters
----------
src_orig : instance of SourceSpaces
The original source spaces that were morphed to the current
subject.
subject_orig : str | None
The original subject. For most source spaces this shouldn't need
to be provided, since it is stored in the source space itself.
%(subjects_dir)s
%(verbose_meth)s
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The transformed source estimate.
See Also
--------
morph_source_spaces
Notes
-----
.. versionadded:: 0.10.0
"""
if self.subject is None:
raise ValueError('stc.subject must be set')
src_orig = _ensure_src(src_orig, kind='surface')
subject_orig = _ensure_src_subject(src_orig, subject_orig)
data_idx, vertices = _get_morph_src_reordering(
self.vertices, src_orig, subject_orig, self.subject, subjects_dir)
return self.__class__(self._data[data_idx], vertices,
self.tmin, self.tstep, subject_orig)
@fill_doc
def get_peak(self, hemi=None, tmin=None, tmax=None, mode='abs',
vert_as_index=False, time_as_index=False):
"""Get location and latency of peak amplitude.
Parameters
----------
hemi : {'lh', 'rh', None}
The hemi to be considered. If None, the entire source space is
considered.
%(get_peak_parameters)s
Returns
-------
pos : int
The vertex exhibiting the maximum response, either ID or index.
latency : float | int
The time point of the maximum response, either latency in seconds
or index.
"""
_check_option('hemi', hemi, ('lh', 'rh', None))
vertex_offset = 0
if hemi is not None:
if hemi == 'lh':
data = self.lh_data
vertices = [self.lh_vertno, []]
else:
vertex_offset = len(self.vertices[0])
data = self.rh_data
vertices = [[], self.rh_vertno]
meth = self.__class__(
data, vertices, self.tmin, self.tstep).get_peak
else:
meth = super().get_peak
out = meth(tmin=tmin, tmax=tmax, mode=mode,
vert_as_index=vert_as_index,
time_as_index=time_as_index)
if vertex_offset and vert_as_index:
out = (out[0] + vertex_offset, out[1])
return out
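# A small sketch of the hemisphere helpers above on a synthetic estimate:
# the first ``len(vertices[0])`` rows of ``data`` belong to the left
# hemisphere, the remaining rows to the right. Values are arbitrary.
def _example_hemi_split():
    rng = np.random.RandomState(0)
    vertices = [np.arange(4), np.arange(6)]
    stc = SourceEstimate(rng.randn(10, 3), vertices, tmin=0., tstep=0.1)
    assert stc.lh_data.shape == (4, 3)
    assert stc.rh_data.shape == (6, 3)
    vert, lat = stc.get_peak(hemi='rh', mode='abs')
    assert vert in stc.rh_vertno and lat in stc.times
    return stc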
@fill_doc
class SourceEstimate(_BaseSurfaceSourceEstimate):
"""Container for surface source estimates.
Parameters
----------
data : array of shape (n_dipoles, n_times) | tuple, shape (2,)
The data in source space. When it is a single array, the
left hemisphere is stored in data[:len(vertices[0])] and the right
hemisphere is stored in data[-len(vertices[1]):].
When data is a tuple, it contains two arrays:
- "kernel" shape (n_vertices, n_sensors) and
- "sens_data" shape (n_sensors, n_times).
In this case, the source space data corresponds to
``np.dot(kernel, sens_data)``.
vertices : list of array, shape (2,)
Vertex numbers corresponding to the data. The first element of the list
contains vertices of left hemisphere and the second element contains
vertices of right hemisphere.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : list of array, shape (2,)
The indices of the dipoles in the left and right source space.
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
See Also
--------
VectorSourceEstimate : A container for vector source estimates.
VolSourceEstimate : A container for volume source estimates.
MixedSourceEstimate : A container for mixed surface + volume source
estimates.
"""
@verbose
def save(self, fname, ftype='stc', verbose=None):
"""Save the source estimates to a file.
Parameters
----------
fname : str
The stem of the file name. The file names used for surface source
spaces are obtained by adding "-lh.stc" and "-rh.stc" (or "-lh.w"
and "-rh.w") to the stem provided, for the left and the right
hemisphere, respectively.
ftype : str
File format to use. Allowed values are "stc" (default), "w",
and "h5". The "w" format only supports a single time point.
%(verbose_meth)s
"""
_validate_type(fname, 'path-like', 'fname')
fname = str(fname)
_check_option('ftype', ftype, ['stc', 'w', 'h5'])
lh_data = self.data[:len(self.lh_vertno)]
rh_data = self.data[-len(self.rh_vertno):]
if ftype == 'stc':
if np.iscomplexobj(self.data):
raise ValueError("Cannot save complex-valued STC data in "
"FIFF format; please set ftype='h5' to save "
"in HDF5 format instead, or cast the data to "
"real numbers before saving.")
logger.info('Writing STC to disk...')
_write_stc(fname + '-lh.stc', tmin=self.tmin, tstep=self.tstep,
vertices=self.lh_vertno, data=lh_data)
_write_stc(fname + '-rh.stc', tmin=self.tmin, tstep=self.tstep,
vertices=self.rh_vertno, data=rh_data)
elif ftype == 'w':
if self.shape[1] != 1:
raise ValueError('w files can only contain a single time '
'point')
logger.info('Writing STC to disk (w format)...')
_write_w(fname + '-lh.w', vertices=self.lh_vertno,
data=lh_data[:, 0])
_write_w(fname + '-rh.w', vertices=self.rh_vertno,
data=rh_data[:, 0])
elif ftype == 'h5':
super().save(fname)
logger.info('[done]')
@verbose
def estimate_snr(self, info, fwd, cov, verbose=None):
r"""Compute time-varying SNR in the source space.
This function should only be used with source estimates with units
nanoAmperes (i.e., MNE-like solutions, *not* dSPM or sLORETA).
.. warning:: This function currently only works properly for fixed
orientation.
Parameters
----------
info : instance of Info
The measurement info.
fwd : instance of Forward
The forward solution used to create the source estimate.
cov : instance of Covariance
The noise covariance used to estimate the resting cortical
activations. Should be an evoked covariance, not empty room.
%(verbose)s
Returns
-------
snr_stc : instance of SourceEstimate
The source estimate with the SNR computed.
Notes
-----
We define the SNR in decibels for each source location at each
time point as:
.. math::
{\rm SNR} = 10 \log_{10} \left[\frac{a^2}{N} \sum_k \frac{b_k^2}{s_k^2}\right]
where :math:`b_k` is the signal on sensor :math:`k` provided by the
forward model for a source with unit amplitude, :math:`a` is the
source amplitude, :math:`N` is the number of sensors, and
:math:`s_k^2` is the noise variance on sensor :math:`k`.
References
----------
.. [1] Goldenholz, D. M., Ahlfors, S. P., Hämäläinen, M. S., Sharon,
D., Ishitobi, M., Vaina, L. M., & Stufflebeam, S. M. (2009).
Mapping the Signal-To-Noise-Ratios of Cortical Sources in
Magnetoencephalography and Electroencephalography.
Human Brain Mapping, 30(4), 1077–1086. doi:10.1002/hbm.20571
"""
from .forward import convert_forward_solution, Forward
from .minimum_norm.inverse import _prepare_forward
_validate_type(fwd, Forward, 'fwd')
_validate_type(info, Info, 'info')
_validate_type(cov, Covariance, 'cov')
_check_stc_units(self)
if (self.data >= 0).all():
warn('This STC appears to be from free orientation; the SNR '
'function is currently only valid for fixed orientation')
fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=False)
# G is gain matrix [ch x src], cov is noise covariance [ch x ch]
G, _, _, _, _, _, _, cov, _ = _prepare_forward(
fwd, info, cov, fixed=True, loose=0, rank=None, pca=False,
use_cps=True, exp=None, limit_depth_chs=False, combine_xyz='fro',
allow_fixed_depth=False, limit=None)
G = G['sol']['data']
n_channels = cov['dim'] # number of sensors/channels
b_k2 = (G * G).T
s_k2 = np.diag(cov['data'])
scaling = (1 / n_channels) * np.sum(b_k2 / s_k2, axis=1, keepdims=True)
snr_stc = self.copy()
snr_stc._data[:] = 10 * np.log10((self.data * self.data) * scaling)
return snr_stc
@fill_doc
def center_of_mass(self, subject=None, hemi=None, restrict_vertices=False,
subjects_dir=None, surf='sphere'):
"""Compute the center of mass of activity.
This function computes the spatial center of mass on the surface
as well as the temporal center of mass as in [1]_.
.. note:: All activity must occur in a single hemisphere, otherwise
an error is raised. The "mass" of each point in space for
computing the spatial center of mass is computed by summing
across time, and vice-versa for each point in time in
computing the temporal center of mass. This is useful for
quantifying spatio-temporal cluster locations, especially
when combined with :func:`mne.vertex_to_mni`.
Parameters
----------
subject : str | None
The subject the stc is defined for.
hemi : int | None
Calculate the center of mass for the left (0) or right (1)
hemisphere. If None, one of the hemispheres must be all zeroes,
and the center of mass will be calculated for the other
hemisphere (useful for getting COM for clusters).
restrict_vertices : bool | array of int | instance of SourceSpaces
If True, returned vertex will be one from stc. Otherwise, it could
be any vertex from surf. If an array of int, the returned vertex
will come from that array. If instance of SourceSpaces (as of
0.13), the returned vertex will be from the given source space.
For most accurate estimates, do not restrict vertices.
%(subjects_dir)s
surf : str
The surface to use for Euclidean distance center of mass
finding. The default here is "sphere", which finds the center
of mass on the spherical surface to help avoid potential issues
with cortical folding.
Returns
-------
vertex : int
Vertex of the spatial center of mass for the inferred hemisphere,
with each vertex weighted by the sum of the stc across time. For a
boolean stc, then, this would be weighted purely by the duration
each vertex was active.
hemi : int
Hemisphere the vertex was taken from.
t : float
Time of the temporal center of mass (weighted by the sum across
source vertices).
See Also
--------
mne.Label.center_of_mass
mne.vertex_to_mni
References
----------
.. [1] Larson and Lee, "The cortical dynamics underlying effective
switching of auditory spatial attention", NeuroImage 2012.
"""
if not isinstance(surf, str):
raise TypeError('surf must be a string, got %s' % (type(surf),))
subject = _check_subject(self.subject, subject)
if np.any(self.data < 0):
raise ValueError('Cannot compute COM with negative values')
values = np.sum(self.data, axis=1) # sum across time
vert_inds = [np.arange(len(self.vertices[0])),
np.arange(len(self.vertices[1])) + len(self.vertices[0])]
if hemi is None:
hemi = np.where(np.array([np.sum(values[vi])
for vi in vert_inds]))[0]
if not len(hemi) == 1:
raise ValueError('Could not infer hemisphere')
hemi = hemi[0]
_check_option('hemi', hemi, [0, 1])
vertices = self.vertices[hemi]
values = values[vert_inds[hemi]] # left or right
del vert_inds
vertex = _center_of_mass(
vertices, values, hemi=['lh', 'rh'][hemi], surf=surf,
subject=subject, subjects_dir=subjects_dir,
restrict_vertices=restrict_vertices)
# do time center of mass by using the values across space
masses = np.sum(self.data, axis=0).astype(float)
t_ind = np.sum(masses * np.arange(self.shape[1])) / np.sum(masses)
t = self.tmin + self.tstep * t_ind
return vertex, hemi, t
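# A self-contained sketch: save a synthetic SourceEstimate in the default
# ``.stc`` format and read it back (temporary directory, arbitrary values).
def _example_save_and_reload():
    import tempfile
    rng = np.random.RandomState(0)
    vertices = [np.arange(3), np.arange(2)]
    stc = SourceEstimate(rng.randn(5, 4), vertices, tmin=-0.1, tstep=0.01,
                         subject='sample')
    with tempfile.TemporaryDirectory() as tmp:
        stem = op.join(tmp, 'example')
        stc.save(stem)  # writes example-lh.stc and example-rh.stc
        stc_read = read_source_estimate(stem, subject='sample')
    np.testing.assert_allclose(stc_read.data, stc.data, rtol=1e-5)
    return stc_read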
class _BaseVectorSourceEstimate(_BaseSourceEstimate):
_data_ndim = 3
@verbose
def __init__(self, data, vertices=None, tmin=None, tstep=None,
subject=None, verbose=None): # noqa: D102
assert hasattr(self, '_scalar_class')
super().__init__(data, vertices, tmin, tstep, subject, verbose)
def magnitude(self):
"""Compute magnitude of activity without directionality.
Returns
-------
stc : instance of SourceEstimate
The source estimate without directionality information.
"""
data_mag = np.linalg.norm(self.data, axis=1)
return self._scalar_class(
data_mag, self.vertices, self.tmin, self.tstep, self.subject,
self.verbose)
def _get_src_normals(self, src, use_cps):
normals = np.vstack([_get_src_nn(s, use_cps, v) for s, v in
zip(src, self.vertices)])
return normals
@fill_doc
def project(self, directions, src=None, use_cps=True):
"""Project the data for each vertex in a given direction.
Parameters
----------
directions : ndarray, shape (n_vertices, 3) | str
Can be:
- ``'normal'``
Project onto the source space normals.
- ``'pca'``
SVD will be used to project onto the direction of maximal
power for each source.
- :class:`~numpy.ndarray`, shape (n_vertices, 3)
Projection directions for each source.
src : instance of SourceSpaces | None
The source spaces corresponding to the source estimate.
Not used when ``directions`` is an array, optional when
``directions='pca'``.
%(use_cps)s
Should be the same value that was used when the forward model
was computed (typically True).
Returns
-------
stc : instance of SourceEstimate
The projected source estimate.
directions : ndarray, shape (n_vertices, 3)
The directions that were computed (or just used).
Notes
-----
When using SVD, there is a sign ambiguity for the direction of maximal
power. When ``src is None``, the direction is chosen that makes the
resulting time waveform sum positive (i.e., have positive amplitudes).
When ``src`` is provided, the directions are flipped in the direction
of the source normals, i.e., outward from cortex for surface source
spaces and in the +Z / superior direction for volume source spaces.
.. versionadded:: 0.21
"""
_validate_type(directions, (str, np.ndarray), 'directions')
_validate_type(src, (None, SourceSpaces), 'src')
if isinstance(directions, str):
_check_option('directions', directions, ('normal', 'pca'),
extra='when str')
if directions == 'normal':
if src is None:
raise ValueError(
'If directions="normal", src cannot be None')
_check_src_normal('normal', src)
directions = self._get_src_normals(src, use_cps)
else:
assert directions == 'pca'
x = self.data
if not np.isrealobj(self.data):
_check_option('stc.data.dtype', self.data.dtype,
(np.complex64, np.complex128))
dtype = \
np.float32 if x.dtype == np.complex64 else np.float64
x = x.view(dtype)
assert x.shape[-1] == 2 * self.data.shape[-1]
u, _, v = np.linalg.svd(x, full_matrices=False)
directions = u[:, :, 0]
# The sign is arbitrary, so let's flip it in the direction that
# makes the resulting time series the most positive:
if src is None:
signs = np.sum(v[:, 0].real, axis=1, keepdims=True)
else:
normals = self._get_src_normals(src, use_cps)
signs = np.sum(directions * normals, axis=1, keepdims=True)
assert signs.shape == (self.data.shape[0], 1)
signs = np.sign(signs)
signs[signs == 0] = 1.
directions *= signs
_check_option(
'directions.shape', directions.shape, [(self.data.shape[0], 3)])
data_norm = np.matmul(directions[:, np.newaxis], self.data)[:, 0]
stc = self._scalar_class(
data_norm, self.vertices, self.tmin, self.tstep, self.subject,
self.verbose)
return stc, directions
@copy_function_doc_to_method_doc(plot_vector_source_estimates)
def plot(self, subject=None, hemi='lh', colormap='hot', time_label='auto',
smoothing_steps=10, transparent=True, brain_alpha=0.4,
overlay_alpha=None, vector_alpha=1.0, scale_factor=None,
time_viewer='auto', subjects_dir=None, figure=None,
views='lateral',
colorbar=True, clim='auto', cortex='classic', size=800,
background='black', foreground=None, initial_time=None,
time_unit='s', show_traces='auto', src=None, volume_options=1.,
view_layout='vertical', add_data_kwargs=None,
verbose=None): # noqa: D102
return plot_vector_source_estimates(
self, subject=subject, hemi=hemi, colormap=colormap,
time_label=time_label, smoothing_steps=smoothing_steps,
transparent=transparent, brain_alpha=brain_alpha,
overlay_alpha=overlay_alpha, vector_alpha=vector_alpha,
scale_factor=scale_factor, time_viewer=time_viewer,
subjects_dir=subjects_dir, figure=figure, views=views,
colorbar=colorbar, clim=clim, cortex=cortex, size=size,
background=background, foreground=foreground,
initial_time=initial_time, time_unit=time_unit,
show_traces=show_traces, src=src, volume_options=volume_options,
view_layout=view_layout, add_data_kwargs=add_data_kwargs,
verbose=verbose)
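# A minimal sketch on a synthetic vector estimate: ``magnitude`` collapses
# the three orientations per source, while ``project('pca')`` keeps the
# dominant direction (no source space is needed for the PCA variant).
def _example_vector_magnitude_and_pca():
    rng = np.random.RandomState(0)
    vertices = [np.arange(2), np.arange(2)]
    data = rng.randn(4, 3, 6)  # (n_sources, 3, n_times)
    vstc = VectorSourceEstimate(data, vertices, tmin=0., tstep=0.001)
    assert vstc.magnitude().data.shape == (4, 6)
    stc_pca, directions = vstc.project('pca')
    assert stc_pca.data.shape == (4, 6)
    assert directions.shape == (4, 3)
    return stc_pca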
class _BaseVolSourceEstimate(_BaseSourceEstimate):
_src_type = 'volume'
_src_count = None
@copy_function_doc_to_method_doc(plot_source_estimates)
def plot_3d(self, subject=None, surface='white', hemi='both',
colormap='auto', time_label='auto', smoothing_steps=10,
transparent=True, alpha=0.1, time_viewer='auto',
subjects_dir=None,
figure=None, views='axial', colorbar=True, clim='auto',
cortex="classic", size=800, background="black",
foreground=None, initial_time=None, time_unit='s',
backend='auto', spacing='oct6', title=None, show_traces='auto',
src=None, volume_options=1., view_layout='vertical',
add_data_kwargs=None, verbose=None):
return super().plot(
subject=subject, surface=surface, hemi=hemi, colormap=colormap,
time_label=time_label, smoothing_steps=smoothing_steps,
transparent=transparent, alpha=alpha, time_viewer=time_viewer,
subjects_dir=subjects_dir,
figure=figure, views=views, colorbar=colorbar, clim=clim,
cortex=cortex, size=size, background=background,
foreground=foreground, initial_time=initial_time,
time_unit=time_unit, backend=backend, spacing=spacing, title=title,
show_traces=show_traces, src=src, volume_options=volume_options,
view_layout=view_layout, add_data_kwargs=add_data_kwargs,
verbose=verbose)
@copy_function_doc_to_method_doc(plot_volume_source_estimates)
def plot(self, src, subject=None, subjects_dir=None, mode='stat_map',
bg_img='T1.mgz', colorbar=True, colormap='auto', clim='auto',
transparent='auto', show=True, initial_time=None,
initial_pos=None, verbose=None):
data = self.magnitude() if self._data_ndim == 3 else self
return plot_volume_source_estimates(
data, src=src, subject=subject, subjects_dir=subjects_dir,
mode=mode, bg_img=bg_img, colorbar=colorbar, colormap=colormap,
clim=clim, transparent=transparent, show=show,
initial_time=initial_time, initial_pos=initial_pos,
verbose=verbose)
# Override here to provide the volume-specific options
@verbose
def extract_label_time_course(self, labels, src, mode='auto',
allow_empty=False, *, trans=None,
mri_resolution=True, verbose=None):
"""Extract label time courses for lists of labels.
This function will extract one time course for each label. The way the
time courses are extracted depends on the mode parameter.
Parameters
----------
%(eltc_labels)s
%(eltc_src)s
%(eltc_mode)s
%(eltc_allow_empty)s
%(trans_deprecated)s
%(eltc_mri_resolution)s
%(verbose_meth)s
Returns
-------
%(eltc_returns)s
See Also
--------
extract_label_time_course : Extract time courses for multiple STCs.
Notes
-----
%(eltc_mode_notes)s
"""
return extract_label_time_course(
self, labels, src, mode=mode, return_generator=False,
allow_empty=allow_empty, trans=trans,
mri_resolution=mri_resolution, verbose=verbose)
@fill_doc
def in_label(self, label, mri, src, trans=None):
"""Get a source estimate object restricted to a label.
SourceEstimate contains the time course of
activation of all sources inside the label.
Parameters
----------
label : str | int
The label to use. Can be the name of a label if using a standard
FreeSurfer atlas, or an integer value to extract from the ``mri``.
mri : str
Path to the atlas to use.
src : instance of SourceSpaces
The volumetric source space. It must be a single, whole-brain
volume.
%(trans_deprecated)s
Returns
-------
stc : VolSourceEstimate | VolVectorSourceEstimate
The source estimate restricted to the given label.
Notes
-----
.. versionadded:: 0.21.0
"""
if len(self.vertices) != 1:
raise RuntimeError('This method can only be used with whole-brain '
'volume source spaces')
_validate_type(label, (str, 'int-like'), 'label')
if isinstance(label, str):
volume_label = [label]
else:
volume_label = {'Volume ID %s' % (label): _ensure_int(label)}
_dep_trans(trans)
label = _volume_labels(src, (mri, volume_label), mri_resolution=False)
assert len(label) == 1
label = label[0]
vertices = label.vertices
keep = np.in1d(self.vertices[0], label.vertices)
values, vertices = self.data[keep], [self.vertices[0][keep]]
label_stc = self.__class__(values, vertices=vertices, tmin=self.tmin,
tstep=self.tstep, subject=self.subject)
return label_stc
def save_as_volume(self, fname, src, dest='mri', mri_resolution=False,
format='nifti1'):
"""Save a volume source estimate in a NIfTI file.
Parameters
----------
fname : str
The name of the generated nifti file.
src : list
The list of source spaces (should all be of type volume).
dest : 'mri' | 'surf'
If 'mri' the volume is defined in the coordinate system of
the original T1 image. If 'surf' the coordinate system
of the FreeSurfer surface is used (Surface RAS).
mri_resolution : bool
            If True, the image is saved in MRI resolution.
.. warning:: If you have many time points, the file produced can be
huge.
format : str
Either 'nifti1' (default) or 'nifti2'.
.. versionadded:: 0.17
Returns
-------
        img : instance of Nifti1Image
The image object.
Notes
-----
.. versionadded:: 0.9.0
"""
import nibabel as nib
_validate_type(fname, 'path-like', 'fname')
fname = str(fname)
img = self.as_volume(src, dest=dest, mri_resolution=mri_resolution,
format=format)
nib.save(img, fname)
def as_volume(self, src, dest='mri', mri_resolution=False,
format='nifti1'):
"""Export volume source estimate as a nifti object.
Parameters
----------
src : instance of SourceSpaces
The source spaces (should all be of type volume, or part of a
mixed source space).
dest : 'mri' | 'surf'
If 'mri' the volume is defined in the coordinate system of
the original T1 image. If 'surf' the coordinate system
of the FreeSurfer surface is used (Surface RAS).
mri_resolution : bool
            If True, the image is saved in MRI resolution.
.. warning:: If you have many time points, the file produced can be
huge.
format : str
Either 'nifti1' (default) or 'nifti2'.
Returns
-------
img : instance of Nifti1Image
The image object.
Notes
-----
.. versionadded:: 0.9.0
"""
from .morph import _interpolate_data
data = self.magnitude() if self._data_ndim == 3 else self
return _interpolate_data(data, src, mri_resolution=mri_resolution,
mri_space=True, output=format)
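# Editorial usage sketch (not part of the module): export a hypothetical
# volume source estimate ``stc`` defined on the volume source space ``src``
# to NIfTI, either as an in-memory nibabel image or written to disk at MRI
# resolution, using the two methods defined above.
def _example_export_volume(stc, src):
    img = stc.as_volume(src, mri_resolution=False)
    stc.save_as_volume('example-stc.nii.gz', src, mri_resolution=True)
    return img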
@fill_doc
class VolSourceEstimate(_BaseVolSourceEstimate):
"""Container for volume source estimates.
Parameters
----------
data : array of shape (n_dipoles, n_times) | tuple, shape (2,)
The data in source space. The data can either be a single array or
a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
"sens_data" shape (n_sensors, n_times). In this case, the source
space data corresponds to ``np.dot(kernel, sens_data)``.
vertices : array of shape (n_dipoles,)
The indices of the dipoles in the source space.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : array of shape (n_dipoles,)
The indices of the dipoles in the source space.
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
See Also
--------
SourceEstimate : A container for surface source estimates.
VolVectorSourceEstimate : A container for volume vector source estimates.
MixedSourceEstimate : A container for mixed surface + volume source
estimates.
Notes
-----
.. versionadded:: 0.9.0
"""
@verbose
def save(self, fname, ftype='stc', verbose=None):
"""Save the source estimates to a file.
Parameters
----------
fname : str
The stem of the file name. The stem is extended with "-vl.stc"
or "-vl.w".
ftype : str
File format to use. Allowed values are "stc" (default), "w",
and "h5". The "w" format only supports a single time point.
%(verbose_meth)s
"""
_validate_type(fname, 'path-like', 'fname')
fname = str(fname)
_check_option('ftype', ftype, ['stc', 'w', 'h5'])
if ftype != 'h5' and len(self.vertices) != 1:
raise ValueError('Can only write to .stc or .w if a single volume '
'source space was used, use .h5 instead')
if ftype != 'h5' and self.data.dtype == 'complex':
raise ValueError('Can only write non-complex data to .stc or .w'
', use .h5 instead')
if ftype == 'stc':
logger.info('Writing STC to disk...')
if not (fname.endswith('-vl.stc') or fname.endswith('-vol.stc')):
fname += '-vl.stc'
_write_stc(fname, tmin=self.tmin, tstep=self.tstep,
vertices=self.vertices[0], data=self.data)
elif ftype == 'w':
logger.info('Writing STC to disk (w format)...')
if not (fname.endswith('-vl.w') or fname.endswith('-vol.w')):
fname += '-vl.w'
_write_w(fname, vertices=self.vertices[0], data=self.data)
elif ftype == 'h5':
super().save(fname, 'h5')
logger.info('[done]')
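# Editorial usage sketch (not part of the module): ``stc`` is a hypothetical
# VolSourceEstimate. The default writer appends '-vl.stc' to the stem, while
# complex-valued data or multi-volume source spaces must use the HDF5 format.
def _example_save_vol_stc(stc):
    stc.save('example')              # written as 'example-vl.stc'
    stc.save('example', ftype='h5')  # HDF5 container, no format restrictions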
@fill_doc
class VolVectorSourceEstimate(_BaseVolSourceEstimate,
_BaseVectorSourceEstimate):
"""Container for volume source estimates.
Parameters
----------
data : array of shape (n_dipoles, 3, n_times)
The data in source space. Each dipole contains three vectors that
denote the dipole strength in X, Y and Z directions over time.
vertices : array of shape (n_dipoles,)
The indices of the dipoles in the source space.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : array of shape (n_dipoles,)
The indices of the dipoles in the source space.
    data : array of shape (n_dipoles, 3, n_times)
        The data in source space.
    shape : tuple
        The shape of the data. A tuple of int (n_dipoles, 3, n_times).
See Also
--------
SourceEstimate : A container for surface source estimates.
VectorSourceEstimate : A container for vector source estimates.
MixedSourceEstimate : A container for mixed surface + volume source
estimates.
Notes
-----
.. versionadded:: 0.9.0
"""
_scalar_class = VolSourceEstimate
# defaults differ: hemi='both', views='axial'
@copy_function_doc_to_method_doc(plot_vector_source_estimates)
def plot_3d(self, subject=None, hemi='both', colormap='hot',
time_label='auto',
smoothing_steps=10, transparent=True, brain_alpha=0.4,
overlay_alpha=None, vector_alpha=1.0, scale_factor=None,
time_viewer='auto', subjects_dir=None, figure=None,
views='axial',
colorbar=True, clim='auto', cortex='classic', size=800,
background='black', foreground=None, initial_time=None,
time_unit='s', show_traces='auto', src=None,
volume_options=1., view_layout='vertical',
add_data_kwargs=None, verbose=None): # noqa: D102
return _BaseVectorSourceEstimate.plot(
self, subject=subject, hemi=hemi, colormap=colormap,
time_label=time_label, smoothing_steps=smoothing_steps,
transparent=transparent, brain_alpha=brain_alpha,
overlay_alpha=overlay_alpha, vector_alpha=vector_alpha,
scale_factor=scale_factor, time_viewer=time_viewer,
subjects_dir=subjects_dir, figure=figure, views=views,
colorbar=colorbar, clim=clim, cortex=cortex, size=size,
background=background, foreground=foreground,
initial_time=initial_time, time_unit=time_unit,
show_traces=show_traces, src=src, volume_options=volume_options,
view_layout=view_layout, add_data_kwargs=add_data_kwargs,
verbose=verbose)
@fill_doc
class VectorSourceEstimate(_BaseVectorSourceEstimate,
_BaseSurfaceSourceEstimate):
"""Container for vector surface source estimates.
For each vertex, the magnitude of the current is defined in the X, Y and Z
directions.
Parameters
----------
data : array of shape (n_dipoles, 3, n_times)
The data in source space. Each dipole contains three vectors that
denote the dipole strength in X, Y and Z directions over time.
vertices : list of array, shape (2,)
Vertex numbers corresponding to the data. The first element of the list
contains vertices of left hemisphere and the second element contains
vertices of right hemisphere.
tmin : float
Time point of the first sample in data.
tstep : float
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
    shape : tuple
        The shape of the data. A tuple of int (n_dipoles, 3, n_times).
See Also
--------
SourceEstimate : A container for surface source estimates.
VolSourceEstimate : A container for volume source estimates.
MixedSourceEstimate : A container for mixed surface + volume source
estimates.
Notes
-----
.. versionadded:: 0.15
"""
_scalar_class = SourceEstimate
###############################################################################
# Mixed source estimate (two cortical surfs plus other stuff)
class _BaseMixedSourceEstimate(_BaseSourceEstimate):
_src_type = 'mixed'
_src_count = None
@verbose
def __init__(self, data, vertices=None, tmin=None, tstep=None,
subject=None, verbose=None): # noqa: D102
if not isinstance(vertices, list) or len(vertices) < 2:
raise ValueError('Vertices must be a list of numpy arrays with '
'one array per source space.')
super().__init__(data, vertices=vertices, tmin=tmin,
tstep=tstep, subject=subject,
verbose=verbose)
@property
def _n_surf_vert(self):
return sum(len(v) for v in self.vertices[:2])
def surface(self):
"""Return the cortical surface source estimate.
Returns
-------
stc : instance of SourceEstimate or VectorSourceEstimate
The surface source estimate.
"""
if self._data_ndim == 3:
klass = VectorSourceEstimate
else:
klass = SourceEstimate
return klass(
self.data[:self._n_surf_vert], self.vertices[:2],
self.tmin, self.tstep, self.subject, self.verbose)
def volume(self):
"""Return the volume surface source estimate.
Returns
-------
stc : instance of VolSourceEstimate or VolVectorSourceEstimate
The volume source estimate.
"""
if self._data_ndim == 3:
klass = VolVectorSourceEstimate
else:
klass = VolSourceEstimate
return klass(
self.data[self._n_surf_vert:], self.vertices[2:],
self.tmin, self.tstep, self.subject, self.verbose)
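# Editorial usage sketch (not part of the module): split a hypothetical mixed
# source estimate ``stc_mixed`` into its cortical-surface and volume parts
# using the two accessors defined above.
def _example_split_mixed(stc_mixed):
    stc_surf = stc_mixed.surface()  # SourceEstimate or VectorSourceEstimate
    stc_vol = stc_mixed.volume()    # VolSourceEstimate or VolVectorSourceEstimate
    return stc_surf, stc_vol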
@fill_doc
class MixedSourceEstimate(_BaseMixedSourceEstimate):
"""Container for mixed surface and volume source estimates.
Parameters
----------
data : array of shape (n_dipoles, n_times) | tuple, shape (2,)
The data in source space. The data can either be a single array or
a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
"sens_data" shape (n_sensors, n_times). In this case, the source
space data corresponds to ``np.dot(kernel, sens_data)``.
vertices : list of array
Vertex numbers corresponding to the data. The list contains arrays
with one array per source space.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : list of array
Vertex numbers corresponding to the data. The list contains arrays
with one array per source space.
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
See Also
--------
SourceEstimate : A container for surface source estimates.
VectorSourceEstimate : A container for vector source estimates.
VolSourceEstimate : A container for volume source estimates.
VolVectorSourceEstimate : A container for Volume vector source estimates.
Notes
-----
.. versionadded:: 0.9.0
"""
@fill_doc
class MixedVectorSourceEstimate(_BaseVectorSourceEstimate,
_BaseMixedSourceEstimate):
"""Container for volume source estimates.
Parameters
----------
data : array, shape (n_dipoles, 3, n_times)
The data in source space. Each dipole contains three vectors that
denote the dipole strength in X, Y and Z directions over time.
vertices : list of array, shape (n_src,)
Vertex numbers corresponding to the data.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array, shape (n_times,)
The time vector.
    vertices : list of array, shape (n_src,)
        Vertex numbers corresponding to the data.
    data : array of shape (n_dipoles, 3, n_times)
        The data in source space.
    shape : tuple
        The shape of the data. A tuple of int (n_dipoles, 3, n_times).
See Also
--------
MixedSourceEstimate : A container for mixed surface + volume source
estimates.
Notes
-----
.. versionadded:: 0.21.0
"""
_scalar_class = MixedSourceEstimate
###############################################################################
# Morphing
def _get_vol_mask(src):
"""Get the volume source space mask."""
assert len(src) == 1 # not a mixed source space
shape = src[0]['shape'][::-1]
mask = np.zeros(shape, bool)
mask.flat[src[0]['vertno']] = True
return mask
def _spatio_temporal_src_adjacency_vol(src, n_times):
from sklearn.feature_extraction import grid_to_graph
mask = _get_vol_mask(src)
edges = grid_to_graph(*mask.shape, mask=mask)
adjacency = _get_adjacency_from_edges(edges, n_times)
return adjacency
def _spatio_temporal_src_adjacency_surf(src, n_times):
if src[0]['use_tris'] is None:
# XXX It would be nice to support non oct source spaces too...
raise RuntimeError("The source space does not appear to be an ico "
"surface. adjacency cannot be extracted from"
" non-ico source spaces.")
used_verts = [np.unique(s['use_tris']) for s in src]
offs = np.cumsum([0] + [len(u_v) for u_v in used_verts])[:-1]
tris = np.concatenate([np.searchsorted(u_v, s['use_tris']) + off
for u_v, s, off in zip(used_verts, src, offs)])
adjacency = spatio_temporal_tris_adjacency(tris, n_times)
# deal with source space only using a subset of vertices
masks = [np.in1d(u, s['vertno']) for s, u in zip(src, used_verts)]
if sum(u.size for u in used_verts) != adjacency.shape[0] / n_times:
raise ValueError('Used vertices do not match adjacency shape')
if [np.sum(m) for m in masks] != [len(s['vertno']) for s in src]:
raise ValueError('Vertex mask does not match number of vertices')
masks = np.concatenate(masks)
missing = 100 * float(len(masks) - np.sum(masks)) / len(masks)
if missing:
warn('%0.1f%% of original source space vertices have been'
' omitted, tri-based adjacency will have holes.\n'
'Consider using distance-based adjacency or '
'morphing data to all source space vertices.' % missing)
masks = np.tile(masks, n_times)
masks = np.where(masks)[0]
adjacency = adjacency.tocsr()
adjacency = adjacency[masks]
adjacency = adjacency[:, masks]
# return to original format
adjacency = adjacency.tocoo()
return adjacency
@verbose
def spatio_temporal_src_adjacency(src, n_times, dist=None, verbose=None):
"""Compute adjacency for a source space activation over time.
Parameters
----------
src : instance of SourceSpaces
The source space. It can be a surface source space or a
volume source space.
n_times : int
Number of time instants.
dist : float, or None
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors. If None, immediate neighbors
are extracted from an ico surface.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatio-temporal
graph structure. If N is the number of vertices in the
        source space, the first N nodes in the graph are the
        vertices at time 1, the nodes from N+1 to 2N are the vertices
        at time 2, etc.
"""
# XXX we should compute adjacency for each source space and then
# use scipy.sparse.block_diag to concatenate them
if src[0]['type'] == 'vol':
if dist is not None:
raise ValueError('dist must be None for a volume '
'source space. Got %s.' % dist)
adjacency = _spatio_temporal_src_adjacency_vol(src, n_times)
elif dist is not None:
# use distances computed and saved in the source space file
adjacency = spatio_temporal_dist_adjacency(src, n_times, dist)
else:
adjacency = _spatio_temporal_src_adjacency_surf(src, n_times)
return adjacency
@verbose
def grade_to_tris(grade, verbose=None):
"""Get tris defined for a certain grade.
Parameters
----------
grade : int
Grade of an icosahedral mesh.
%(verbose)s
Returns
-------
tris : list
2-element list containing Nx3 arrays of tris, suitable for use in
spatio_temporal_tris_adjacency.
"""
a = _get_ico_tris(grade, None, False)
tris = np.concatenate((a, a + (np.max(a) + 1)))
return tris
@verbose
def spatio_temporal_tris_adjacency(tris, n_times, remap_vertices=False,
verbose=None):
"""Compute adjacency from triangles and time instants.
Parameters
----------
tris : array
N x 3 array defining triangles.
n_times : int
Number of time points.
remap_vertices : bool
Reassign vertex indices based on unique values. Useful
to process a subset of triangles. Defaults to False.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatio-temporal
graph structure. If N is the number of vertices in the
        source space, the first N nodes in the graph are the
        vertices at time 1, the nodes from N+1 to 2N are the vertices
        at time 2, etc.
"""
if remap_vertices:
logger.info('Reassigning vertex indices.')
tris = np.searchsorted(np.unique(tris), tris)
edges = mesh_edges(tris)
edges = (edges + sparse.eye(edges.shape[0], format='csr')).tocoo()
return _get_adjacency_from_edges(edges, n_times)
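# Editorial sketch (not part of the module): two triangles sharing an edge
# give four vertices; over two time points the spatio-temporal adjacency is
# therefore an 8 x 8 sparse matrix (spatial edges plus temporal links).
def _example_tris_adjacency():
    tris = np.array([[0, 1, 2], [1, 2, 3]])
    adjacency = spatio_temporal_tris_adjacency(tris, n_times=2)
    assert adjacency.shape == (8, 8)
    return adjacency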
@verbose
def spatio_temporal_dist_adjacency(src, n_times, dist, verbose=None):
"""Compute adjacency from distances in a source space and time instants.
Parameters
----------
src : instance of SourceSpaces
The source space must have distances between vertices computed, such
that src['dist'] exists and is useful. This can be obtained
with a call to :func:`mne.setup_source_space` with the
``add_dist=True`` option.
n_times : int
Number of time points.
dist : float
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatio-temporal
graph structure. If N is the number of vertices in the
        source space, the first N nodes in the graph are the
        vertices at time 1, the nodes from N+1 to 2N are the vertices
        at time 2, etc.
"""
if src[0]['dist'] is None:
raise RuntimeError('src must have distances included, consider using '
'setup_source_space with add_dist=True')
blocks = [s['dist'][s['vertno'], :][:, s['vertno']] for s in src]
# Ensure we keep explicit zeros; deal with changes in SciPy
for block in blocks:
if isinstance(block, np.ndarray):
block[block == 0] = -np.inf
else:
            block.data[block.data == 0] = -np.inf
edges = sparse_block_diag(blocks)
edges.data[:] = np.less_equal(edges.data, dist)
# clean it up and put it in coo format
edges = edges.tocsr()
edges.eliminate_zeros()
edges = edges.tocoo()
return _get_adjacency_from_edges(edges, n_times)
@verbose
def spatial_src_adjacency(src, dist=None, verbose=None):
"""Compute adjacency for a source space activation.
Parameters
----------
src : instance of SourceSpaces
The source space. It can be a surface source space or a
volume source space.
dist : float, or None
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors. If None, immediate neighbors
are extracted from an ico surface.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatial graph structure.
"""
return spatio_temporal_src_adjacency(src, 1, dist)
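# Editorial usage sketch (not part of the module): ``src`` is a hypothetical
# surface SourceSpaces instance. Without ``dist`` the ico triangulation gives
# immediate neighbors; with ``dist`` the precomputed geodesic distances are
# used, which requires setup_source_space(..., add_dist=True).
def _example_spatial_adjacency(src):
    adj_ico = spatial_src_adjacency(src)
    adj_5mm = spatial_src_adjacency(src, dist=0.005)
    return adj_ico, adj_5mm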
@verbose
def spatial_tris_adjacency(tris, remap_vertices=False, verbose=None):
"""Compute adjacency from triangles.
Parameters
----------
tris : array
N x 3 array defining triangles.
remap_vertices : bool
Reassign vertex indices based on unique values. Useful
to process a subset of triangles. Defaults to False.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatial graph structure.
"""
return spatio_temporal_tris_adjacency(tris, 1, remap_vertices)
@verbose
def spatial_dist_adjacency(src, dist, verbose=None):
"""Compute adjacency from distances in a source space.
Parameters
----------
src : instance of SourceSpaces
The source space must have distances between vertices computed, such
that src['dist'] exists and is useful. This can be obtained
with a call to :func:`mne.setup_source_space` with the
``add_dist=True`` option.
dist : float
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatial graph structure.
"""
return spatio_temporal_dist_adjacency(src, 1, dist)
@verbose
def spatial_inter_hemi_adjacency(src, dist, verbose=None):
"""Get vertices on each hemisphere that are close to the other hemisphere.
Parameters
----------
src : instance of SourceSpaces
The source space. Must be surface type.
dist : float
Maximal Euclidean distance (in m) between vertices in one hemisphere
compared to the other to consider neighbors.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatial graph structure.
        Typically this should be combined (additively) with another
existing intra-hemispheric adjacency matrix, e.g. computed
using geodesic distances.
"""
from scipy.spatial.distance import cdist
src = _ensure_src(src, kind='surface')
adj = cdist(src[0]['rr'][src[0]['vertno']],
src[1]['rr'][src[1]['vertno']])
adj = sparse.csr_matrix(adj <= dist, dtype=int)
empties = [sparse.csr_matrix((nv, nv), dtype=int) for nv in adj.shape]
adj = sparse.vstack([sparse.hstack([empties[0], adj]),
sparse.hstack([adj.T, empties[1]])])
return adj
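# Editorial usage sketch (not part of the module): combine intra-hemispheric
# adjacency with inter-hemispheric links for vertices of a hypothetical
# surface source space ``src`` that lie within 5 mm of the other hemisphere,
# as the docstring above suggests.
def _example_inter_hemi_adjacency(src):
    adjacency = spatial_src_adjacency(src)
    adjacency = adjacency + spatial_inter_hemi_adjacency(src, dist=0.005)
    return adjacency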
@verbose
def _get_adjacency_from_edges(edges, n_times, verbose=None):
"""Given edges sparse matrix, create adjacency matrix."""
n_vertices = edges.shape[0]
logger.info("-- number of adjacent vertices : %d" % n_vertices)
nnz = edges.col.size
aux = n_vertices * np.tile(np.arange(n_times)[:, None], (1, nnz))
col = (edges.col[None, :] + aux).ravel()
row = (edges.row[None, :] + aux).ravel()
if n_times > 1: # add temporal edges
o = (n_vertices * np.arange(n_times - 1)[:, None] +
np.arange(n_vertices)[None, :]).ravel()
d = (n_vertices * np.arange(1, n_times)[:, None] +
np.arange(n_vertices)[None, :]).ravel()
row = np.concatenate((row, o, d))
col = np.concatenate((col, d, o))
data = np.ones(edges.data.size * n_times + 2 * n_vertices * (n_times - 1),
dtype=np.int64)
adjacency = coo_matrix((data, (row, col)),
shape=(n_times * n_vertices,) * 2)
return adjacency
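# Editorial sketch (not part of the module): a three-vertex chain (edges 0-1
# and 1-2) unrolled over two time points yields a 6 x 6 adjacency matrix,
# with the spatial edges repeated per time point plus one temporal link per
# vertex between consecutive time points.
def _example_adjacency_from_edges():
    edges = sparse.coo_matrix(np.array([[0, 1, 0],
                                        [1, 0, 1],
                                        [0, 1, 0]]))
    adjacency = _get_adjacency_from_edges(edges, n_times=2)
    assert adjacency.shape == (6, 6)
    return adjacency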
@verbose
def _get_ico_tris(grade, verbose=None, return_surf=False):
"""Get triangles for ico surface."""
ico = _get_ico_surface(grade)
if not return_surf:
return ico['tris']
else:
return ico
def _pca_flip(flip, data):
U, s, V = linalg.svd(data, full_matrices=False)
# determine sign-flip
sign = np.sign(np.dot(U[:, 0], flip))
# use average power in label for scaling
scale = linalg.norm(s) / np.sqrt(len(data))
return sign * scale * V[0]
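# Editorial sketch (not part of the module): apply the PCA-with-sign-flip
# summary to random data for five sources; the result is a single time
# course scaled by the average power within the label.
def _example_pca_flip():
    rng_ex = np.random.RandomState(0)
    data = rng_ex.randn(5, 20)
    flip = np.ones((5, 1))  # as returned by label_sign_flip(...)[:, None]
    tc = _pca_flip(flip, data)
    assert tc.shape == (20,)
    return tc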
_label_funcs = {
'mean': lambda flip, data: np.mean(data, axis=0),
'mean_flip': lambda flip, data: np.mean(flip * data, axis=0),
'max': lambda flip, data: np.max(np.abs(data), axis=0),
'pca_flip': _pca_flip,
}
@contextlib.contextmanager
def _temporary_vertices(src, vertices):
orig_vertices = [s['vertno'] for s in src]
for s, v in zip(src, vertices):
s['vertno'] = v
try:
yield
finally:
for s, v in zip(src, orig_vertices):
s['vertno'] = v
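# Editorial sketch (not part of the module): temporarily restrict the
# 'vertno' entries of a minimal fake source-space structure; the original
# vertices are restored when the context manager exits.
def _example_temporary_vertices():
    fake_src = [dict(vertno=np.arange(10)), dict(vertno=np.arange(10))]
    with _temporary_vertices(fake_src, [np.arange(5), np.arange(3)]):
        assert len(fake_src[0]['vertno']) == 5
    assert len(fake_src[0]['vertno']) == 10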
def _check_stc_src(stc, src):
if stc is not None and src is not None:
for s, v, hemi in zip(src, stc.vertices, ('left', 'right')):
n_missing = (~np.in1d(v, s['vertno'])).sum()
if n_missing:
raise ValueError('%d/%d %s hemisphere stc vertices '
'missing from the source space, likely '
'mismatch' % (n_missing, len(v), hemi))
def _prepare_label_extraction(stc, labels, src, mode, allow_empty, use_sparse):
"""Prepare indices and flips for extract_label_time_course."""
# If src is a mixed src space, the first 2 src spaces are surf type and
# the other ones are vol type. For mixed source space n_labels will be
# given by the number of ROIs of the cortical parcellation plus the number
# of vol src space.
# If stc=None (i.e. no activation time courses provided) and mode='mean',
# only computes vertex indices and label_flip will be list of None.
from .label import label_sign_flip, Label, BiHemiLabel
# if source estimate provided in stc, get vertices from source space and
# check that they are the same as in the stcs
_check_stc_src(stc, src)
vertno = [s['vertno'] for s in src] if stc is None else stc.vertices
nvert = [len(vn) for vn in vertno]
# initialization
label_flip = list()
label_vertidx = list()
bad_labels = list()
for li, label in enumerate(labels):
if use_sparse:
assert isinstance(label, dict)
vertidx = label['csr']
# This can happen if some labels aren't present in the space
if vertidx.shape[0] == 0:
bad_labels.append(label['name'])
vertidx = None
# Efficiency shortcut: use linearity early to avoid redundant
# calculations
elif mode == 'mean':
vertidx = sparse.csr_matrix(vertidx.mean(axis=0))
label_vertidx.append(vertidx)
label_flip.append(None)
continue
# standard case
_validate_type(label, (Label, BiHemiLabel), 'labels[%d]' % (li,))
if label.hemi == 'both':
# handle BiHemiLabel
sub_labels = [label.lh, label.rh]
else:
sub_labels = [label]
this_vertidx = list()
for slabel in sub_labels:
if slabel.hemi == 'lh':
this_vertices = np.intersect1d(vertno[0], slabel.vertices)
vertidx = np.searchsorted(vertno[0], this_vertices)
elif slabel.hemi == 'rh':
this_vertices = np.intersect1d(vertno[1], slabel.vertices)
vertidx = nvert[0] + np.searchsorted(vertno[1], this_vertices)
else:
raise ValueError('label %s has invalid hemi' % label.name)
this_vertidx.append(vertidx)
# convert it to an array
this_vertidx = np.concatenate(this_vertidx)
this_flip = None
if len(this_vertidx) == 0:
bad_labels.append(label.name)
this_vertidx = None # to later check if label is empty
elif mode not in ('mean', 'max'): # mode-dependent initialization
# label_sign_flip uses two properties:
#
# - src[ii]['nn']
# - src[ii]['vertno']
#
# So if we override vertno with the stc vertices, it will pick
# the correct normals.
with _temporary_vertices(src, stc.vertices):
this_flip = label_sign_flip(label, src[:2])[:, None]
label_vertidx.append(this_vertidx)
label_flip.append(this_flip)
if len(bad_labels):
msg = ('source space does not contain any vertices for %d label%s:\n%s'
% (len(bad_labels), _pl(bad_labels), bad_labels))
if not allow_empty:
raise ValueError(msg)
else:
msg += '\nAssigning all-zero time series.'
if allow_empty == 'ignore':
logger.info(msg)
else:
warn(msg)
return label_vertidx, label_flip
def _vol_src_rr(src):
return apply_trans(
src[0]['src_mri_t'], np.array(
[d.ravel(order='F')
for d in np.meshgrid(
*(np.arange(s) for s in src[0]['shape']),
indexing='ij')],
float).T)
def _volume_labels(src, labels, mri_resolution):
# This will create Label objects that should do the right thing for our
# given volumetric source space when used with extract_label_time_course
from .label import Label
assert src.kind == 'volume'
extra = ' when using a volume source space'
_import_nibabel('use volume atlas labels')
_validate_type(labels, ('path-like', list, tuple), 'labels' + extra)
if _check_path_like(labels):
mri = labels
infer_labels = True
else:
if len(labels) != 2:
raise ValueError('labels, if list or tuple, must have length 2, '
'got %s' % (len(labels),))
mri, labels = labels
infer_labels = False
_validate_type(mri, 'path-like', 'labels[0]' + extra)
logger.info('Reading atlas %s' % (mri,))
vol_info = _get_mri_info_data(str(mri), data=True)
atlas_data = vol_info['data']
atlas_values = np.unique(atlas_data)
if atlas_values.dtype.kind == 'f': # MGZ will be 'i'
atlas_values = atlas_values[np.isfinite(atlas_values)]
if not (atlas_values == np.round(atlas_values)).all():
raise RuntimeError('Non-integer values present in atlas, cannot '
'labelize')
atlas_values = np.round(atlas_values).astype(np.int64)
if infer_labels:
labels = {
k: v for k, v in read_freesurfer_lut()[0].items()
if v in atlas_values}
labels = _check_volume_labels(labels, mri, name='labels[1]')
assert isinstance(labels, dict)
del atlas_values
vox_mri_t = vol_info['vox_mri_t']
want = src[0].get('vox_mri_t', None)
if want is None:
raise RuntimeError(
'Cannot use volumetric atlas if no mri was supplied during '
'source space creation')
vox_mri_t, want = vox_mri_t['trans'], want['trans']
if not np.allclose(vox_mri_t, want, atol=1e-6):
raise RuntimeError(
'atlas vox_mri_t does not match that used to create the source '
'space')
src_shape = tuple(src[0]['mri_' + k] for k in ('width', 'height', 'depth'))
atlas_shape = atlas_data.shape
if atlas_shape != src_shape:
raise RuntimeError('atlas shape %s does not match source space MRI '
'shape %s' % (atlas_shape, src_shape))
atlas_data = atlas_data.ravel(order='F')
if mri_resolution:
# Upsample then just index
out_labels = list()
nnz = 0
interp = src[0]['interpolator']
# should be guaranteed by size checks above and our src interp code
assert interp.shape[0] == np.prod(src_shape)
assert interp.shape == (atlas_data.size, len(src[0]['rr']))
interp = interp[:, src[0]['vertno']]
for k, v in labels.items():
mask = atlas_data == v
csr = interp[mask]
out_labels.append(dict(csr=csr, name=k))
nnz += csr.shape[0] > 0
else:
# Use nearest values
vertno = src[0]['vertno']
rr = _vol_src_rr(src)
del src
src_values = _get_atlas_values(vol_info, rr[vertno])
vertices = [vertno[src_values == val] for val in labels.values()]
out_labels = [Label(v, hemi='lh', name=val)
for v, val in zip(vertices, labels.keys())]
nnz = sum(len(v) != 0 for v in vertices)
logger.info('%d/%d atlas regions had at least one vertex '
'in the source space' % (nnz, len(out_labels)))
return out_labels
def _dep_trans(trans):
if trans is not None:
warn('trans is no longer needed and will be removed in 0.23, do not '
'pass it as an argument', DeprecationWarning)
def _get_default_label_modes():
return sorted(_label_funcs.keys()) + ['auto']
def _get_allowed_label_modes(stc):
if isinstance(stc, (_BaseVolSourceEstimate,
_BaseVectorSourceEstimate)):
return ('mean', 'max', 'auto')
else:
return _get_default_label_modes()
def _gen_extract_label_time_course(stcs, labels, src, mode='mean',
allow_empty=False, trans=None,
mri_resolution=True, verbose=None):
# loop through source estimates and extract time series
if src is None and mode in ['mean', 'max']:
kind = 'surface'
else:
_validate_type(src, SourceSpaces)
kind = src.kind
_dep_trans(trans)
_check_option('mode', mode, _get_default_label_modes())
if kind in ('surface', 'mixed'):
if not isinstance(labels, list):
labels = [labels]
use_sparse = False
else:
labels = _volume_labels(src, labels, mri_resolution)
use_sparse = bool(mri_resolution)
n_mode = len(labels) # how many processed with the given mode
n_mean = len(src[2:]) if kind == 'mixed' else 0
n_labels = n_mode + n_mean
vertno = func = None
for si, stc in enumerate(stcs):
_validate_type(stc, _BaseSourceEstimate, 'stcs[%d]' % (si,),
'source estimate')
_check_option(
'mode', mode, _get_allowed_label_modes(stc),
'when using a vector and/or volume source estimate')
if isinstance(stc, (_BaseVolSourceEstimate,
_BaseVectorSourceEstimate)):
mode = 'mean' if mode == 'auto' else mode
else:
mode = 'mean_flip' if mode == 'auto' else mode
if vertno is None:
vertno = copy.deepcopy(stc.vertices) # avoid keeping a ref
nvert = np.array([len(v) for v in vertno])
label_vertidx, src_flip = _prepare_label_extraction(
stc, labels, src, mode, allow_empty, use_sparse)
func = _label_funcs[mode]
# make sure the stc is compatible with the source space
if len(vertno) != len(stc.vertices):
raise ValueError('stc not compatible with source space')
for vn, svn in zip(vertno, stc.vertices):
if len(vn) != len(svn):
raise ValueError('stc not compatible with source space. '
'stc has %s time series but there are %s '
'vertices in source space. Ensure you used '
'src from the forward or inverse operator, '
'as forward computation can exclude vertices.'
% (len(svn), len(vn)))
if not np.array_equal(svn, vn):
raise ValueError('stc not compatible with source space')
logger.info('Extracting time courses for %d labels (mode: %s)'
% (n_labels, mode))
# do the extraction
label_tc = np.zeros((n_labels,) + stc.data.shape[1:],
dtype=stc.data.dtype)
for i, (vertidx, flip) in enumerate(zip(label_vertidx, src_flip)):
if vertidx is not None:
if isinstance(vertidx, sparse.csr_matrix):
assert mri_resolution
assert vertidx.shape[1] == stc.data.shape[0]
this_data = np.reshape(stc.data, (stc.data.shape[0], -1))
this_data = vertidx @ this_data
this_data.shape = \
(this_data.shape[0],) + stc.data.shape[1:]
else:
this_data = stc.data[vertidx]
label_tc[i] = func(flip, this_data)
# extract label time series for the vol src space (only mean supported)
offset = nvert[:-n_mean].sum() # effectively :2 or :0
for i, nv in enumerate(nvert[2:]):
if nv != 0:
v2 = offset + nv
label_tc[n_mode + i] = np.mean(stc.data[offset:v2], axis=0)
offset = v2
# this is a generator!
yield label_tc
@verbose
def extract_label_time_course(stcs, labels, src, mode='auto',
allow_empty=False, return_generator=False,
*, trans=None, mri_resolution=True,
verbose=None):
"""Extract label time course for lists of labels and source estimates.
This function will extract one time course for each label and source
estimate. The way the time courses are extracted depends on the mode
parameter (see Notes).
Parameters
----------
stcs : SourceEstimate | list (or generator) of SourceEstimate
The source estimates from which to extract the time course.
%(eltc_labels)s
%(eltc_src)s
%(eltc_mode)s
%(eltc_allow_empty)s
return_generator : bool
If True, a generator instead of a list is returned.
%(trans_deprecated)s
%(eltc_mri_resolution)s
%(verbose)s
Returns
-------
%(eltc_returns)s
Notes
-----
%(eltc_mode_notes)s
If encountering a ``ValueError`` due to mismatch between number of
source points in the subject source space and computed ``stc`` object set
``src`` argument to ``fwd['src']`` or ``inv['src']`` to ensure the source
space is the one actually used by the inverse to compute the source
time courses.
"""
# convert inputs to lists
if not isinstance(stcs, (list, tuple, GeneratorType)):
stcs = [stcs]
return_several = False
return_generator = False
else:
return_several = True
label_tc = _gen_extract_label_time_course(
stcs, labels, src, mode=mode, allow_empty=allow_empty,
trans=trans, mri_resolution=mri_resolution)
if not return_generator:
# do the extraction and return a list
label_tc = list(label_tc)
if not return_several:
            # input was a single SourceEstimate, return single array
label_tc = label_tc[0]
return label_tc
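# Editorial usage sketch (not part of the module): ``stcs``, ``labels`` and
# ``src`` are hypothetical inputs (e.g. source estimates from an inverse
# solution, labels read from a FreeSurfer parcellation, and the source space
# stored in the forward or inverse operator).
def _example_extract_label_time_course(stcs, labels, src):
    label_tc = extract_label_time_course(stcs, labels, src, mode='mean_flip')
    return label_tc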
@verbose
def stc_near_sensors(evoked, trans, subject, distance=0.01, mode='sum',
project=True, subjects_dir=None, src=None, verbose=None):
"""Create a STC from ECoG and sEEG sensor data.
Parameters
----------
evoked : instance of Evoked
        The evoked data. Must contain ECoG or sEEG channels.
%(trans)s
subject : str
The subject name.
distance : float
Distance (m) defining the activation "ball" of the sensor.
mode : str
Can be "sum" to do a linear sum of weights, "nearest" to
use only the weight of the nearest sensor, or "zero" to use a
zero-order hold. See Notes.
project : bool
        If True, project the electrodes to the nearest ``'pial'`` surface
vertex before computing distances. Only used when doing a
surface projection.
%(subjects_dir)s
src : instance of SourceSpaces
The source space.
.. warning:: If a surface source space is used, make sure that
``surf='pial'`` was used during construction.
%(verbose)s
Returns
-------
stc : instance of SourceEstimate
The surface source estimate. If src is None, a surface source
estimate will be produced, and the number of vertices will equal
the number of pial-surface vertices that were close enough to
        the sensors to take on a non-zero value. If src is not None,
a surface, volume, or mixed source estimate will be produced
(depending on the kind of source space passed) and the
        vertices will match those of src (i.e., there may be
many all-zero values in stc.data).
Notes
-----
For surface projections, this function projects the ECoG sensors to
the pial surface (if ``project``), then the activation at each pial
surface vertex is given by the mode:
- ``'sum'``
Activation is the sum across each sensor weighted by the fractional
``distance`` from each sensor. A sensor with zero distance gets weight
1 and a sensor at ``distance`` meters away (or larger) gets weight 0.
If ``distance`` is less than the distance between any two electrodes,
this will be the same as ``'nearest'``.
    - ``'single'``
        Same as ``'sum'`` except that only the nearest electrode is used,
        rather than summing across electrodes within the ``distance`` radius.
        Equivalent to ``'nearest'`` for vertices with distance zero to the
        projected sensor.
- ``'nearest'``
The value is given by the value of the nearest sensor, up to a
``distance`` (beyond which it is zero).
If creating a Volume STC, ``src`` must be passed in, and this
function will project sEEG sensors to nearby surrounding vertices.
Then the activation at each volume vertex is given by the mode
in the same way as ECoG surface projections.
.. versionadded:: 0.22
"""
from scipy.spatial.distance import cdist, pdist
from .evoked import Evoked
_validate_type(evoked, Evoked, 'evoked')
_validate_type(mode, str, 'mode')
_validate_type(src, (None, SourceSpaces), 'src')
_check_option('mode', mode, ('sum', 'single', 'nearest'))
# create a copy of Evoked using ecog and seeg
evoked = evoked.copy().pick_types(ecog=True, seeg=True)
# get channel positions that will be used to pinpoint where
# in the Source space we will use the evoked data
pos = evoked._get_channel_positions()
# remove nan channels
nan_inds = np.where(np.isnan(pos).any(axis=1))[0]
nan_chs = [evoked.ch_names[idx] for idx in nan_inds]
evoked.drop_channels(nan_chs)
pos = [pos[idx] for idx in range(len(pos)) if idx not in nan_inds]
# coord_frame transformation from native mne "head" to MRI coord_frame
trans, _ = _get_trans(trans, 'head', 'mri', allow_none=True)
# convert head positions -> coord_frame MRI
pos = apply_trans(trans, pos)
subject = _check_subject(None, subject, False)
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
if src is None: # fake a full surface one
rrs = [read_surface(op.join(subjects_dir, subject,
'surf', f'{hemi}.pial'))[0]
for hemi in ('lh', 'rh')]
src = SourceSpaces([
dict(rr=rr / 1000., vertno=np.arange(len(rr)), type='surf',
coord_frame=FIFF.FIFFV_COORD_MRI)
for rr in rrs])
del rrs
keep_all = False
else:
keep_all = True
# ensure it's a usable one
klass = dict(
surface=SourceEstimate,
volume=VolSourceEstimate,
mixed=MixedSourceEstimate,
)
_check_option('src.kind', src.kind, sorted(klass.keys()))
klass = klass[src.kind]
rrs = np.concatenate([s['rr'][s['vertno']] for s in src])
if src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD:
rrs = apply_trans(trans, rrs)
# projection will only occur with surfaces
logger.info(
f'Projecting data from {len(pos)} sensor{_pl(pos)} onto {len(rrs)} '
f'{src.kind} vertices: {mode} mode')
if project and src.kind == 'surface':
logger.info(' Projecting electrodes onto surface')
pos = _project_onto_surface(pos, dict(rr=rrs), project_rrs=True,
method='nearest')[2]
min_dist = pdist(pos).min() * 1000
logger.info(
f' Minimum {"projected " if project else ""}intra-sensor distance: '
f'{min_dist:0.1f} mm')
# compute pairwise distance between source space points and sensors
dists = cdist(rrs, pos)
assert dists.shape == (len(rrs), len(pos))
# only consider vertices within our "epsilon-ball"
# characterized by distance kwarg
vertices = np.where((dists <= distance).any(-1))[0]
logger.info(f' {len(vertices)} / {len(rrs)} non-zero vertices')
w = np.maximum(1. - dists[vertices] / distance, 0)
# now we triage based on mode
if mode in ('single', 'nearest'):
range_ = np.arange(w.shape[0])
idx = np.argmax(w, axis=1)
vals = w[range_, idx] if mode == 'single' else 1.
w.fill(0)
w[range_, idx] = vals
missing = np.where(~np.any(w, axis=0))[0]
if len(missing):
warn(f'Channel{_pl(missing)} missing in STC: '
f'{", ".join(evoked.ch_names[mi] for mi in missing)}')
nz_data = w @ evoked.data
if not keep_all:
assert src.kind == 'surface'
data = nz_data
offset = len(src[0]['vertno'])
vertices = [vertices[vertices < offset],
vertices[vertices >= offset] - offset]
else:
data = np.zeros(
(sum(len(s['vertno']) for s in src), len(evoked.times)),
dtype=nz_data.dtype)
data[vertices] = nz_data
vertices = [s['vertno'].copy() for s in src]
return klass(data, vertices, evoked.times[0], 1. / evoked.info['sfreq'],
subject=subject, verbose=verbose)
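# Editorial usage sketch (not part of the module): ``evoked``, ``trans`` and
# ``subject`` are hypothetical inputs; with src=None the ECoG/sEEG channels
# are projected onto the subject's pial surface within a 2 cm radius.
def _example_stc_near_sensors(evoked, trans, subject, subjects_dir=None):
    return stc_near_sensors(evoked, trans, subject, distance=0.02,
                            mode='sum', subjects_dir=subjects_dir)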
| bsd-3-clause |
kashif/scikit-learn | sklearn/linear_model/tests/test_sag.py | 33 | 28228 | # Authors: Danny Sullivan <[email protected]>
# Tom Dupre la Tour <[email protected]>
#
# Licence: BSD 3 clause
import math
import numpy as np
import scipy.sparse as sp
from sklearn.linear_model.sag import get_auto_step_size
from sklearn.linear_model.sag_fast import _multinomial_grad_loss_all_samples
from sklearn.linear_model import LogisticRegression, Ridge
from sklearn.linear_model.base import make_dataset
from sklearn.linear_model.logistic import _multinomial_loss_grad
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import row_norms
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils import compute_class_weight
from sklearn.utils import check_random_state
from sklearn.preprocessing import LabelEncoder, LabelBinarizer
from sklearn.datasets import make_blobs, load_iris
from sklearn.base import clone
iris = load_iris()
# this is used for sag classification
def log_dloss(p, y):
z = p * y
# approximately equal and saves the computation of the log
if z > 18.0:
return math.exp(-z) * -y
if z < -18.0:
return -y
return -y / (math.exp(z) + 1.0)
def log_loss(p, y):
return np.mean(np.log(1. + np.exp(-y * p)))
# this is used for sag regression
def squared_dloss(p, y):
return p - y
def squared_loss(p, y):
return np.mean(0.5 * (p - y) * (p - y))
# function for measuring the log loss
def get_pobj(w, alpha, myX, myy, loss):
w = w.ravel()
pred = np.dot(myX, w)
p = loss(pred, myy)
p += alpha * w.dot(w) / 2.
return p
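# Editorial sketch (not part of the test suite): the regularized objective of
# the zero weight vector on a tiny two-sample problem is log(2), because the
# logistic loss of a zero prediction is log(1 + exp(0)) and the penalty term
# vanishes.
def _example_get_pobj():
    X = np.array([[1., 0.], [0., 1.]])
    y = np.array([1., -1.])
    return get_pobj(np.zeros(2), 0.1, X, y, log_loss)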
def sag(X, y, step_size, alpha, n_iter=1, dloss=None, sparse=False,
sample_weight=None, fit_intercept=True):
n_samples, n_features = X.shape[0], X.shape[1]
weights = np.zeros(X.shape[1])
sum_gradient = np.zeros(X.shape[1])
gradient_memory = np.zeros((n_samples, n_features))
intercept = 0.0
intercept_sum_gradient = 0.0
intercept_gradient_memory = np.zeros(n_samples)
rng = np.random.RandomState(77)
decay = 1.0
seen = set()
# sparse data has a fixed decay of .01
if sparse:
decay = .01
for epoch in range(n_iter):
for k in range(n_samples):
idx = int(rng.rand(1) * n_samples)
# idx = k
entry = X[idx]
seen.add(idx)
p = np.dot(entry, weights) + intercept
gradient = dloss(p, y[idx])
if sample_weight is not None:
gradient *= sample_weight[idx]
update = entry * gradient + alpha * weights
sum_gradient += update - gradient_memory[idx]
gradient_memory[idx] = update
if fit_intercept:
intercept_sum_gradient += (gradient -
intercept_gradient_memory[idx])
intercept_gradient_memory[idx] = gradient
intercept -= (step_size * intercept_sum_gradient
/ len(seen) * decay)
weights -= step_size * sum_gradient / len(seen)
return weights, intercept
def sag_sparse(X, y, step_size, alpha, n_iter=1,
dloss=None, sample_weight=None, sparse=False,
fit_intercept=True):
if step_size * alpha == 1.:
raise ZeroDivisionError("Sparse sag does not handle the case "
"step_size * alpha == 1")
n_samples, n_features = X.shape[0], X.shape[1]
weights = np.zeros(n_features)
sum_gradient = np.zeros(n_features)
last_updated = np.zeros(n_features, dtype=np.int)
gradient_memory = np.zeros(n_samples)
rng = np.random.RandomState(77)
intercept = 0.0
intercept_sum_gradient = 0.0
wscale = 1.0
decay = 1.0
seen = set()
c_sum = np.zeros(n_iter * n_samples)
# sparse data has a fixed decay of .01
if sparse:
decay = .01
counter = 0
for epoch in range(n_iter):
for k in range(n_samples):
# idx = k
idx = int(rng.rand(1) * n_samples)
entry = X[idx]
seen.add(idx)
if counter >= 1:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter - 1] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
last_updated[j] = counter
p = (wscale * np.dot(entry, weights)) + intercept
gradient = dloss(p, y[idx])
if sample_weight is not None:
gradient *= sample_weight[idx]
update = entry * gradient
sum_gradient += update - (gradient_memory[idx] * entry)
if fit_intercept:
intercept_sum_gradient += gradient - gradient_memory[idx]
intercept -= (step_size * intercept_sum_gradient
/ len(seen) * decay)
gradient_memory[idx] = gradient
wscale *= (1.0 - alpha * step_size)
if counter == 0:
c_sum[0] = step_size / (wscale * len(seen))
else:
c_sum[counter] = (c_sum[counter - 1] +
step_size / (wscale * len(seen)))
if counter >= 1 and wscale < 1e-9:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
last_updated[j] = counter + 1
c_sum[counter] = 0
weights *= wscale
wscale = 1.0
counter += 1
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter - 1] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
weights *= wscale
return weights, intercept
def get_step_size(X, alpha, fit_intercept, classification=True):
if classification:
return (4.0 / (np.max(np.sum(X * X, axis=1))
+ fit_intercept + 4.0 * alpha))
else:
return 1.0 / (np.max(np.sum(X * X, axis=1)) + fit_intercept + alpha)
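# Editorial sketch (not part of the test suite): run the dense and the
# lazily-updated ("sparse") reference SAG implementations above on a tiny
# noiseless least-squares problem; both use the same internal random seed and
# should agree closely with each other.
def _example_reference_sag():
    rng_ex = np.random.RandomState(0)
    X = rng_ex.normal(size=(30, 3))
    y = X.dot(rng_ex.normal(size=3))
    alpha = 0.1
    step_size = get_step_size(X, alpha, fit_intercept=True,
                              classification=False)
    w1, b1 = sag(X, y, step_size, alpha, n_iter=50, dloss=squared_dloss)
    w2, b2 = sag_sparse(X, y, step_size, alpha, n_iter=50,
                        dloss=squared_dloss)
    return (w1, b1), (w2, b2)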
@ignore_warnings
def test_classifier_matching():
n_samples = 20
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
y[y == 0] = -1
alpha = 1.1
n_iter = 80
fit_intercept = True
step_size = get_step_size(X, alpha, fit_intercept)
clf = LogisticRegression(solver="sag", fit_intercept=fit_intercept,
tol=1e-11, C=1. / alpha / n_samples,
max_iter=n_iter, random_state=10)
clf.fit(X, y)
weights, intercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept)
weights2, intercept2 = sag(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept)
weights = np.atleast_2d(weights)
intercept = np.atleast_1d(intercept)
weights2 = np.atleast_2d(weights2)
intercept2 = np.atleast_1d(intercept2)
assert_array_almost_equal(weights, clf.coef_, decimal=10)
assert_array_almost_equal(intercept, clf.intercept_, decimal=10)
assert_array_almost_equal(weights2, clf.coef_, decimal=10)
assert_array_almost_equal(intercept2, clf.intercept_, decimal=10)
@ignore_warnings
def test_regressor_matching():
n_samples = 10
n_features = 5
rng = np.random.RandomState(10)
X = rng.normal(size=(n_samples, n_features))
true_w = rng.normal(size=n_features)
y = X.dot(true_w)
alpha = 1.
n_iter = 100
fit_intercept = True
step_size = get_step_size(X, alpha, fit_intercept, classification=False)
clf = Ridge(fit_intercept=fit_intercept, tol=.00000000001, solver='sag',
alpha=alpha * n_samples, max_iter=n_iter)
clf.fit(X, y)
weights1, intercept1 = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
weights2, intercept2 = sag(X, y, step_size, alpha, n_iter=n_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
assert_array_almost_equal(weights1, clf.coef_, decimal=10)
assert_array_almost_equal(intercept1, clf.intercept_, decimal=10)
assert_array_almost_equal(weights2, clf.coef_, decimal=10)
assert_array_almost_equal(intercept2, clf.intercept_, decimal=10)
@ignore_warnings
def test_sag_pobj_matches_logistic_regression():
"""tests if the sag pobj matches log reg"""
n_samples = 100
alpha = 1.0
max_iter = 20
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
clf1 = LogisticRegression(solver='sag', fit_intercept=False, tol=.0000001,
C=1. / alpha / n_samples, max_iter=max_iter,
random_state=10)
clf2 = clone(clf1)
clf3 = LogisticRegression(fit_intercept=False, tol=.0000001,
C=1. / alpha / n_samples, max_iter=max_iter,
random_state=10)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
clf3.fit(X, y)
pobj1 = get_pobj(clf1.coef_, alpha, X, y, log_loss)
pobj2 = get_pobj(clf2.coef_, alpha, X, y, log_loss)
pobj3 = get_pobj(clf3.coef_, alpha, X, y, log_loss)
assert_array_almost_equal(pobj1, pobj2, decimal=4)
assert_array_almost_equal(pobj2, pobj3, decimal=4)
assert_array_almost_equal(pobj3, pobj1, decimal=4)
@ignore_warnings
def test_sag_pobj_matches_ridge_regression():
"""tests if the sag pobj matches ridge reg"""
n_samples = 100
n_features = 10
alpha = 1.0
n_iter = 100
fit_intercept = False
rng = np.random.RandomState(10)
X = rng.normal(size=(n_samples, n_features))
true_w = rng.normal(size=n_features)
y = X.dot(true_w)
clf1 = Ridge(fit_intercept=fit_intercept, tol=.00000000001, solver='sag',
alpha=alpha, max_iter=n_iter, random_state=42)
clf2 = clone(clf1)
clf3 = Ridge(fit_intercept=fit_intercept, tol=.00001, solver='lsqr',
alpha=alpha, max_iter=n_iter, random_state=42)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
clf3.fit(X, y)
pobj1 = get_pobj(clf1.coef_, alpha, X, y, squared_loss)
pobj2 = get_pobj(clf2.coef_, alpha, X, y, squared_loss)
pobj3 = get_pobj(clf3.coef_, alpha, X, y, squared_loss)
assert_array_almost_equal(pobj1, pobj2, decimal=4)
assert_array_almost_equal(pobj1, pobj3, decimal=4)
assert_array_almost_equal(pobj3, pobj2, decimal=4)
@ignore_warnings
def test_sag_regressor_computed_correctly():
"""tests if the sag regressor is computed correctly"""
alpha = .1
n_features = 10
n_samples = 40
max_iter = 50
tol = .000001
fit_intercept = True
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
y = np.dot(X, w) + 2.
step_size = get_step_size(X, alpha, fit_intercept, classification=False)
clf1 = Ridge(fit_intercept=fit_intercept, tol=tol, solver='sag',
alpha=alpha * n_samples, max_iter=max_iter)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
spweights1, spintercept1 = sag_sparse(X, y, step_size, alpha,
n_iter=max_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=max_iter,
dloss=squared_dloss, sparse=True,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights1.ravel(),
decimal=3)
assert_almost_equal(clf1.intercept_, spintercept1, decimal=1)
# TODO: uncomment when sparse Ridge with intercept will be fixed (#4710)
#assert_array_almost_equal(clf2.coef_.ravel(),
# spweights2.ravel(),
# decimal=3)
    #assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_get_auto_step_size():
X = np.array([[1, 2, 3], [2, 3, 4], [2, 3, 2]], dtype=np.float64)
alpha = 1.2
fit_intercept = False
# sum the squares of the second sample because that's the largest
max_squared_sum = 4 + 9 + 16
max_squared_sum_ = row_norms(X, squared=True).max()
assert_almost_equal(max_squared_sum, max_squared_sum_, decimal=4)
for fit_intercept in (True, False):
step_size_sqr = 1.0 / (max_squared_sum + alpha + int(fit_intercept))
step_size_log = 4.0 / (max_squared_sum + 4.0 * alpha +
int(fit_intercept))
step_size_sqr_ = get_auto_step_size(max_squared_sum_, alpha, "squared",
fit_intercept)
step_size_log_ = get_auto_step_size(max_squared_sum_, alpha, "log",
fit_intercept)
assert_almost_equal(step_size_sqr, step_size_sqr_, decimal=4)
assert_almost_equal(step_size_log, step_size_log_, decimal=4)
msg = 'Unknown loss function for SAG solver, got wrong instead of'
assert_raise_message(ValueError, msg, get_auto_step_size,
max_squared_sum_, alpha, "wrong", fit_intercept)
@ignore_warnings
def test_sag_regressor():
"""tests if the sag regressor performs well"""
xmin, xmax = -5, 5
n_samples = 20
tol = .001
max_iter = 20
alpha = 0.1
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf1 = Ridge(tol=tol, solver='sag', max_iter=max_iter,
alpha=alpha * n_samples)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
score1 = clf1.score(X, y)
score2 = clf2.score(X, y)
assert_greater(score1, 0.99)
assert_greater(score2, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf1 = Ridge(tol=tol, solver='sag', max_iter=max_iter,
alpha=alpha * n_samples)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
score1 = clf1.score(X, y)
score2 = clf2.score(X, y)
assert_greater(score1, 0.5)
assert_greater(score2, 0.5)
@ignore_warnings
def test_sag_classifier_computed_correctly():
"""tests if the binary classifier is computed correctly"""
alpha = .1
n_samples = 50
n_iter = 50
tol = .00001
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
y_tmp = np.ones(n_samples)
y_tmp[y != classes[1]] = -1
y = y_tmp
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=n_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
spweights, spintercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=n_iter,
dloss=log_dloss, sparse=True,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights.ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
assert_array_almost_equal(clf2.coef_.ravel(),
spweights2.ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_sag_multiclass_computed_correctly():
"""tests if the multiclass classifier is computed correctly"""
alpha = .1
n_samples = 20
tol = .00001
max_iter = 40
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
coef1 = []
intercept1 = []
coef2 = []
intercept2 = []
for cl in classes:
y_encoded = np.ones(n_samples)
y_encoded[y != cl] = -1
spweights1, spintercept1 = sag_sparse(X, y_encoded, step_size, alpha,
dloss=log_dloss, n_iter=max_iter,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y_encoded, step_size, alpha,
dloss=log_dloss, n_iter=max_iter,
sparse=True,
fit_intercept=fit_intercept)
coef1.append(spweights1)
intercept1.append(spintercept1)
coef2.append(spweights2)
intercept2.append(spintercept2)
coef1 = np.vstack(coef1)
intercept1 = np.array(intercept1)
coef2 = np.vstack(coef2)
intercept2 = np.array(intercept2)
for i, cl in enumerate(classes):
assert_array_almost_equal(clf1.coef_[i].ravel(),
coef1[i].ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_[i], intercept1[i], decimal=1)
assert_array_almost_equal(clf2.coef_[i].ravel(),
coef2[i].ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_[i], intercept2[i], decimal=1)
@ignore_warnings
def test_classifier_results():
"""tests if classifier results match target"""
alpha = .1
n_features = 20
n_samples = 10
tol = .01
max_iter = 200
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
y = np.dot(X, w)
y = np.sign(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
pred1 = clf1.predict(X)
pred2 = clf2.predict(X)
assert_almost_equal(pred1, y, decimal=12)
assert_almost_equal(pred2, y, decimal=12)
@ignore_warnings
def test_binary_classifier_class_weight():
"""tests binary classifier with classweights for each class"""
alpha = .1
n_samples = 50
n_iter = 20
tol = .00001
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=10,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
y_tmp = np.ones(n_samples)
y_tmp[y != classes[1]] = -1
y = y_tmp
class_weight = {1: .45, -1: .55}
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=n_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept,
class_weight=class_weight)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
le = LabelEncoder()
class_weight_ = compute_class_weight(class_weight, np.unique(y), y)
sample_weight = class_weight_[le.fit_transform(y)]
spweights, spintercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
sample_weight=sample_weight,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=n_iter,
dloss=log_dloss, sparse=True,
sample_weight=sample_weight,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights.ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
assert_array_almost_equal(clf2.coef_.ravel(),
spweights2.ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_multiclass_classifier_class_weight():
"""tests multiclass with classweights for each class"""
alpha = .1
n_samples = 20
tol = .00001
max_iter = 50
class_weight = {0: .45, 1: .55, 2: .75}
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept,
class_weight=class_weight)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
le = LabelEncoder()
class_weight_ = compute_class_weight(class_weight, np.unique(y), y)
sample_weight = class_weight_[le.fit_transform(y)]
coef1 = []
intercept1 = []
coef2 = []
intercept2 = []
for cl in classes:
y_encoded = np.ones(n_samples)
y_encoded[y != cl] = -1
spweights1, spintercept1 = sag_sparse(X, y_encoded, step_size, alpha,
n_iter=max_iter, dloss=log_dloss,
sample_weight=sample_weight)
spweights2, spintercept2 = sag_sparse(X, y_encoded, step_size, alpha,
n_iter=max_iter, dloss=log_dloss,
sample_weight=sample_weight,
sparse=True)
coef1.append(spweights1)
intercept1.append(spintercept1)
coef2.append(spweights2)
intercept2.append(spintercept2)
coef1 = np.vstack(coef1)
intercept1 = np.array(intercept1)
coef2 = np.vstack(coef2)
intercept2 = np.array(intercept2)
for i, cl in enumerate(classes):
assert_array_almost_equal(clf1.coef_[i].ravel(),
coef1[i].ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_[i], intercept1[i], decimal=1)
assert_array_almost_equal(clf2.coef_[i].ravel(),
coef2[i].ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_[i], intercept2[i], decimal=1)
def test_classifier_single_class():
"""tests if ValueError is thrown with only one class"""
X = [[1, 2], [3, 4]]
y = [1, 1]
assert_raise_message(ValueError,
"This solver needs samples of at least 2 classes "
"in the data",
LogisticRegression(solver='sag').fit,
X, y)
def test_step_size_alpha_error():
X = [[0, 0], [0, 0]]
y = [1, -1]
fit_intercept = False
alpha = 1.
msg = ("Current sag implementation does not handle the case"
" step_size * alpha_scaled == 1")
clf1 = LogisticRegression(solver='sag', C=1. / alpha,
fit_intercept=fit_intercept)
assert_raise_message(ZeroDivisionError, msg, clf1.fit, X, y)
clf2 = Ridge(fit_intercept=fit_intercept, solver='sag', alpha=alpha)
assert_raise_message(ZeroDivisionError, msg, clf2.fit, X, y)
def test_multinomial_loss():
# test if the multinomial loss and gradient computations are consistent
X, y = iris.data, iris.target.astype(np.float64)
n_samples, n_features = X.shape
n_classes = len(np.unique(y))
rng = check_random_state(42)
weights = rng.randn(n_features, n_classes)
intercept = rng.randn(n_classes)
sample_weights = rng.randn(n_samples)
np.abs(sample_weights, sample_weights)
# compute loss and gradient like in multinomial SAG
dataset, _ = make_dataset(X, y, sample_weights, random_state=42)
loss_1, grad_1 = _multinomial_grad_loss_all_samples(dataset, weights,
intercept, n_samples,
n_features, n_classes)
# compute loss and gradient like in multinomial LogisticRegression
lbin = LabelBinarizer()
Y_bin = lbin.fit_transform(y)
weights_intercept = np.vstack((weights, intercept)).T.ravel()
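    # layout note: the vstack/transpose/ravel packs the parameters
    # class-by-class as [w_1, ..., w_d, b], which is why grad_2 below is
    # reshaped to (n_classes, -1) and its last column (the intercept) dropped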
loss_2, grad_2, _ = _multinomial_loss_grad(weights_intercept, X, Y_bin,
0.0, sample_weights)
grad_2 = grad_2.reshape(n_classes, -1)
grad_2 = grad_2[:, :-1].T
# comparison
assert_array_almost_equal(grad_1, grad_2)
assert_almost_equal(loss_1, loss_2)
def test_multinomial_loss_ground_truth():
# n_samples, n_features, n_classes = 4, 2, 3
n_classes = 3
X = np.array([[1.1, 2.2], [2.2, -4.4], [3.3, -2.2], [1.1, 1.1]])
y = np.array([0, 1, 2, 0])
lbin = LabelBinarizer()
Y_bin = lbin.fit_transform(y)
weights = np.array([[0.1, 0.2, 0.3], [1.1, 1.2, -1.3]])
intercept = np.array([1., 0, -.2])
sample_weights = np.array([0.8, 1, 1, 0.8])
prediction = np.dot(X, weights) + intercept
logsumexp_prediction = logsumexp(prediction, axis=1)
p = prediction - logsumexp_prediction[:, np.newaxis]
loss_1 = -(sample_weights[:, np.newaxis] * p * Y_bin).sum()
diff = sample_weights[:, np.newaxis] * (np.exp(p) - Y_bin)
grad_1 = np.dot(X.T, diff)
weights_intercept = np.vstack((weights, intercept)).T.ravel()
loss_2, grad_2, _ = _multinomial_loss_grad(weights_intercept, X, Y_bin,
0.0, sample_weights)
grad_2 = grad_2.reshape(n_classes, -1)
grad_2 = grad_2[:, :-1].T
assert_almost_equal(loss_1, loss_2)
assert_array_almost_equal(grad_1, grad_2)
# ground truth
loss_gt = 11.680360354325961
grad_gt = np.array([[-0.557487, -1.619151, +2.176638],
[-0.903942, +5.258745, -4.354803]])
assert_almost_equal(loss_1, loss_gt)
assert_array_almost_equal(grad_1, grad_gt)
| bsd-3-clause |
zertan/Menace | menace/bin/addStrainCoverage.py | 2 | 3094 | #!/usr/bin/env python
import numpy as np
import pandas
import xmltodict
import os
import sys
#import time
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i+n]
def binData(x,binSize):
	l=int(np.ceil(len(x)/binSize))
out=np.zeros(l+1)
tmp=chunks(x,binSize)
for i,val in enumerate(tmp):
out[i]=np.sum(val)
return out
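# Worked example (not executed here): binData(np.arange(6), 2) sums the
# chunks [0, 1], [2, 3], [4, 5] and returns [1., 5., 9., 0.]; the trailing
# zero comes from the ceil-sized output buffer allocated above.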
def interp(data,length):
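	"""Best-effort summary of the code below: stretch `data` towards `length`
	by repeatedly inserting per-bin mean values, rescaling after each pass so
	the original mean coverage is preserved; returns [] if it has not
	converged after 10 passes, and breaks out early when the remaining gap is
	too large relative to the current length."""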
calls=0
while len(data)!=length:
calls+=1
length1=len(data)
cov=np.sum(data)/float(length1)
diff=length-length1
binSize=int(np.ceil(length1/float(diff)))
if binSize==0 or diff>length1/float(2.3):
break
chunks1=chunks(data,int(binSize))
#data2=data
#print("nr " + str(diff))
vals=[]
insert_ind=[]
for i,val in enumerate(chunks1):
binSize2=min(len(val),binSize)
insert_ind.append(i*binSize+np.round(binSize2/float(2)))
vals.append(np.mean(val))
vals=np.array(vals)
		insert_ind=np.array(insert_ind).astype(int)
#print(str(data.shape))
#print(str(insert_ind.shape))
#print(str(vals.shape))
#print(np.array_str(vals))
data=np.insert(data,insert_ind,vals)
#print(str(data.shape))
cov2=np.sum(data)/length
data=cov/cov2*data
#print("k")
if calls>=10:
return []
#print(str(calls))
return data
originFrame=pandas.read_csv(os.path.join(sys.argv[1],'extra','origins.txt'),delimiter=" ",index_col=0)
#print(originFrame.to_string())
#originFrame=originFrame.transpose()
#print(originFrame.to_string())
#print(sys.argv[3])
#print(originFrame.index.values)
#print(originFrame.loc[sys.argv[2]]['Origin'])
#for i,arg in enumerate(sys.argv[2:]):
# print(originFrame.loc[:arg])
data=[]
origin=[]
genomeLen=[]
bacteriaName=[]
ind=0
for i,arg in enumerate(sys.argv[3:]):
#print(arg+".depth")
try:
tmpData=np.array(pandas.read_csv(arg+".depth",delimiter=" ")).astype('float')
data.append(tmpData)
#ind=ind+1
with open(os.path.join(sys.argv[2],'Headers',arg+".xml")) as fd:
obj = xmltodict.parse(fd.read())
#genomeLen.append(int(obj['DocSum']['Item'][8]['#text']))
bacteriaName.append(obj['DocSum']['Item'][1]['#text'])
genomeLen.append(int(len(tmpData)))
origin.append(int(originFrame.loc[arg]['Origin']))
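		# presumably: rotate each coverage profile so index 0 corresponds to the
		# annotated replication origin before the profiles are summed below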
data[ind]=np.roll(data[ind],-origin[ind])
ind=ind+1
except IOError as e:
print(arg+" depth not found.")
except ValueError as e:
print(arg+" origin not found.")
if not data or len(data)==1:
sys.exit()
#print("Len data: "+str(len(data)))
#ind=np.where(np.max(genomeLen))
#print(str(ind))
#print(np.array_str(ind))
#print(str(genomeLen))
ind=genomeLen.index(max(genomeLen))
genomeLen=max(genomeLen)#[int(ind[0])]
#print(str(genomeLen))
print("ind "+str(ind))# + "len "+str(len(genomeLen)))
#print(str(len(data[ind])))
dataLen=len(data[ind])
data2=np.array(data[ind]).flatten()
del data[ind]
#print("init shape")
bap=data2.shape
#print(str(genomeLen))
for i,val in enumerate(data):
tmp=interp(val,dataLen)
print(sys.argv[3+i]+" tmp shape: "+ str(tmp.shape) + " init shape: "+str(bap))
if tmp.shape==data2.shape:
data2=np.add(data2,tmp)
print(sys.argv[3])
np.save(sys.argv[3], data2)
| gpl-2.0 |
RachitKansal/scikit-learn | sklearn/feature_extraction/hashing.py | 183 | 6155 | # Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
input_type : string, optional
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
    non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
            Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
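if __name__ == "__main__":
    # Small self-contained sketch (illustrative only, not part of the module
    # above): the "pair" and "string" input types accept pre-tokenized samples.
    _hasher_pairs = FeatureHasher(n_features=8, input_type="pair")
    _X_pairs = _hasher_pairs.transform([[("dog", 1), ("cat", 2)], [("run", 5)]])
    _hasher_strings = FeatureHasher(n_features=8, input_type="string")
    _X_strings = _hasher_strings.transform([["dog", "cat", "cat"], ["run"]])
    print(_X_pairs.shape, _X_strings.shape)  # both are (2, 8) sparse matrices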
| bsd-3-clause |
JsNoNo/scikit-learn | benchmarks/bench_covertype.py | 120 | 7381 | """
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, and Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <[email protected]>
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_covtype(download_if_missing=True, shuffle=True,
random_state=random_state)
X = check_array(data['data'], dtype=dtype, order=order)
y = (data['target'] != 1).astype(np.int)
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 522911
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
## Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test, y_train, y_test
ESTIMATORS = {
'GBRT': GradientBoostingClassifier(n_estimators=250),
'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
'RandomForest': RandomForestClassifier(n_estimators=20),
'CART': DecisionTreeClassifier(min_samples_split=5),
'SGD': SGDClassifier(alpha=0.001, n_iter=2),
'GaussianNB': GaussianNB(),
'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
tol=1e-3),
'SAG': LogisticRegression(solver='sag', max_iter=2, C=1000)
}
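# Illustrative only (commented out so the benchmark above is unchanged):
# further models can be registered the same way, e.g. something like
# ESTIMATORS['ExtraTrees100'] = ExtraTreesClassifier(n_estimators=100)
# would make "--classifiers ExtraTrees100" selectable on the command line.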
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=13, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(
order=args["order"], random_state=args["random_seed"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("%s %s %s %s"
% ("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 44)
for name in sorted(args["classifiers"], key=error.get):
print("%s %s %s %s" % (name.ljust(12),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % error[name]).center(10)))
print()
| bsd-3-clause |
Obus/scikit-learn | examples/svm/plot_svm_scale_c.py | 223 | 5375 | """
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
    C \sum_{i=1}^{n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>` to
set the amount of regularization with `C`, there will be a
different number of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function depends on the number of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different number of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grow.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
    behind this is that the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <[email protected]>
# Jaques Grobler <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features // 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features // 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['b', 'g', 'r', 'c']
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
# set up the plot for each regressor
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(n=n_samples, train_size=train_size,
n_iter=250, random_state=1))
grid.fit(X, y)
scores = [x[1] for x in grid.grid_scores_]
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
dieterich-lab/rp-bp | rpbp/analysis/rpbp_predictions/create_bf_rpkm_scatter_plot.py | 1 | 6309 | #! /usr/bin/env python3
import matplotlib
matplotlib.use('agg')
import argparse
import logging
import os
import yaml
import matplotlib.pyplot as plt
import numpy as np
import pbio.utils.bio as bio
import pbio.misc.utils as utils
import pbio.ribo.ribo_filenames as filenames
import pbio.ribo.ribo_utils as ribo_utils
logger = logging.getLogger(__name__)
default_min_rpkm = 0
default_max_rpkm = 5
default_min_bf = -10000
default_max_bf = 1000
default_title = ""
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="This script plots the (log) Bayes factor against the estimated "
"RPKM for all ORFs. All relevant values will be clipped according to the "
"specified arguments for viewing.")
parser.add_argument('config', help="The (yaml) config file")
parser.add_argument('name', help="The name of the dataset or replicate to plot")
parser.add_argument('out', help="The output image file")
parser.add_argument('-p', '--use-predictions', help="If this flag is present, then "
"the \"predicted ORFs\" files will be used. Otherwise, all ORFs in the dataset "
"will be visualized.", action='store_true')
parser.add_argument('-r', '--is-replicate', help="If the name corresponds to one "
"of the replicates, this flag must be used to ensure the filenames are "
"handled correctly.", action='store_true')
parser.add_argument('--title', default=default_title)
parser.add_argument('--min-rpkm', type=float, default=default_min_rpkm)
parser.add_argument('--max-rpkm', type=float, default=default_max_rpkm)
parser.add_argument('--min-bf', type=float, default=default_min_bf)
parser.add_argument('--max-bf', type=float, default=default_max_bf)
utils.add_logging_options(parser)
args = parser.parse_args()
utils.update_logging(args)
config = yaml.load(open(args.config), Loader=yaml.FullLoader)
note = config.get('note', None)
if args.is_replicate:
lengths = None
offsets = None
else:
lengths, offsets = ribo_utils.get_periodic_lengths_and_offsets(config, args.name)
fraction = config.get('smoothing_fraction', None)
reweighting_iterations = config.get('smoothing_reweighting_iterations', None)
# we will need these to get the appropriate log BFs
if args.use_predictions:
bayes_factors = filenames.get_riboseq_predicted_orfs(config['riboseq_data'], args.name,
length=lengths, offset=offsets, is_unique=True, note=note, is_smooth=True,
fraction=fraction, reweighting_iterations=reweighting_iterations)
else:
bayes_factors = filenames.get_riboseq_bayes_factors(config['riboseq_data'], args.name,
length=lengths, offset=offsets, is_unique=True, note=note, is_smooth=True,
fraction=fraction, reweighting_iterations=reweighting_iterations)
if not os.path.exists(bayes_factors):
msg = ("Could not find the Bayes factor file: {}\nIf this is for a particular "
"sample and the --merge-replicates option was used, this is not a problem. "
"Will not create this scatter plot".format(bayes_factors))
logger.warning(msg)
return
msg = "Reading Bayes factors"
logger.info(msg)
bayes_factors = bio.read_bed(bayes_factors)
# we need these to get the raw counts for calculating RPKM
# we always need all of the counts, so no need to check which ORFs
rpchi_pvalues = filenames.get_riboseq_bayes_factors(config['riboseq_data'], args.name,
length=lengths, offset=offsets, is_unique=True, note=note, is_smooth=False)
if not os.path.exists(rpchi_pvalues):
msg = ("Could not find the Rp-chi pvalues file: {}\nIf this is for a particular "
"sample and the --merge-replicates option was used, this is not a problem. "
"Will not create this scatter plot".format(rpchi_pvalues))
logger.warning(msg)
return
msg = "Reading Rp-chi pvalues"
logger.info(msg)
rpchi_pvalues = bio.read_bed(rpchi_pvalues)
msg = "Calculating RPKM values"
logger.info(msg)
# we approximate the number of mapping reads as the sum across all ORFs.
# this double-counts some reads
num_reads = np.sum(rpchi_pvalues['profile_sum'])
all_rpkm = (1e6 * rpchi_pvalues['x_1_sum']) / (rpchi_pvalues['orf_len'] * num_reads)
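    # sanity check of the formula as implemented, with made-up numbers: an ORF
    # with x_1_sum = 50, orf_len = 1000 and num_reads = 1e6 would get
    # 1e6 * 50 / (1000 * 1e6) = 0.05 under this scaling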
# only include things that have some reads in the visualization
m_rpkm = all_rpkm > 0
msg = "Creating plot"
logger.info(msg)
fig, ax = plt.subplots(figsize=(10, 5))
cm = plt.cm.gist_earth
for i, orf_label in enumerate(ribo_utils.orf_type_labels):
orf_types = ribo_utils.orf_type_labels_mapping[orf_label]
m_type = bayes_factors['orf_type'].isin(orf_types)
# now, pull out the RPKMs
if args.use_predictions:
# if we are using predictions, we have to filter and join
orf_ids = bayes_factors.loc[m_rpkm & m_type, 'id']
bfs = np.array(bayes_factors.loc[m_rpkm & m_type, 'bayes_factor_mean'])
m_ids = rpchi_pvalues['id'].isin(orf_ids)
rpkm = np.array(all_rpkm[m_ids])
else:
            # otherwise, the data frames match, so we can just use the masks
rpkm = np.array(all_rpkm[m_rpkm & m_type])
bfs = np.array(bayes_factors.loc[m_rpkm & m_type, 'bayes_factor_mean'])
rpkm = np.clip(rpkm, args.min_rpkm, args.max_rpkm)
bfs = np.clip(bfs, args.min_bf, args.max_bf)
color = i / len(ribo_utils.orf_type_labels)
color = cm(color)
label = "{} ({})".format(orf_label, len(rpkm))
ax.scatter(rpkm, bfs, label=label, color=color, edgecolor='k')
ax.set_ylim((args.min_bf * 1.5, args.max_bf * 1.5))
ax.set_xlim((args.min_rpkm * 1.5, args.max_rpkm * 1.25))
ax.set_yscale('symlog')
ax.set_xscale('symlog')
ax.set_xlabel('RPKM')
ax.set_ylabel('log BF')
lgd = ax.legend(loc='center right', bbox_to_anchor=(1.5, 0.5))
if len(args.title) > 0:
ax.set_title(args.title)
fig.savefig(args.out, bbox_inches='tight', bbox_extra_artists=(lgd,))
if __name__ == '__main__':
main()
| mit |
ahoyosid/scikit-learn | sklearn/linear_model/tests/test_ridge.py | 14 | 20805 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.cross_validation import KFold
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
# Ridge regression convergence test using score
# TODO: for this test to be robust, we should use a dataset instead
# of np.random.
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "cholesky", "lsqr"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver == "cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver == "cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
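    # The identity being exercised (standard ridge algebra):
    #   primal: w = (X^T X + alpha*I)^-1 X^T y
    #   dual:   w = X^T (X X^T + alpha*I)^-1 y = X^T dual_coef
    # so solving in the kernel (dual) space and mapping back through X^T
    # should reproduce the primal coefficients.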
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_sample_weights():
rng = np.random.RandomState(0)
for solver in ("cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
for alpha in (1.0, 1e-2):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
coefs = ridge_regression(X, y,
alpha=alpha,
sample_weight=sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
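                # i.e. sum_i w_i * (x_i . beta - y_i)^2 equals the unweighted
                # squared loss evaluated on (sqrt(w_i) * x_i, sqrt(w_i) * y_i),
                # which is what the rescaled call below relies on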
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha=alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
# Test for fit_intercept = True
est = Ridge(alpha=alpha, solver=solver)
est.fit(X, y, sample_weight=sample_weight)
# Check using Newton's Method
# Quadratic function should be solved in a single step.
# Initialize
sample_weight = np.sqrt(sample_weight)
X_weighted = sample_weight[:, np.newaxis] * (
np.column_stack((np.ones(n_samples), X)))
y_weighted = y * sample_weight
# Gradient is (X*coef-y)*X + alpha*coef_[1:]
# Remove coef since it is initialized to zero.
grad = -np.dot(y_weighted, X_weighted)
# Hessian is (X.T*X) + alpha*I except that the first
# diagonal element should be zero, since there is no
# penalization of intercept.
diag = alpha * np.ones(n_features + 1)
diag[0] = 0.
hess = np.dot(X_weighted.T, X_weighted)
hess.flat[::n_features + 2] += diag
coef_ = - np.dot(linalg.inv(hess), grad)
assert_almost_equal(coef_[0], est.intercept_)
assert_array_almost_equal(coef_[1:], est.coef_)
def test_ridge_shapes():
# Test shape of coef_ and intercept_
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
# Test intercept with multiple targets GH issue #708
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
# Test BayesianRegression ridge classifier
# TODO: test also n_samples > n_features
X = np.array([[1], [2]])
Y = np.array([1, 2])
clf = Ridge(alpha=0.0)
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
assert_equal(len(clf.coef_.shape), 1)
assert_equal(type(clf.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(clf.coef_.shape), 2)
assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
# On alpha=0., Ridge and OLS yield the same solution.
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
# Tests the ridge object using individual penalties
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-6).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:3])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
# test that can work with both dense or sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
ridge_gcv = _RidgeGCV(fit_intercept=False)
ridge = Ridge(alpha=1.0, fit_intercept=False)
# generalized cross-validation (efficient leave-one-out)
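    # (the identity exploited here: with hat matrix
    #  H = X (X^T X + alpha*I)^-1 X^T, the leave-one-out residual for sample i
    #  is (y_i - y_hat_i) / (1 - H_ii), so every LOO error comes from a single
    #  fit on the full data)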
decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
scoring = make_scorer(func)
ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = get_scorer('mean_squared_error')
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
def _test_ridge_cv(filter_):
n_samples = X_diabetes.shape[0]
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(n_samples, 5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for clf in (RidgeClassifier(), RidgeClassifierCV()):
clf.fit(filter_(X_iris), y_iris)
assert_equal(clf.coef_.shape, (n_classes, n_features))
y_pred = clf.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
n_samples = X_iris.shape[0]
cv = KFold(n_samples, 5)
clf = RidgeClassifierCV(cv=cv)
clf.fit(filter_(X_iris), y_iris)
y_pred = clf.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_ridge_sparse_svd():
X = sp.csc_matrix(rng.rand(100, 10))
y = rng.rand(100)
ridge = Ridge(solver='svd')
assert_raises(TypeError, ridge.fit, X, y)
def test_class_weights():
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = RidgeClassifier(class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
# check if class_weight = 'auto' can handle negative labels.
clf = RidgeClassifier(class_weight='auto')
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# class_weight = 'auto', and class_weight = None should return
# same values when y has equal number of all labels
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
y = [1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
clfa = RidgeClassifier(class_weight='auto')
clfa.fit(X, y)
assert_equal(len(clfa.classes_), 2)
assert_array_almost_equal(clf.coef_, clfa.coef_)
assert_array_almost_equal(clf.intercept_, clfa.intercept_)
def test_class_weights_cv():
# Test class weights for cross validated ridge classifier.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
clf.fit(X, y)
# we give a small weights to class 1
clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
clf.fit(X, y)
assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
# Test _RidgeCV's store_cv_values attribute.
    rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
ridge = Ridge(alpha=1)
# make sure the "OK" sample weights actually work
ridge.fit(X, y, sample_weights_OK)
ridge.fit(X, y, sample_weights_OK_1)
ridge.fit(X, y, sample_weights_OK_2)
def fit_ridge_not_ok():
ridge.fit(X, y, sample_weights_not_OK)
def fit_ridge_not_ok_2():
ridge.fit(X, y, sample_weights_not_OK_2)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
# Sample weights must work with sparse matrices
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
sparse_matrix_converters = [sp.coo_matrix,
sp.csr_matrix,
sp.csc_matrix,
sp.lil_matrix,
sp.dok_matrix
]
sparse_ridge = Ridge(alpha=1., fit_intercept=False)
dense_ridge = Ridge(alpha=1., fit_intercept=False)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights = rng.randn(n_samples) ** 2 + 1
for sparse_converter in sparse_matrix_converters:
X_sparse = sparse_converter(X)
sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
dense_ridge.fit(X, y, sample_weight=sample_weights)
assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
decimal=6)
def test_raises_value_error_if_solver_not_supported():
# Tests whether a ValueError is raised if a non-identified solver
# is passed to ridge_regression
wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
exception = ValueError
message = "Solver %s not understood" % wrong_solver
def func():
X = np.eye(3)
y = np.ones(3)
ridge_regression(X, y, alpha=1., solver=wrong_solver)
assert_raise_message(exception, message, func)
def test_sparse_cg_max_iter():
reg = Ridge(solver="sparse_cg", max_iter=1)
reg.fit(X_diabetes, y_diabetes)
assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
| bsd-3-clause |
villalonreina/dipy | dipy/tests/test_scripts.py | 7 | 5119 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" Test scripts
Run scripts and check outputs
"""
'''
from __future__ import division, print_function, absolute_import
import glob
import os
import shutil
from os.path import (dirname, join as pjoin, abspath)
from nose.tools import assert_true, assert_false, assert_equal
import numpy.testing as nt
import nibabel as nib
from nibabel.tmpdirs import InTemporaryDirectory
from dipy.data import get_data
# Quickbundles command-line requires matplotlib:
try:
import matplotlib
no_mpl = False
except ImportError:
no_mpl = True
from dipy.tests.scriptrunner import ScriptRunner
runner = ScriptRunner(
script_sdir='bin',
debug_print_var='NIPY_DEBUG_PRINT')
run_command = runner.run_command
DATA_PATH = abspath(pjoin(dirname(__file__), 'data'))
def test_dipy_peak_extraction():
# test dipy_peak_extraction script
cmd = 'dipy_peak_extraction'
code, stdout, stderr = run_command(cmd, check_code=False)
assert_equal(code, 2)
def test_dipy_fit_tensor():
# test dipy_fit_tensor script
cmd = 'dipy_fit_tensor'
code, stdout, stderr = run_command(cmd, check_code=False)
assert_equal(code, 2)
def test_dipy_sh_estimate():
# test dipy_sh_estimate script
cmd = 'dipy_sh_estimate'
code, stdout, stderr = run_command(cmd, check_code=False)
assert_equal(code, 2)
def assert_image_shape_affine(filename, shape, affine):
assert_true(os.path.isfile(filename))
image = nib.load(filename)
assert_equal(image.shape, shape)
nt.assert_array_almost_equal(image.affine, affine)
def test_dipy_fit_tensor_again():
with InTemporaryDirectory():
dwi, bval, bvec = get_data("small_25")
# Copy data to tmp directory
shutil.copyfile(dwi, "small_25.nii.gz")
shutil.copyfile(bval, "small_25.bval")
shutil.copyfile(bvec, "small_25.bvec")
# Call script
cmd = ["dipy_fit_tensor", "--mask=none", "small_25.nii.gz"]
out = run_command(cmd)
assert_equal(out[0], 0)
# Get expected values
img = nib.load("small_25.nii.gz")
affine = img.affine
shape = img.shape[:-1]
# Check expected outputs
assert_image_shape_affine("small_25_fa.nii.gz", shape, affine)
assert_image_shape_affine("small_25_t2di.nii.gz", shape, affine)
assert_image_shape_affine("small_25_dirFA.nii.gz", shape, affine)
assert_image_shape_affine("small_25_ad.nii.gz", shape, affine)
assert_image_shape_affine("small_25_md.nii.gz", shape, affine)
assert_image_shape_affine("small_25_rd.nii.gz", shape, affine)
with InTemporaryDirectory():
dwi, bval, bvec = get_data("small_25")
# Copy data to tmp directory
shutil.copyfile(dwi, "small_25.nii.gz")
shutil.copyfile(bval, "small_25.bval")
shutil.copyfile(bvec, "small_25.bvec")
# Call script
cmd = ["dipy_fit_tensor", "--save-tensor",
"--mask=none", "small_25.nii.gz"]
out = run_command(cmd)
assert_equal(out[0], 0)
# Get expected values
img = nib.load("small_25.nii.gz")
affine = img.affine
shape = img.shape[:-1]
# Check expected outputs
assert_image_shape_affine("small_25_fa.nii.gz", shape, affine)
assert_image_shape_affine("small_25_t2di.nii.gz", shape, affine)
assert_image_shape_affine("small_25_dirFA.nii.gz", shape, affine)
assert_image_shape_affine("small_25_ad.nii.gz", shape, affine)
assert_image_shape_affine("small_25_md.nii.gz", shape, affine)
assert_image_shape_affine("small_25_rd.nii.gz", shape, affine)
# small_25_tensor saves the tensor as a symmetric matrix following
# the nifti standard.
ten_shape = shape + (1, 6)
assert_image_shape_affine("small_25_tensor.nii.gz", ten_shape,
affine)
@nt.dec.skipif(no_mpl)
def test_qb_commandline():
with InTemporaryDirectory():
tracks_file = get_data('fornix')
cmd = ["dipy_quickbundles", tracks_file, '--pkl_file', 'mypickle.pkl',
'--out_file', 'tracks300.trk']
out = run_command(cmd)
assert_equal(out[0], 0)
@nt.dec.skipif(no_mpl)
def test_qb_commandline_output_path_handling():
with InTemporaryDirectory():
# Create temporary subdirectory for input and for output
os.mkdir('work')
os.mkdir('output')
os.chdir('work')
tracks_file = get_data('fornix')
# Need to specify an output directory with a "../" style path
# to trigger old bug.
cmd = ["dipy_quickbundles", tracks_file, '--pkl_file', 'mypickle.pkl',
'--out_file', os.path.join('..', 'output', 'tracks300.trk')]
out = run_command(cmd)
assert_equal(out[0], 0)
# Make sure the files were created in the output directory
os.chdir('../')
output_files_list = glob.glob('output/tracks300_*.trk')
assert_true(output_files_list)
'''
| bsd-3-clause |
rlowrance/re-avm | chart-03.py | 1 | 8395 | '''create charts showing results of rfval.py
INVOCATION
python chart-03.py [--data] [--test]
INPUT FILES
INPUT/rfval/YYYYMM.pickle
OUTPUT FILES
WORKING/chart-03/[test-]data.pickle
WORKING/chart-03/[test-]VAR-YYYY[-MM].pdf
where
VAR in {max_depth | max_features}
YYYY in {2004 | 2005 | 2006 | 2007 | 2008 | 2009}
MM in {02 | 05 | 08 | 11}
'''
from __future__ import division
import cPickle as pickle
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pdb
from pprint import pprint
import random
import sys
from AVM import AVM
from Bunch import Bunch
from columns_contain import columns_contain
from Logger import Logger
from ParseCommandLine import ParseCommandLine
from Path import Path
from rfval import ResultKey, ResultValue
cc = columns_contain
def usage(msg=None):
print __doc__
if msg is not None:
print msg
sys.exit(1)
def make_control(argv):
# return a Bunch
print argv
if len(argv) not in (1, 2, 3):
usage('invalid number of arguments')
pcl = ParseCommandLine(argv)
arg = Bunch(
base_name='chart-03',
data=pcl.has_arg('--data'),
test=pcl.has_arg('--test'),
)
random_seed = 123
random.seed(random_seed)
dir_working = Path().dir_working()
debug = False
reduced_file_name = ('test-' if arg.test else '') + 'data.pickle'
# assure output directory exists
dir_path = dir_working + arg.base_name + '/'
if not os.path.exists(dir_path):
os.makedirs(dir_path)
return Bunch(
arg=arg,
debug=debug,
path_in_ege=dir_working + 'rfval/*.pickle',
path_reduction=dir_path + reduced_file_name,
path_chart_base=dir_path,
random_seed=random_seed,
test=arg.test,
)
def make_chart(df, hp, control, ege_control):
'write one txt file for each n_months_back'
def make_subplot(test_period, n_months_back, loss_metric):
'mutate the default axes'
for i, n_estimators in enumerate(sorted(set(df.n_estimators))):
mask = (
(df.test_period == test_period) &
(df.n_months_back == n_months_back) &
(df.n_estimators == n_estimators) &
(~df.max_depth.isnull() if hp == 'max_depth' else ~df.max_features.isnull())
)
subset = df.loc[mask]
if hp == 'max_depth':
x_values = sorted(set(subset.max_depth))
assert len(x_values) == len(subset)
x = np.empty(len(x_values), dtype=int)
y = np.empty(len(x_values), dtype=float)
for ii, max_depth_value in enumerate(x_values):
# select one row
mask2 = subset.max_depth == max_depth_value
subset2 = subset.loc[mask2]
assert len(subset2) == 1
row = subset2.iloc[0]
x[ii] = row['max_depth']
y[ii] = row[loss_metric]
else:
assert hp == 'max_features'
x_values = (1, 'sqrt', 'log2', 0.1, 0.3, 'auto')
if len(x_values) != len(subset):
pdb.set_trace()
assert len(x_values) == len(subset)
x = np.empty(len(x_values), dtype=object)
y = np.empty(len(x_values), dtype=float)
for ii, max_features_value in enumerate(x_values):
# select one row
mask2 = subset.max_features == max_features_value
subset2 = subset.loc[mask2]
assert len(subset2) == 1
row = subset2.iloc[0]
x[ii] = row['max_features']
y[ii] = row[loss_metric]
plt.plot(y / 1000.0,
label=('n_estimators: %d' % n_estimators),
linestyle=[':', '-.', '--', '-'][i % 4],
color='bgrcmykw'[i % 8],
)
plt.xticks(range(len(y)), x, size='xx-small', rotation='vertical')
plt.yticks(size='xx-small')
plt.title('yr-mo %s-%s bk %d' % (test_period[:4], test_period[4:], n_months_back),
loc='left',
fontdict={'fontsize': 'xx-small', 'style': 'italic'},
)
return
def make_figure(year, months):
print 'make_figure', hp, year, months
test_periods_typical = [str(year * 100 + month)
for month in months
]
test_periods = ('200902',) if year == 2009 else test_periods_typical
plt.figure() # new figure
# plt.suptitle('Loss by Test Period, Tree Max Depth, N Trees') # overlays the subplots
loss_metric = 'rmse'
loss_metric = 'mae'
axes_number = 0
n_months_backs = range(1, 7, 1)
last_test_period_index = len(test_periods) - 1
last_n_months_back_index = len(n_months_backs) - 1
for test_period_index, test_period in enumerate(test_periods):
for n_months_back_index, n_months_back in enumerate(n_months_backs):
axes_number += 1 # count across rows
plt.subplot(len(test_periods), len(n_months_backs), axes_number)
make_subplot(test_period, n_months_back, loss_metric)
if test_period_index == last_test_period_index:
# annotate the bottom row only
if n_months_back_index == 0:
plt.xlabel(hp)
plt.ylabel('%s x $1000' % loss_metric)
if n_months_back_index == last_n_months_back_index:
plt.legend(loc='best', fontsize=5)
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
out_suffix = '-%02d' % months if len(months) == 1 else ''
plt.savefig(control.path_chart_base + hp + '-' + str(year) + out_suffix + '.pdf')
plt.close()
for year in (2004, 2005, 2006, 2007, 2008, 2009):
months = (2,) if year == 2009 else (2, 5, 8, 11)
for month in months:
make_figure(year, (month,))
make_figure(year, months)
if control.test:
break
def make_data(control):
'return data frame, ege_control'
def process_file(path, rows_list):
'mutate rows_list to include gscv object info at path'
print 'reducing', path
with open(path, 'rb') as f:
rfval_result, ege_control = pickle.load(f)
for k, v in rfval_result.iteritems():
actuals = v.actuals.values
predictions = v.predictions
errors = actuals - predictions
rmse = np.sqrt(np.sum(errors * errors) / (1.0 * len(errors)))
median_absolute_error = np.median(np.abs(errors))
row = {
'n_months_back': k.n_months_back,
'n_estimators': k.n_estimators,
'max_depth': k.max_depth,
'max_features': k.max_features,
'test_period': str(k.yyyymm),
'rmse': rmse,
'mae': median_absolute_error,
}
rows_list.append(row)
rows_list = []
for file in glob.glob(control.path_in_ege):
ege_control = process_file(file, rows_list)
df = pd.DataFrame(rows_list)
return df, ege_control # return last ege_control, not all
def main(argv):
control = make_control(argv)
sys.stdout = Logger(base_name=control.arg.base_name)
print control
if control.arg.data:
df, ege_control = make_data(control)
with open(control.path_reduction, 'wb') as f:
pickle.dump((df, ege_control, control), f)
else:
with open(control.path_reduction, 'rb') as f:
df, ege_control, data_control = pickle.load(f)
make_chart(df, 'max_depth', control, ege_control)
make_chart(df, 'max_features', control, ege_control)
print control
if control.test:
print 'DISCARD OUTPUT: test'
print 'done'
return
if __name__ == '__main__':
if False:
# avoid pyflakes warnings
pdb.set_trace()
pprint()
pd.DataFrame()
np.array()
AVM()
ResultKey
ResultValue
main(sys.argv)
| bsd-3-clause |
florentchandelier/zipline | zipline/utils/calendars/exchange_calendar_cme.py | 7 | 3143 | #
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import time
from pandas.tseries.holiday import (
USPresidentsDay,
USLaborDay,
USThanksgivingDay,
GoodFriday
)
from pytz import timezone
# Useful resources for making changes to this file:
# http://www.cmegroup.com/tools-information/holiday-calendar.html
from .trading_calendar import TradingCalendar, HolidayCalendar
from .us_holidays import (
USNewYearsDay,
Christmas,
ChristmasEveBefore1993,
ChristmasEveInOrAfter1993,
USBlackFridayInOrAfter1993,
USNationalDaysofMourning,
USMartinLutherKingJrAfter1998,
USMemorialDay,
USIndependenceDay)
class CMEExchangeCalendar(TradingCalendar):
"""
Exchange calendar for CME
Open Time: 5:00 PM, America/Chicago
Close Time: 5:00 PM, America/Chicago
Regularly-Observed Holidays:
- New Years Day
- Good Friday
- Christmas
"""
@property
def name(self):
return "CME"
@property
def tz(self):
return timezone('America/Chicago')
@property
def open_time(self):
return time(17, 1)
@property
def close_time(self):
return time(17)
@property
def open_offset(self):
return -1
@property
def regular_holidays(self):
# The CME has different holiday rules depending on the type of
# instrument. For example, http://www.cmegroup.com/tools-information/holiday-calendar/files/2016-4th-of-july-holiday-schedule.pdf # noqa
# shows that Equity, Interest Rate, FX, Energy, Metals & DME Products
# close at 1200 CT on July 4, 2016, while Grain, Oilseed & MGEX
# Products and Livestock, Dairy & Lumber products are completely
# closed.
# For now, we will treat the CME as having a single calendar, and just
# go with the most conservative hours - and treat July 4 as an early
# close at noon.
return HolidayCalendar([
USNewYearsDay,
GoodFriday,
Christmas,
])
@property
def adhoc_holidays(self):
return USNationalDaysofMourning
@property
def special_closes(self):
return [(
time(12),
HolidayCalendar([
USMartinLutherKingJrAfter1998,
USPresidentsDay,
USMemorialDay,
USLaborDay,
USIndependenceDay,
USThanksgivingDay,
USBlackFridayInOrAfter1993,
ChristmasEveBefore1993,
ChristmasEveInOrAfter1993,
])
)]
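# Hedged usage sketch (not part of the original module): assuming zipline's
# calendar registry exposes this class under the name "CME", it could be
# retrieved and inspected roughly like this.
#
#     from zipline.utils.calendars import get_calendar
#     cme = get_calendar('CME')
#     cme.open_time     # datetime.time(17, 1); open_offset == -1 means the
#                       # session opens on the previous calendar day
#     cme.close_time    # datetime.time(17, 0)
#     cme.tz            # America/Chicago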
| apache-2.0 |
laurentgo/arrow | dev/archery/archery/lang/python.py | 3 | 7570 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import inspect
import tokenize
from contextlib import contextmanager
try:
from numpydoc.validate import Docstring, validate
except ImportError:
have_numpydoc = False
else:
have_numpydoc = True
from ..utils.command import Command, capture_stdout, default_bin
class Flake8(Command):
def __init__(self, flake8_bin=None):
self.bin = default_bin(flake8_bin, "flake8")
class Autopep8(Command):
def __init__(self, autopep8_bin=None):
self.bin = default_bin(autopep8_bin, "autopep8")
@capture_stdout()
def run_captured(self, *args, **kwargs):
return self.run(*args, **kwargs)
def _tokenize_signature(s):
lines = s.encode('ascii').splitlines()
generator = iter(lines).__next__
return tokenize.tokenize(generator)
def _convert_typehint(tokens):
names = []
opening_bracket_reached = False
for token in tokens:
# omit the tokens before the opening bracket
if not opening_bracket_reached:
if token.string == '(':
opening_bracket_reached = True
else:
continue
if token.type == 1: # type 1 means NAME token
names.append(token)
else:
if len(names) == 1:
yield (names[0].type, names[0].string)
elif len(names) == 2:
# two "NAME" tokens follow each other which means a cython
# typehint like `bool argument`, so remove the typehint
# note that we could convert it to python typehints, but hints
# are not supported by _signature_fromstr
yield (names[1].type, names[1].string)
elif len(names) > 2:
raise ValueError('More than two NAME tokens follow each other')
names = []
yield (token.type, token.string)
def inspect_signature(obj):
"""
Custom signature inspection primarily for cython generated callables.
Cython puts the signatures to the first line of the docstrings, which we
can reuse to parse the python signature from, but some gymnastics are
required, like removing the cython typehints.
It converts the cython signature:
array(obj, type=None, mask=None, size=None, from_pandas=None,
bool safe=True, MemoryPool memory_pool=None)
To:
<Signature (obj, type=None, mask=None, size=None, from_pandas=None,
safe=True, memory_pool=None)>
"""
cython_signature = obj.__doc__.splitlines()[0]
cython_tokens = _tokenize_signature(cython_signature)
python_tokens = _convert_typehint(cython_tokens)
python_signature = tokenize.untokenize(python_tokens)
return inspect._signature_fromstr(inspect.Signature, obj, python_signature)
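# Hedged illustration (not part of the original module): given a hypothetical
# object whose __doc__ starts with the cython signature quoted in the
# docstring above, inspect_signature would strip the cython typehints, e.g.
#
#     class FakeCythonCallable(object):          # stand-in, not a real API
#         __doc__ = ("array(obj, type=None, mask=None, size=None, "
#                    "from_pandas=None, bool safe=True, "
#                    "MemoryPool memory_pool=None)")
#
#     str(inspect_signature(FakeCythonCallable()))
#     # -> '(obj, type=None, mask=None, size=None, from_pandas=None, '
#     #    'safe=True, memory_pool=None)'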
class NumpyDoc:
def __init__(self, symbols=None):
if not have_numpydoc:
raise RuntimeError(
'Numpydoc is not available, install the development version '
'with command: pip install '
'git+https://github.com/numpy/numpydoc'
)
self.symbols = set(symbols or {'pyarrow'})
def traverse(self, fn, obj, from_package):
"""Apply a function on publicly exposed API components.
Recursively iterates over the members of the passed object. It omits
        any '_'-prefixed and third-party (non-pyarrow) symbols.
Parameters
----------
        fn : callable
            Function applied to each publicly exposed object.
        obj : Any
        from_package : string
Predicate to only consider objects from this package.
"""
todo = [obj]
seen = set()
while todo:
obj = todo.pop()
if obj in seen:
continue
else:
seen.add(obj)
fn(obj)
for name in dir(obj):
if name.startswith('_'):
continue
member = getattr(obj, name)
module = getattr(member, '__module__', None)
if not (module and module.startswith(from_package)):
continue
todo.append(member)
@contextmanager
def _apply_patches(self):
"""
Patch Docstring class to bypass loading already loaded python objects.
"""
orig_load_obj = Docstring._load_obj
orig_signature = inspect.signature
@staticmethod
def _load_obj(obj):
            # By default it expects a qualname and imports the object, but we
            # have already loaded the object during the API traversal.
if isinstance(obj, str):
return orig_load_obj(obj)
else:
return obj
def signature(obj):
            # inspect.signature tries to parse __text_signature__ if other
            # properties like __signature__ don't exist, but cython doesn't
            # set that property even though the embedsignature cython
            # directive is set. The only way to inspect a cython compiled
            # callable's signature is to parse it from __doc__ while the
            # embedsignature directive is set during the build phase.
            # So patch the inspect.signature function to attempt to parse
            # the first line of callable.__doc__ as a signature.
try:
return orig_signature(obj)
except Exception as orig_error:
try:
return inspect_signature(obj)
except Exception:
raise orig_error
try:
Docstring._load_obj = _load_obj
inspect.signature = signature
yield
finally:
Docstring._load_obj = orig_load_obj
inspect.signature = orig_signature
def validate(self, from_package='', allow_rules=None,
disallow_rules=None):
results = []
def callback(obj):
result = validate(obj)
errors = []
for errcode, errmsg in result.get('errors', []):
if allow_rules and errcode not in allow_rules:
continue
if disallow_rules and errcode in disallow_rules:
continue
errors.append((errcode, errmsg))
if len(errors):
result['errors'] = errors
results.append((obj, result))
with self._apply_patches():
for symbol in self.symbols:
try:
obj = Docstring._load_obj(symbol)
except (ImportError, AttributeError):
print('{} is not available for import'.format(symbol))
else:
self.traverse(callback, obj, from_package=from_package)
return results
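# Hedged usage sketch (not part of the original module): assuming pyarrow is
# importable and the numpydoc development version is installed, validation
# could be driven roughly like this (GL01/GL02 are example numpydoc rule
# codes).
#
#     nd = NumpyDoc(symbols={'pyarrow'})
#     results = nd.validate(from_package='pyarrow',
#                           disallow_rules={'GL01', 'GL02'})
#     for obj, result in results:
#         for errcode, errmsg in result.get('errors', []):
#             print(obj, errcode, errmsg)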
| apache-2.0 |
larsmans/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 256 | 2406 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
    # TASK: print the cross-validated scores for each parameter set
# explored by the grid search
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
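    # --- A hedged sketch of one possible completion (not the official
    # solution; the filtering thresholds min_df=3 / max_df=0.95, C=1000 and
    # the n-gram grid below are illustrative choices) ---
    pipeline = Pipeline([
        ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
        ('clf', LinearSVC(C=1000)),
    ])
    # grid search over unigrams vs. unigrams + bigrams
    parameters = {'vect__ngram_range': [(1, 1), (1, 2)]}
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
    grid_search.fit(docs_train, y_train)
    # cross-validated scores for each parameter set explored by the grid search
    for params, mean_score, scores in grid_search.grid_scores_:
        print("%0.3f (+/-%0.03f) for %r"
              % (mean_score, scores.std() * 2, params))
    # predictions on the held-out test set, stored as y_predicted for the
    # report below
    y_predicted = grid_search.predict(docs_test)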
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
yavalvas/yav_com | build/matplotlib/examples/pylab_examples/image_interp.py | 6 | 1925 | #!/usr/bin/env python
"""
The same (small) array, interpolated with three different
interpolation methods.
The center of the pixel at A[i,j] is plotted at (i+0.5, j+0.5). If you
are using interpolation='nearest', the region bounded by (i,j) and
(i+1,j+1) will have the same color. If you are using another
interpolation method, the pixel center will have the same color as it
does with nearest, but
other pixels will be interpolated between the neighboring pixels.
Earlier versions of matplotlib (<0.63) tried to hide the edge effects
from you by setting the view limits so that they would not be visible.
A recent bugfix in antigrain, and a new implementation in the
matplotlib._image module which takes advantage of this fix, no longer
makes this necessary. To prevent edge effects, when doing
interpolation, the matplotlib._image module now pads the input array
with identical pixels around the edge. e.g., if you have a 5x5 array
with colors a-y as below
a b c d e
f g h i j
k l m n o
p q r s t
u v w x y
the _image module creates the padded array,
a a b c d e e
a a b c d e e
f f g h i j j
k k l m n o o
p p q r s t t
u u v w x y y
u u v w x y y
does the interpolation/resizing, and then extracts the central region.
This allows you to plot the full range of your array w/o edge effects,
and for example to layer multiple images of different sizes over one
another with different interpolation methods - see
examples/layer_images.py. It also implies a performance hit, as this
new temporary, padded array must be created. Sophisticated
interpolation also implies a performance hit, so if you need maximal
performance or have very large images, interpolation='nearest' is
suggested.
"""
from pylab import *
A = rand(5,5)
figure(1)
imshow(A, interpolation='nearest')
grid(True)
figure(2)
imshow(A, interpolation='bilinear')
grid(True)
figure(3)
imshow(A, interpolation='bicubic')
grid(True)
show()
| mit |
MJuddBooth/pandas | pandas/tests/indexes/interval/test_interval.py | 1 | 52262 | from __future__ import division
from itertools import permutations
import re
import numpy as np
import pytest
from pandas.compat import lzip
import pandas as pd
from pandas import (
Index, Interval, IntervalIndex, Timedelta, Timestamp, date_range,
interval_range, isna, notna, timedelta_range)
import pandas.core.common as com
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
def test_properties(self, closed):
index = self.create_index(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
tm.assert_index_equal(index.left, Index(np.arange(10)))
tm.assert_index_equal(index.right, Index(np.arange(1, 11)))
tm.assert_index_equal(index.mid, Index(np.arange(0.5, 10.5)))
assert index.closed == closed
ivs = [Interval(l, r, closed) for l, r in zip(range(10), range(1, 11))]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
# with nans
index = self.create_index_with_nan(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
expected_left = Index([0, np.nan, 2, 3, 4, 5, 6, 7, 8, 9])
expected_right = expected_left + 1
expected_mid = expected_left + 0.5
tm.assert_index_equal(index.left, expected_left)
tm.assert_index_equal(index.right, expected_right)
tm.assert_index_equal(index.mid, expected_mid)
assert index.closed == closed
ivs = [Interval(l, r, closed) if notna(l) else np.nan
for l, r in zip(expected_left, expected_right)]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
@pytest.mark.parametrize('breaks', [
[1, 1, 2, 5, 15, 53, 217, 1014, 5335, 31240, 201608],
[-np.inf, -100, -10, 0.5, 1, 1.5, 3.8, 101, 202, np.inf],
pd.to_datetime(['20170101', '20170202', '20170303', '20170404']),
pd.to_timedelta(['1ns', '2ms', '3s', '4M', '5H', '6D'])])
def test_length(self, closed, breaks):
# GH 18789
index = IntervalIndex.from_breaks(breaks, closed=closed)
result = index.length
expected = Index(iv.length for iv in index)
tm.assert_index_equal(result, expected)
# with NA
index = index.insert(1, np.nan)
result = index.length
expected = Index(iv.length if notna(iv) else iv for iv in index)
tm.assert_index_equal(result, expected)
def test_with_nans(self, closed):
index = self.create_index(closed=closed)
assert index.hasnans is False
result = index.isna()
expected = np.repeat(False, len(index))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.repeat(True, len(index))
tm.assert_numpy_array_equal(result, expected)
index = self.create_index_with_nan(closed=closed)
assert index.hasnans is True
result = index.isna()
expected = np.array([False, True] + [False] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.array([True, False] + [True] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
def test_copy(self, closed):
expected = self.create_index(closed=closed)
result = expected.copy()
assert result.equals(expected)
result = expected.copy(deep=True)
assert result.equals(expected)
assert result.left is not expected.left
def test_ensure_copied_data(self, closed):
# exercise the copy flag in the constructor
# not copying
index = self.create_index(closed=closed)
result = IntervalIndex(index, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='same')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='same')
# by-definition make a copy
result = IntervalIndex(index._ndarray_values, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='copy')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='copy')
def test_equals(self, closed):
expected = IntervalIndex.from_breaks(np.arange(5), closed=closed)
assert expected.equals(expected)
assert expected.equals(expected.copy())
assert not expected.equals(expected.astype(object))
assert not expected.equals(np.array(expected))
assert not expected.equals(list(expected))
assert not expected.equals([1, 2])
assert not expected.equals(np.array([1, 2]))
assert not expected.equals(pd.date_range('20130101', periods=2))
expected_name1 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='foo')
expected_name2 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='bar')
assert expected.equals(expected_name1)
assert expected_name1.equals(expected_name2)
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
expected_other_closed = IntervalIndex.from_breaks(
np.arange(5), closed=other_closed)
assert not expected.equals(expected_other_closed)
@pytest.mark.parametrize('klass', [list, tuple, np.array, pd.Series])
def test_where(self, closed, klass):
idx = self.create_index(closed=closed)
cond = [True] * len(idx)
expected = idx
result = expected.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(idx[1:])
expected = IntervalIndex([np.nan] + idx[1:].tolist())
result = idx.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_delete(self, closed):
expected = IntervalIndex.from_breaks(np.arange(1, 11), closed=closed)
result = self.create_index(closed=closed).delete(0)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [
interval_range(0, periods=10, closed='neither'),
interval_range(1.7, periods=8, freq=2.5, closed='both'),
interval_range(Timestamp('20170101'), periods=12, closed='left'),
interval_range(Timedelta('1 day'), periods=6, closed='right')])
def test_insert(self, data):
item = data[0]
idx_item = IntervalIndex([item])
# start
expected = idx_item.append(data)
result = data.insert(0, item)
tm.assert_index_equal(result, expected)
# end
expected = data.append(idx_item)
result = data.insert(len(data), item)
tm.assert_index_equal(result, expected)
# mid
expected = data[:3].append(idx_item).append(data[3:])
result = data.insert(3, item)
tm.assert_index_equal(result, expected)
# invalid type
msg = 'can only insert Interval objects and NA into an IntervalIndex'
with pytest.raises(ValueError, match=msg):
data.insert(1, 'foo')
# invalid closed
msg = 'inserted item must be closed on the same side as the index'
for closed in {'left', 'right', 'both', 'neither'} - {item.closed}:
with pytest.raises(ValueError, match=msg):
bad_item = Interval(item.left, item.right, closed=closed)
data.insert(1, bad_item)
# GH 18295 (test missing)
na_idx = IntervalIndex([np.nan], closed=data.closed)
for na in (np.nan, pd.NaT, None):
expected = data[:1].append(na_idx).append(data[1:])
result = data.insert(1, na)
tm.assert_index_equal(result, expected)
def test_take(self, closed):
index = self.create_index(closed=closed)
result = index.take(range(10))
tm.assert_index_equal(result, index)
result = index.take([0, 0, 1])
expected = IntervalIndex.from_arrays(
[0, 0, 1], [1, 1, 2], closed=closed)
tm.assert_index_equal(result, expected)
def test_is_unique_interval(self, closed):
"""
Interval specific tests for is_unique in addition to base class tests
"""
# unique overlapping - distinct endpoints
idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], closed=closed)
assert idx.is_unique is True
# unique overlapping - shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_unique is True
# unique nested
idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], closed=closed)
assert idx.is_unique is True
def test_monotonic(self, closed):
# increasing non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_monotonic is True
assert idx._is_strictly_monotonic_increasing is True
assert idx.is_monotonic_decreasing is False
assert idx._is_strictly_monotonic_decreasing is False
# decreasing non-overlapping
idx = IntervalIndex.from_tuples(
[(4, 5), (2, 3), (1, 2)], closed=closed)
assert idx.is_monotonic is False
assert idx._is_strictly_monotonic_increasing is False
assert idx.is_monotonic_decreasing is True
assert idx._is_strictly_monotonic_decreasing is True
# unordered non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (4, 5), (2, 3)], closed=closed)
assert idx.is_monotonic is False
assert idx._is_strictly_monotonic_increasing is False
assert idx.is_monotonic_decreasing is False
assert idx._is_strictly_monotonic_decreasing is False
# increasing overlapping
idx = IntervalIndex.from_tuples(
[(0, 2), (0.5, 2.5), (1, 3)], closed=closed)
assert idx.is_monotonic is True
assert idx._is_strictly_monotonic_increasing is True
assert idx.is_monotonic_decreasing is False
assert idx._is_strictly_monotonic_decreasing is False
# decreasing overlapping
idx = IntervalIndex.from_tuples(
[(1, 3), (0.5, 2.5), (0, 2)], closed=closed)
assert idx.is_monotonic is False
assert idx._is_strictly_monotonic_increasing is False
assert idx.is_monotonic_decreasing is True
assert idx._is_strictly_monotonic_decreasing is True
# unordered overlapping
idx = IntervalIndex.from_tuples(
[(0.5, 2.5), (0, 2), (1, 3)], closed=closed)
assert idx.is_monotonic is False
assert idx._is_strictly_monotonic_increasing is False
assert idx.is_monotonic_decreasing is False
assert idx._is_strictly_monotonic_decreasing is False
# increasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_monotonic is True
assert idx._is_strictly_monotonic_increasing is True
assert idx.is_monotonic_decreasing is False
assert idx._is_strictly_monotonic_decreasing is False
# decreasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(2, 3), (1, 3), (1, 2)], closed=closed)
assert idx.is_monotonic is False
assert idx._is_strictly_monotonic_increasing is False
assert idx.is_monotonic_decreasing is True
assert idx._is_strictly_monotonic_decreasing is True
# stationary
idx = IntervalIndex.from_tuples([(0, 1), (0, 1)], closed=closed)
assert idx.is_monotonic is True
assert idx._is_strictly_monotonic_increasing is False
assert idx.is_monotonic_decreasing is True
assert idx._is_strictly_monotonic_decreasing is False
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_monotonic is True
assert idx._is_strictly_monotonic_increasing is True
assert idx.is_monotonic_decreasing is True
assert idx._is_strictly_monotonic_decreasing is True
@pytest.mark.skip(reason='not a valid repr as we use interval notation')
def test_repr(self):
i = IntervalIndex.from_tuples([(0, 1), (1, 2)], closed='right')
expected = ("IntervalIndex(left=[0, 1],"
"\n right=[1, 2],"
"\n closed='right',"
"\n dtype='interval[int64]')")
assert repr(i) == expected
        i = IntervalIndex.from_tuples([(Timestamp('20130101'),
                                        Timestamp('20130102')),
                                       (Timestamp('20130102'),
                                        Timestamp('20130103'))],
                                      closed='right')
expected = ("IntervalIndex(left=['2013-01-01', '2013-01-02'],"
"\n right=['2013-01-02', '2013-01-03'],"
"\n closed='right',"
"\n dtype='interval[datetime64[ns]]')")
assert repr(i) == expected
@pytest.mark.skip(reason='not a valid repr as we use interval notation')
def test_repr_max_seq_item_setting(self):
super(TestIntervalIndex, self).test_repr_max_seq_item_setting()
@pytest.mark.skip(reason='not a valid repr as we use interval notation')
def test_repr_roundtrip(self):
super(TestIntervalIndex, self).test_repr_roundtrip()
def test_frame_repr(self):
# https://github.com/pandas-dev/pandas/pull/24134/files
df = pd.DataFrame({'A': [1, 2, 3, 4]},
index=pd.IntervalIndex.from_breaks([0, 1, 2, 3, 4]))
result = repr(df)
expected = (
' A\n'
'(0, 1] 1\n'
'(1, 2] 2\n'
'(2, 3] 3\n'
'(3, 4] 4'
)
assert result == expected
# TODO: check this behavior is consistent with test_interval_new.py
def test_get_item(self, closed):
i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan),
closed=closed)
assert i[0] == Interval(0.0, 1.0, closed=closed)
assert i[1] == Interval(1.0, 2.0, closed=closed)
assert isna(i[2])
result = i[0:1]
expected = IntervalIndex.from_arrays((0.,), (1.,), closed=closed)
tm.assert_index_equal(result, expected)
result = i[0:2]
expected = IntervalIndex.from_arrays((0., 1), (1., 2.), closed=closed)
tm.assert_index_equal(result, expected)
result = i[1:3]
expected = IntervalIndex.from_arrays((1., np.nan), (2., np.nan),
closed=closed)
tm.assert_index_equal(result, expected)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_get_loc_value(self):
with pytest.raises(KeyError, match="^0$"):
self.index.get_loc(0)
assert self.index.get_loc(0.5) == 0
assert self.index.get_loc(1) == 0
assert self.index.get_loc(1.5) == 1
assert self.index.get_loc(2) == 1
with pytest.raises(KeyError, match="^-1$"):
self.index.get_loc(-1)
with pytest.raises(KeyError, match="^3$"):
self.index.get_loc(3)
idx = IntervalIndex.from_tuples([(0, 2), (1, 3)])
assert idx.get_loc(0.5) == 0
assert idx.get_loc(1) == 0
tm.assert_numpy_array_equal(idx.get_loc(1.5),
np.array([0, 1], dtype='intp'))
tm.assert_numpy_array_equal(np.sort(idx.get_loc(2)),
np.array([0, 1], dtype='intp'))
assert idx.get_loc(3) == 1
with pytest.raises(KeyError, match=r"^3\.5$"):
idx.get_loc(3.5)
idx = IntervalIndex.from_arrays([0, 2], [1, 3])
with pytest.raises(KeyError, match=r"^1\.5$"):
idx.get_loc(1.5)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def slice_locs_cases(self, breaks):
# TODO: same tests for more index types
index = IntervalIndex.from_breaks([0, 1, 2], closed='right')
assert index.slice_locs() == (0, 2)
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(0, 0.5) == (0, 1)
assert index.slice_locs(start=1) == (0, 2)
assert index.slice_locs(start=1.2) == (1, 2)
assert index.slice_locs(end=1) == (0, 1)
assert index.slice_locs(end=1.1) == (0, 2)
assert index.slice_locs(end=1.0) == (0, 1)
assert index.slice_locs(-1, -1) == (0, 0)
index = IntervalIndex.from_breaks([0, 1, 2], closed='neither')
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(1, 1) == (1, 1)
assert index.slice_locs(1, 2) == (1, 2)
index = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)],
closed='both')
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(1, 2) == (0, 2)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_slice_locs_int64(self):
self.slice_locs_cases([0, 1, 2])
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_slice_locs_float64(self):
self.slice_locs_cases([0.0, 1.0, 2.0])
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def slice_locs_decreasing_cases(self, tuples):
index = IntervalIndex.from_tuples(tuples)
assert index.slice_locs(1.5, 0.5) == (1, 3)
assert index.slice_locs(2, 0) == (1, 3)
assert index.slice_locs(2, 1) == (1, 3)
assert index.slice_locs(3, 1.1) == (0, 3)
assert index.slice_locs(3, 3) == (0, 2)
assert index.slice_locs(3.5, 3.3) == (0, 1)
assert index.slice_locs(1, -3) == (2, 3)
slice_locs = index.slice_locs(-1, -1)
assert slice_locs[0] == slice_locs[1]
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_slice_locs_decreasing_int64(self):
        self.slice_locs_decreasing_cases([(2, 4), (1, 3), (0, 2)])
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_slice_locs_decreasing_float64(self):
        self.slice_locs_decreasing_cases([(2., 4.), (1., 3.), (0., 2.)])
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_slice_locs_fails(self):
index = IntervalIndex.from_tuples([(1, 2), (0, 1), (2, 3)])
msg = ("'can only get slices from an IntervalIndex if bounds are"
" non-overlapping and all monotonic increasing or decreasing'")
with pytest.raises(KeyError, match=msg):
index.slice_locs(1, 2)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_get_loc_interval(self):
assert self.index.get_loc(Interval(0, 1)) == 0
assert self.index.get_loc(Interval(0, 0.5)) == 0
assert self.index.get_loc(Interval(0, 1, 'left')) == 0
msg = r"Interval\(2, 3, closed='right'\)"
with pytest.raises(KeyError, match=msg):
self.index.get_loc(Interval(2, 3))
msg = r"Interval\(-1, 0, closed='left'\)"
with pytest.raises(KeyError, match=msg):
self.index.get_loc(Interval(-1, 0, 'left'))
# Make consistent with test_interval_new.py (see #16316, #16386)
@pytest.mark.parametrize('item', [3, Interval(1, 4)])
def test_get_loc_length_one(self, item, closed):
# GH 20921
index = IntervalIndex.from_tuples([(0, 5)], closed=closed)
result = index.get_loc(item)
assert result == 0
# Make consistent with test_interval_new.py (see #16316, #16386)
@pytest.mark.parametrize('breaks', [
date_range('20180101', periods=4),
date_range('20180101', periods=4, tz='US/Eastern'),
timedelta_range('0 days', periods=4)], ids=lambda x: str(x.dtype))
def test_get_loc_datetimelike_nonoverlapping(self, breaks):
# GH 20636
# nonoverlapping = IntervalIndex method and no i8 conversion
index = IntervalIndex.from_breaks(breaks)
value = index[0].mid
result = index.get_loc(value)
expected = 0
assert result == expected
interval = Interval(index[0].left, index[1].right)
result = index.get_loc(interval)
expected = slice(0, 2)
assert result == expected
# Make consistent with test_interval_new.py (see #16316, #16386)
@pytest.mark.parametrize('arrays', [
(date_range('20180101', periods=4), date_range('20180103', periods=4)),
(date_range('20180101', periods=4, tz='US/Eastern'),
date_range('20180103', periods=4, tz='US/Eastern')),
(timedelta_range('0 days', periods=4),
timedelta_range('2 days', periods=4))], ids=lambda x: str(x[0].dtype))
def test_get_loc_datetimelike_overlapping(self, arrays):
# GH 20636
# overlapping = IntervalTree method with i8 conversion
index = IntervalIndex.from_arrays(*arrays)
value = index[0].mid + Timedelta('12 hours')
result = np.sort(index.get_loc(value))
expected = np.array([0, 1], dtype='intp')
        tm.assert_numpy_array_equal(result, expected)
interval = Interval(index[0].left, index[1].right)
result = np.sort(index.get_loc(interval))
expected = np.array([0, 1, 2], dtype='intp')
        tm.assert_numpy_array_equal(result, expected)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_get_indexer(self):
actual = self.index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(self.index)
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
index = IntervalIndex.from_breaks([0, 1, 2], closed='left')
actual = index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, 0, 0, 1, 1, -1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index[:1])
expected = np.array([0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index)
expected = np.array([-1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_get_indexer_subintervals(self):
# TODO: is this right?
# return indexers for wholly contained subintervals
target = IntervalIndex.from_breaks(np.linspace(0, 2, 5))
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='p')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.67, 1.33, 2])
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(target[[0, -1]])
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.33, 0.67, 1], closed='left')
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
# Make consistent with test_interval_new.py (see #16316, #16386)
@pytest.mark.parametrize('item', [
[3], np.arange(1, 5), [Interval(1, 4)], interval_range(1, 4)])
def test_get_indexer_length_one(self, item, closed):
# GH 17284
index = IntervalIndex.from_tuples([(0, 5)], closed=closed)
result = index.get_indexer(item)
expected = np.array([0] * len(item), dtype='intp')
tm.assert_numpy_array_equal(result, expected)
# Make consistent with test_interval_new.py (see #16316, #16386)
@pytest.mark.parametrize('arrays', [
(date_range('20180101', periods=4), date_range('20180103', periods=4)),
(date_range('20180101', periods=4, tz='US/Eastern'),
date_range('20180103', periods=4, tz='US/Eastern')),
(timedelta_range('0 days', periods=4),
timedelta_range('2 days', periods=4))], ids=lambda x: str(x[0].dtype))
def test_get_reindexer_datetimelike(self, arrays):
# GH 20636
index = IntervalIndex.from_arrays(*arrays)
tuples = [(index[0].left, index[0].left + pd.Timedelta('12H')),
(index[-1].right - pd.Timedelta('12H'), index[-1].right)]
target = IntervalIndex.from_tuples(tuples)
result = index._get_reindexer(target)
expected = np.array([0, 3], dtype='intp')
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('breaks', [
date_range('20180101', periods=4),
date_range('20180101', periods=4, tz='US/Eastern'),
timedelta_range('0 days', periods=4)], ids=lambda x: str(x.dtype))
def test_maybe_convert_i8(self, breaks):
# GH 20636
index = IntervalIndex.from_breaks(breaks)
# intervalindex
result = index._maybe_convert_i8(index)
expected = IntervalIndex.from_breaks(breaks.asi8)
tm.assert_index_equal(result, expected)
# interval
interval = Interval(breaks[0], breaks[1])
result = index._maybe_convert_i8(interval)
expected = Interval(breaks[0].value, breaks[1].value)
assert result == expected
# datetimelike index
result = index._maybe_convert_i8(breaks)
expected = Index(breaks.asi8)
tm.assert_index_equal(result, expected)
# datetimelike scalar
result = index._maybe_convert_i8(breaks[0])
expected = breaks[0].value
assert result == expected
# list-like of datetimelike scalars
result = index._maybe_convert_i8(list(breaks))
expected = Index(breaks.asi8)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('breaks', [
date_range('2018-01-01', periods=5),
timedelta_range('0 days', periods=5)])
def test_maybe_convert_i8_nat(self, breaks):
# GH 20636
index = IntervalIndex.from_breaks(breaks)
to_convert = breaks._constructor([pd.NaT] * 3)
expected = pd.Float64Index([np.nan] * 3)
result = index._maybe_convert_i8(to_convert)
tm.assert_index_equal(result, expected)
to_convert = to_convert.insert(0, breaks[0])
expected = expected.insert(0, float(breaks[0].value))
result = index._maybe_convert_i8(to_convert)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('breaks', [
np.arange(5, dtype='int64'),
np.arange(5, dtype='float64')], ids=lambda x: str(x.dtype))
@pytest.mark.parametrize('make_key', [
IntervalIndex.from_breaks,
lambda breaks: Interval(breaks[0], breaks[1]),
lambda breaks: breaks,
lambda breaks: breaks[0],
list], ids=['IntervalIndex', 'Interval', 'Index', 'scalar', 'list'])
def test_maybe_convert_i8_numeric(self, breaks, make_key):
# GH 20636
index = IntervalIndex.from_breaks(breaks)
key = make_key(breaks)
# no conversion occurs for numeric
result = index._maybe_convert_i8(key)
assert result is key
@pytest.mark.parametrize('breaks1, breaks2', permutations([
date_range('20180101', periods=4),
date_range('20180101', periods=4, tz='US/Eastern'),
timedelta_range('0 days', periods=4)], 2), ids=lambda x: str(x.dtype))
@pytest.mark.parametrize('make_key', [
IntervalIndex.from_breaks,
lambda breaks: Interval(breaks[0], breaks[1]),
lambda breaks: breaks,
lambda breaks: breaks[0],
list], ids=['IntervalIndex', 'Interval', 'Index', 'scalar', 'list'])
def test_maybe_convert_i8_errors(self, breaks1, breaks2, make_key):
# GH 20636
index = IntervalIndex.from_breaks(breaks1)
key = make_key(breaks2)
msg = ('Cannot index an IntervalIndex of subtype {dtype1} with '
'values of dtype {dtype2}')
msg = re.escape(msg.format(dtype1=breaks1.dtype, dtype2=breaks2.dtype))
with pytest.raises(ValueError, match=msg):
index._maybe_convert_i8(key)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_contains(self):
# Only endpoints are valid.
i = IntervalIndex.from_arrays([0, 1], [1, 2])
# Invalid
assert 0 not in i
assert 1 not in i
assert 2 not in i
# Valid
assert Interval(0, 1) in i
assert Interval(0, 2) in i
assert Interval(0, 0.5) in i
assert Interval(3, 5) not in i
assert Interval(-1, 0, closed='left') not in i
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def testcontains(self):
# can select values that are IN the range of a value
i = IntervalIndex.from_arrays([0, 1], [1, 2])
assert i.contains(0.1)
assert i.contains(0.5)
assert i.contains(1)
assert i.contains(Interval(0, 1))
assert i.contains(Interval(0, 2))
# these overlaps completely
assert i.contains(Interval(0, 3))
assert i.contains(Interval(1, 3))
assert not i.contains(20)
assert not i.contains(-20)
def test_dropna(self, closed):
expected = IntervalIndex.from_tuples(
[(0.0, 1.0), (1.0, 2.0)], closed=closed)
ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
ii = IntervalIndex.from_arrays(
[0, 1, np.nan], [1, 2, np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
# TODO: check this behavior is consistent with test_interval_new.py
def test_non_contiguous(self, closed):
index = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=closed)
target = [0.5, 1.5, 2.5]
actual = index.get_indexer(target)
expected = np.array([0, -1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
assert 1.5 not in index
@pytest.mark.parametrize("sort", [None, False])
def test_union(self, closed, sort):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(13), closed=closed)
result = index[::-1].union(other, sort=sort)
if sort is None:
tm.assert_index_equal(result, expected)
assert tm.equalContents(result, expected)
result = other[::-1].union(index, sort=sort)
if sort is None:
tm.assert_index_equal(result, expected)
assert tm.equalContents(result, expected)
tm.assert_index_equal(index.union(index, sort=sort), index)
tm.assert_index_equal(index.union(index[:1], sort=sort), index)
# GH 19101: empty result, same dtype
index = IntervalIndex(np.array([], dtype='int64'), closed=closed)
result = index.union(index, sort=sort)
tm.assert_index_equal(result, index)
# GH 19101: empty result, different dtypes
other = IntervalIndex(np.array([], dtype='float64'), closed=closed)
result = index.union(other, sort=sort)
tm.assert_index_equal(result, index)
@pytest.mark.parametrize("sort", [None, False])
def test_intersection(self, closed, sort):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(5, 11), closed=closed)
result = index[::-1].intersection(other, sort=sort)
if sort is None:
tm.assert_index_equal(result, expected)
assert tm.equalContents(result, expected)
result = other[::-1].intersection(index, sort=sort)
if sort is None:
tm.assert_index_equal(result, expected)
assert tm.equalContents(result, expected)
tm.assert_index_equal(index.intersection(index, sort=sort), index)
# GH 19101: empty result, same dtype
other = IntervalIndex.from_breaks(range(300, 314), closed=closed)
expected = IntervalIndex(np.array([], dtype='int64'), closed=closed)
result = index.intersection(other, sort=sort)
tm.assert_index_equal(result, expected)
# GH 19101: empty result, different dtypes
breaks = np.arange(300, 314, dtype='float64')
other = IntervalIndex.from_breaks(breaks, closed=closed)
result = index.intersection(other, sort=sort)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("sort", [None, False])
def test_difference(self, closed, sort):
index = IntervalIndex.from_arrays([1, 0, 3, 2],
[1, 2, 3, 4],
closed=closed)
result = index.difference(index[:1], sort=sort)
expected = index[1:]
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
# GH 19101: empty result, same dtype
result = index.difference(index, sort=sort)
expected = IntervalIndex(np.array([], dtype='int64'), closed=closed)
tm.assert_index_equal(result, expected)
# GH 19101: empty result, different dtypes
other = IntervalIndex.from_arrays(index.left.astype('float64'),
index.right, closed=closed)
result = index.difference(other, sort=sort)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("sort", [None, False])
def test_symmetric_difference(self, closed, sort):
index = self.create_index(closed=closed)
result = index[1:].symmetric_difference(index[:-1], sort=sort)
expected = IntervalIndex([index[0], index[-1]])
if sort is None:
tm.assert_index_equal(result, expected)
assert tm.equalContents(result, expected)
# GH 19101: empty result, same dtype
result = index.symmetric_difference(index, sort=sort)
expected = IntervalIndex(np.array([], dtype='int64'), closed=closed)
if sort is None:
tm.assert_index_equal(result, expected)
assert tm.equalContents(result, expected)
# GH 19101: empty result, different dtypes
other = IntervalIndex.from_arrays(index.left.astype('float64'),
index.right, closed=closed)
result = index.symmetric_difference(other, sort=sort)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('op_name', [
'union', 'intersection', 'difference', 'symmetric_difference'])
@pytest.mark.parametrize("sort", [None, False])
def test_set_operation_errors(self, closed, op_name, sort):
index = self.create_index(closed=closed)
set_op = getattr(index, op_name)
# non-IntervalIndex
msg = ('the other index needs to be an IntervalIndex too, but '
'was type Int64Index')
with pytest.raises(TypeError, match=msg):
set_op(Index([1, 2, 3]), sort=sort)
# mixed closed
msg = ('can only do set operations between two IntervalIndex objects '
'that are closed on the same side')
for other_closed in {'right', 'left', 'both', 'neither'} - {closed}:
other = self.create_index(closed=other_closed)
with pytest.raises(ValueError, match=msg):
set_op(other, sort=sort)
# GH 19016: incompatible dtypes
other = interval_range(Timestamp('20180101'), periods=9, closed=closed)
msg = ('can only do {op} between two IntervalIndex objects that have '
'compatible dtypes').format(op=op_name)
with pytest.raises(TypeError, match=msg):
set_op(other, sort=sort)
def test_isin(self, closed):
index = self.create_index(closed=closed)
expected = np.array([True] + [False] * (len(index) - 1))
result = index.isin(index[:1])
tm.assert_numpy_array_equal(result, expected)
result = index.isin([index[0]])
tm.assert_numpy_array_equal(result, expected)
other = IntervalIndex.from_breaks(np.arange(-2, 10), closed=closed)
expected = np.array([True] * (len(index) - 1) + [False])
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
for other_closed in {'right', 'left', 'both', 'neither'}:
other = self.create_index(closed=other_closed)
expected = np.repeat(closed == other_closed, len(index))
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
def test_comparison(self):
actual = Interval(0, 1) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = Interval(0.5, 1.5) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > Interval(0.5, 1.5)
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index
expected = np.array([True, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index <= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index >= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index < self.index
expected = np.array([False, False])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == IntervalIndex.from_breaks([0, 1, 2], 'left')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index.values == self.index
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index <= self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index != self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index > self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index.values > self.index
tm.assert_numpy_array_equal(actual, np.array([False, False]))
# invalid comparisons
actual = self.index == 0
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index == self.index.left
tm.assert_numpy_array_equal(actual, np.array([False, False]))
with pytest.raises(TypeError, match='unorderable types'):
self.index > 0
with pytest.raises(TypeError, match='unorderable types'):
self.index <= 0
msg = r"unorderable types: Interval\(\) > int\(\)"
with pytest.raises(TypeError, match=msg):
self.index > np.arange(2)
msg = "Lengths must match to compare"
with pytest.raises(ValueError, match=msg):
self.index > np.arange(3)
def test_missing_values(self, closed):
idx = Index([np.nan, Interval(0, 1, closed=closed),
Interval(1, 2, closed=closed)])
idx2 = IntervalIndex.from_arrays(
[np.nan, 0, 1], [np.nan, 1, 2], closed=closed)
assert idx.equals(idx2)
msg = ("missing values must be missing in the same location both left"
" and right sides")
with pytest.raises(ValueError, match=msg):
IntervalIndex.from_arrays(
[np.nan, 0, 1], np.array([0, 1, 2]), closed=closed)
tm.assert_numpy_array_equal(isna(idx),
np.array([True, False, False]))
def test_sort_values(self, closed):
index = self.create_index(closed=closed)
result = index.sort_values()
tm.assert_index_equal(result, index)
result = index.sort_values(ascending=False)
tm.assert_index_equal(result, index[::-1])
# with nan
index = IntervalIndex([Interval(1, 2), np.nan, Interval(0, 1)])
result = index.sort_values()
expected = IntervalIndex([Interval(0, 1), Interval(1, 2), np.nan])
tm.assert_index_equal(result, expected)
result = index.sort_values(ascending=False)
expected = IntervalIndex([np.nan, Interval(1, 2), Interval(0, 1)])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'US/Eastern'])
def test_datetime(self, tz):
start = Timestamp('2000-01-01', tz=tz)
dates = date_range(start=start, periods=10)
index = IntervalIndex.from_breaks(dates)
# test mid
start = Timestamp('2000-01-01T12:00', tz=tz)
expected = date_range(start=start, periods=9)
tm.assert_index_equal(index.mid, expected)
# __contains__ doesn't check individual points
assert Timestamp('2000-01-01', tz=tz) not in index
assert Timestamp('2000-01-01T12', tz=tz) not in index
assert Timestamp('2000-01-02', tz=tz) not in index
iv_true = Interval(Timestamp('2000-01-01T08', tz=tz),
Timestamp('2000-01-01T18', tz=tz))
iv_false = Interval(Timestamp('1999-12-31', tz=tz),
Timestamp('2000-01-01', tz=tz))
assert iv_true in index
assert iv_false not in index
# .contains does check individual points
assert not index.contains(Timestamp('2000-01-01', tz=tz))
assert index.contains(Timestamp('2000-01-01T12', tz=tz))
assert index.contains(Timestamp('2000-01-02', tz=tz))
assert index.contains(iv_true)
assert not index.contains(iv_false)
# test get_indexer
start = Timestamp('1999-12-31T12:00', tz=tz)
target = date_range(start=start, periods=7, freq='12H')
actual = index.get_indexer(target)
expected = np.array([-1, -1, 0, 0, 1, 1, 2], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
start = Timestamp('2000-01-08T18:00', tz=tz)
target = date_range(start=start, periods=7, freq='6H')
actual = index.get_indexer(target)
expected = np.array([7, 7, 8, 8, 8, 8, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_append(self, closed):
index1 = IntervalIndex.from_arrays([0, 1], [1, 2], closed=closed)
index2 = IntervalIndex.from_arrays([1, 2], [2, 3], closed=closed)
result = index1.append(index2)
expected = IntervalIndex.from_arrays(
[0, 1, 1, 2], [1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
result = index1.append([index1, index2])
expected = IntervalIndex.from_arrays(
[0, 1, 0, 1, 1, 2], [1, 2, 1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
msg = ('can only append two IntervalIndex objects that are closed '
'on the same side')
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
index_other_closed = IntervalIndex.from_arrays(
[0, 1], [1, 2], closed=other_closed)
with pytest.raises(ValueError, match=msg):
index1.append(index_other_closed)
def test_is_non_overlapping_monotonic(self, closed):
# Should be True in all cases
tpls = [(0, 1), (2, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is True
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is True
# Should be False in all cases (overlapping)
tpls = [(0, 2), (1, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is False
# Should be False in all cases (non-monotonic)
tpls = [(0, 1), (2, 3), (6, 7), (4, 5)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is False
# Should be False for closed='both', otherwise True (GH16560)
if closed == 'both':
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is False
else:
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is True
@pytest.mark.parametrize('start, shift, na_value', [
(0, 1, np.nan),
(Timestamp('2018-01-01'), Timedelta('1 day'), pd.NaT),
(Timedelta('0 days'), Timedelta('1 day'), pd.NaT)])
def test_is_overlapping(self, start, shift, na_value, closed):
# GH 23309
# see test_interval_tree.py for extensive tests; interface tests here
# non-overlapping
tuples = [(start + n * shift, start + (n + 1) * shift)
for n in (0, 2, 4)]
index = IntervalIndex.from_tuples(tuples, closed=closed)
assert index.is_overlapping is False
# non-overlapping with NA
tuples = [(na_value, na_value)] + tuples + [(na_value, na_value)]
index = IntervalIndex.from_tuples(tuples, closed=closed)
assert index.is_overlapping is False
# overlapping
tuples = [(start + n * shift, start + (n + 2) * shift)
for n in range(3)]
index = IntervalIndex.from_tuples(tuples, closed=closed)
assert index.is_overlapping is True
# overlapping with NA
tuples = [(na_value, na_value)] + tuples + [(na_value, na_value)]
index = IntervalIndex.from_tuples(tuples, closed=closed)
assert index.is_overlapping is True
# common endpoints
tuples = [(start + n * shift, start + (n + 1) * shift)
for n in range(3)]
index = IntervalIndex.from_tuples(tuples, closed=closed)
result = index.is_overlapping
expected = closed == 'both'
assert result is expected
# common endpoints with NA
tuples = [(na_value, na_value)] + tuples + [(na_value, na_value)]
index = IntervalIndex.from_tuples(tuples, closed=closed)
result = index.is_overlapping
assert result is expected
@pytest.mark.parametrize('tuples', [
lzip(range(10), range(1, 11)),
lzip(date_range('20170101', periods=10),
date_range('20170101', periods=10)),
lzip(timedelta_range('0 days', periods=10),
timedelta_range('1 day', periods=10))])
def test_to_tuples(self, tuples):
# GH 18756
idx = IntervalIndex.from_tuples(tuples)
result = idx.to_tuples()
expected = Index(com.asarray_tuplesafe(tuples))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('tuples', [
lzip(range(10), range(1, 11)) + [np.nan],
lzip(date_range('20170101', periods=10),
date_range('20170101', periods=10)) + [np.nan],
lzip(timedelta_range('0 days', periods=10),
timedelta_range('1 day', periods=10)) + [np.nan]])
@pytest.mark.parametrize('na_tuple', [True, False])
def test_to_tuples_na(self, tuples, na_tuple):
# GH 18756
idx = IntervalIndex.from_tuples(tuples)
result = idx.to_tuples(na_tuple=na_tuple)
# check the non-NA portion
expected_notna = Index(com.asarray_tuplesafe(tuples[:-1]))
result_notna = result[:-1]
tm.assert_index_equal(result_notna, expected_notna)
# check the NA portion
result_na = result[-1]
if na_tuple:
assert isinstance(result_na, tuple)
assert len(result_na) == 2
assert all(isna(x) for x in result_na)
else:
assert isna(result_na)
def test_nbytes(self):
# GH 19209
left = np.arange(0, 4, dtype='i8')
right = np.arange(1, 5, dtype='i8')
result = IntervalIndex.from_arrays(left, right).nbytes
expected = 64 # 4 * 8 * 2
assert result == expected
def test_itemsize(self):
# GH 19209
left = np.arange(0, 4, dtype='i8')
right = np.arange(1, 5, dtype='i8')
expected = 16 # 8 * 2
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = IntervalIndex.from_arrays(left, right).itemsize
assert result == expected
@pytest.mark.parametrize('new_closed', [
'left', 'right', 'both', 'neither'])
def test_set_closed(self, name, closed, new_closed):
# GH 21670
index = interval_range(0, 5, closed=closed, name=name)
result = index.set_closed(new_closed)
expected = interval_range(0, 5, closed=new_closed, name=name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('bad_closed', ['foo', 10, 'LEFT', True, False])
def test_set_closed_errors(self, bad_closed):
# GH 21670
index = interval_range(0, 5)
msg = "invalid option for 'closed': {closed}".format(closed=bad_closed)
with pytest.raises(ValueError, match=msg):
index.set_closed(bad_closed)
def test_is_all_dates(self):
# GH 23576
year_2017 = pd.Interval(pd.Timestamp('2017-01-01 00:00:00'),
pd.Timestamp('2018-01-01 00:00:00'))
year_2017_index = pd.IntervalIndex([year_2017])
assert not year_2017_index.is_all_dates
| bsd-3-clause |
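# --- Illustrative sketch (not part of the pandas test suite above) ---
# A standalone look at the two IntervalIndex properties the tests above
# exercise, assuming a pandas version that exposes both `is_overlapping`
# and `is_non_overlapping_monotonic`.
import pandas as pd

idx = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)], closed='right')
print(idx.is_non_overlapping_monotonic)     # True: sorted and pairwise disjoint
print(idx.is_overlapping)                   # False: no interval overlaps another

# Shared endpoints only count as an overlap when both sides are closed,
# which is the closed='both' special case tested above (GH16560).
shared = pd.IntervalIndex.from_breaks(range(4), closed='both')
print(shared.is_overlapping)                    # True
print(shared.is_non_overlapping_monotonic)      # False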
vshtanko/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 254 | 7434 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two south american
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
| bsd-3-clause |
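# --- Illustrative sketch (not part of the example above) ---
# A minimal, synthetic version of the modeling idea described in the example's
# docstring: with only presence records available, fit a OneClassSVM on
# standardized features and read its decision_function as a relative
# suitability score over a grid. The data and grid below are made-up
# placeholders; the nu/gamma values simply mirror the ones used above.
import numpy as np
from sklearn import svm

rng = np.random.RandomState(0)
presence = rng.normal(loc=[2.0, -1.0], scale=0.5, size=(200, 2))  # observed points only

mean, std = presence.mean(axis=0), presence.std(axis=0)
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit((presence - mean) / std)

# Score a grid of candidate locations; higher values are more similar to the
# training presences, which is what the example above draws as a map.
xx, yy = np.meshgrid(np.linspace(0, 4, 50), np.linspace(-3, 1, 50))
grid = np.c_[xx.ravel(), yy.ravel()]
scores = clf.decision_function((grid - mean) / std).ravel().reshape(xx.shape)
print(scores.shape, scores.max())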
smrjan/seldon-server | python/seldon/pipeline/auto_transforms.py | 2 | 14874 | from sklearn import preprocessing
from dateutil.parser import parse
import datetime
from collections import defaultdict
import numpy as np
import pandas as pd
import math
import itertools
from sklearn.base import BaseEstimator,TransformerMixin
import logging
from seldon.util import DeprecationHelper
logger = logging.getLogger(__name__)
class AutoTransform(BaseEstimator,TransformerMixin):
"""
    Automatically transform a set of features into normalized numeric, categorical, or date features
Parameters
----------
exclude : list str, optional
list of features to not include
include : list str, optional
        features to include; if None, all features are used apart from those listed in exclude
max_values_numeric_categorical : int, optional
max number of unique values for numeric feature to treat as categorical
custom_date_formats : list str, optional
list of custom date formats to try
ignore_vals : list str, optional
list of feature values to treat as NA/ignored values
force_categorical : list str, optional
features to force to be categorical
    min_cat_percent : float, optional
min percentage for a categorical value to be kept
max_cat_percent : float, optional
max percentage for a categorical value to be kept
bool_map : dict, optional
set of string values to be treated as boolean
    cat_missing_val : str, optional
string to use for missing categorical values
date_transforms : list bool, optional
which date transforms to apply [hour,month,day_of_week,year], default is all
create_date_differences : bool, optional
whether to create differences between all date variables
nan_threshold : float, optional
        features to drop if they contain too many NaN values; threshold is a fraction between 0 and 1
drop_constant_features : bool, optional
drop a column if its value is constant
    drop_duplicate_cols : bool, optional
drop duplicate columns
min_max_limit : bool, optional
limit numeric cols to min and max seen in fit
"""
def __init__(self,exclude=[],include=None,max_values_numeric_categorical=0,date_cols=[],custom_date_formats=None,ignore_vals=None,force_categorical=[],min_cat_percent=0.0,max_cat_percent=1.0,bool_map={"true":1,"false":0,"1":1,"0":0,"yes":1,"no":0,"1.0":1,"0.0":0},cat_missing_val="UKN",date_transforms=[True,True,True,True],create_date_differences=False,nan_threshold=None,drop_constant_features=True,drop_duplicate_cols=True,min_max_limit=False):
super(AutoTransform, self).__init__()
self.exclude = exclude
self.include = include
self.max_values_numeric_categorical = max_values_numeric_categorical
self.scalers = {}
self.date_diff_scalers = {}
self.custom_date_formats = custom_date_formats
self.ignore_vals = ignore_vals
self.force_categorical = force_categorical
self.catValueCount = {}
self.convert_categorical = []
self.convert_date = []
self.date_cols = date_cols
self.min_cat_percent = min_cat_percent
self.max_cat_percent = max_cat_percent
self.cat_percent = {}
self.bool_map = bool_map
self.convert_bool = []
self.cat_missing_val = cat_missing_val
self.date_transforms=date_transforms
self.create_date_differences = create_date_differences
self.nan_threshold=nan_threshold
self.drop_cols = []
self.drop_constant_features=drop_constant_features
self.drop_duplicate_cols=drop_duplicate_cols
self.min_max_limit=min_max_limit
self.min_max = {}
def _scale(self,v,col):
if np.isnan(v):
return 0.0
else:
return self.scalers[col].transform([[float(v)]])[0,0]
def _scale_date_diff(self,v,col):
if np.isnan(v):
return 0.0
else:
return self.date_diff_scalers[col].transform([[float(v)]])[0,0]
@staticmethod
def _is_number(s):
try:
float(s)
return True
except ValueError:
return False
def _make_cat(self,v,col):
if not isinstance(v,basestring) and np.isnan(v):
return self.cat_missing_val
else:
if col in self.cat_percent and v in self.cat_percent[col] and self.cat_percent[col][v] >= self.min_cat_percent and self.cat_percent[col][v] <= self.max_cat_percent:
val = unicode(str(v), "utf-8")
if self._is_number(v):
val = col + "_" + val.replace(" ","_").lower()
else:
val = val.replace(" ","_").lower()
return val
else:
return np.nan
def _create_hour_features(self,v,col):
val = (v.hour/24.0) * 2*math.pi
v1 = math.sin(val)
v2 = math.cos(val)
return pd.Series({col+"_hour":"h"+str(v.hour),col+"_"+'h1':v1, col+"_"+'h2':v2})
def _create_month_features(self,v,col):
val = (v.month/12.0) * 2*math.pi
v1 = math.sin(val)
v2 = math.cos(val)
return pd.Series({col+"_month":"m"+str(v.month),col+"_"+'m1':v1, col+"_"+'m2':v2})
def _create_dayofweek_features(self,v,col):
val = (v.dayofweek/7.0) * 2*math.pi
v1 = math.sin(val)
v2 = math.cos(val)
return pd.Series({col+"_w":"w"+str(v.dayofweek),col+"_"+'w1':v1, col+"_"+'w2':v2})
def _create_year_features(self,v,col):
return pd.Series({col+"_year":"y"+str(v.year)})
def _convert_to_date(self,df,col):
if not df[col].dtype == 'datetime64[ns]':
try:
return pd.to_datetime(df[col])
except:
logger.info("failed default conversion ")
pass
for f in self.custom_date_formats:
try:
return pd.to_datetime(df[col],format=f)
except:
logger.info("failed custom conversion %s",f)
pass
return None
else:
return df[col]
def _duplicate_columns(self,frame):
groups = frame.columns.to_series().groupby(frame.dtypes).groups
dups = []
for t, v in groups.items():
dcols = frame[v].to_dict(orient="list")
vs = dcols.values()
ks = dcols.keys()
lvs = len(vs)
for i in range(lvs):
for j in range(i+1,lvs):
if vs[i] == vs[j]:
dups.append(ks[i])
break
return dups
def fit(self,df):
"""
Fit models against an input pandas dataframe
Parameters
----------
X : pandas dataframe
Returns
-------
self: object
"""
if not self.nan_threshold is None:
max_nan = float(len(df)) * self.nan_threshold
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
numeric_cols = set(df.select_dtypes(include=numerics).columns)
categorical_cols = set(df.select_dtypes(exclude=numerics).columns)
if self.drop_duplicate_cols:
self.drop_cols = self._duplicate_columns(df)
logger.info("Adding duplicate cols to be dropped %s",self.drop_cols)
for col in df.columns:
if col in self.exclude:
continue
if col in self.drop_cols:
continue
elif not self.include or col in self.include:
if not self.nan_threshold is None:
num_nan = len(df) - df[col].count()
if num_nan > max_nan:
if not col in self.drop_cols:
logger.info("adding %s to drop columns %d %d",col,num_nan,max_nan)
self.drop_cols.append(col)
continue
if not self.ignore_vals is None:
df[col].replace(self.ignore_vals,np.nan,inplace=True)
df[col] = df[col].apply(lambda x: np.nan if isinstance(x, basestring) and len(x)==0 else x)
cat_counts = df[col].value_counts(normalize=True,dropna=False)
if len(cat_counts) == 1 and self.drop_constant_features:
if not col in self.drop_cols:
logger.info("adding %s to drop columns as is constant",col)
self.drop_cols.append(col)
continue
is_bool = True
for val in cat_counts.index:
if not str(val).lower() in self.bool_map.keys():
is_bool = False
break
if is_bool:
self.convert_bool.append(col)
elif df[col].dtype in numerics:
if len(cat_counts) > self.max_values_numeric_categorical and not col in self.force_categorical:
logger.info("fitting scaler for col %s",col)
dfs = df[col].dropna()
if dfs.shape[0] > 0:
arr = dfs.astype(float).values.reshape(-1,1)
self.scalers[col] = preprocessing.StandardScaler(with_mean=True, with_std=True).fit(arr)
self.min_max[col] = (dfs.min(),dfs.max())
else:
self.convert_categorical.append(col)
self.cat_percent[col] = cat_counts
else:
if df[col].dtype == 'datetime64[ns]':
self.convert_date.append(col)
elif col in self.date_cols:
self.convert_date.append(col)
else:
self.convert_categorical.append(col)
self.cat_percent[col] = cat_counts
if self.create_date_differences:
dates_converted = pd.DataFrame([])
for col in self.convert_date:
date_converted = self._convert_to_date(df,col)
if not date_converted is None:
dates_converted[col] = date_converted
if len(dates_converted.columns)>1:
for (col1,col2) in itertools.combinations(dates_converted.columns, 2):
logger.info("training date diff scaler for %s %s",col1,col2)
d_diff = dates_converted[col1] - dates_converted[col2]
d_diff = (d_diff / np.timedelta64(1, 'D')).astype(float)
                    # fit the scaler on the computed date differences (not on a leftover numeric array)
                    self.date_diff_scalers[col1+"_"+col2] = preprocessing.StandardScaler(with_mean=True, with_std=True).fit(d_diff.values.reshape(-1,1))
logger.info("num columns to drop %d",len(self.drop_cols))
logger.info("num scalers %d",len(self.scalers))
logger.info("num categorical %d",len(self.convert_categorical))
logger.info("num dates %d",len(self.convert_date))
logger.info("num date diffs %d",len(self.date_diff_scalers))
logger.info("num bool %d",len(self.convert_bool))
return self
def transform(self,df):
"""
        Transform a dataframe with the fitted models
Parameters
----------
X : pandas dataframe
Returns
-------
Transformed pandas dataframe
"""
df = df.drop(self.drop_cols,axis=1)
c = 0
num_bools = len(self.convert_bool)
for col in self.convert_bool:
c += 1
logger.info("convert bool %s %d/%d",col,c,num_bools)
df[col] = df[col].apply(str).apply(str.lower)
df[col] = df[col].map(self.bool_map)
c = 0
num_dates = len(self.convert_date)
dates_converted = []
for col in self.convert_date:
c += 1
logger.info("convert date %s %d/%d %s",col,c,num_dates,df[col].dtype)
date_converted = self._convert_to_date(df,col)
if not date_converted is None:
logger.info("successfully converted %s to date",col)
df[col] = date_converted
dates_converted.append(col)
if df[col].dtype == 'datetime64[ns]':
if self.date_transforms[0]:
logger.info("creating hour features")
df = pd.concat([df,df[col].apply(self._create_hour_features,col=col)],axis=1)
if self.date_transforms[1]:
logger.info("creating month features")
df = pd.concat([df,df[col].apply(self._create_month_features,col=col)],axis=1)
if self.date_transforms[2]:
logger.info("creating day of week features")
df = pd.concat([df,df[col].apply(self._create_dayofweek_features,col=col)],axis=1)
if self.date_transforms[3]:
logger.info("creating year features")
df = pd.concat([df,df[col].apply(self._create_year_features,col=col)],axis=1)
else:
logger.info("warning - failed to convert to date col %s",col)
if self.create_date_differences and len(dates_converted) > 1:
for (col1,col2) in itertools.combinations(dates_converted, 2):
logger.info("diff scaler for %s %s",col1,col2)
col_name = col1+"_"+col2
df[col_name] = df[col1] - df[col2]
df[col_name] = (df[col_name] / np.timedelta64(1, 'D')).astype(float)
if not self.ignore_vals is None:
df[col_name].replace(self.ignore_vals,np.nan,inplace=True)
df[col_name] = df[col_name].apply(self._scale_date_diff,col=col_name)
c = 0
num_cats = len(self.convert_categorical)
for col in self.convert_categorical:
if not self.ignore_vals is None:
df[col].replace(self.ignore_vals,np.nan,inplace=True)
c += 1
logger.info("convert categorical %s %d/%d ",col,c,num_cats)
df[col] = df[col].apply(self._make_cat,col=col)
num_scalers = len(self.scalers)
c = 0
for col in self.scalers:
if not self.ignore_vals is None:
df[col].replace(self.ignore_vals,np.nan,inplace=True)
if self.min_max_limit:
df[col] = df[col].apply(lambda x : self.min_max[col][0] if x < self.min_max[col][0] else x)
df[col] = df[col].apply(lambda x : self.min_max[col][1] if x > self.min_max[col][1] else x)
c += 1
logger.info("scaling col %s %d/%d",col,c,num_scalers)
df[col] = df[col].apply(self._scale,col=col)
return df
Auto_transform = DeprecationHelper(AutoTransform) | apache-2.0 |
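# --- Illustrative sketch (not part of the module above) ---
# Rough usage of the AutoTransform class defined above, assuming the seldon
# package (which targets Python 2: note the basestring/unicode calls) is
# importable. The column names and values below are made up for illustration.
import pandas as pd
from seldon.pipeline.auto_transforms import AutoTransform

df = pd.DataFrame({
    'age': [23, 45, 31, None, 52],
    'city': ['london', 'paris', 'london', 'rome', None],
    'active': ['yes', 'no', 'yes', 'yes', 'no'],
    'signup': pd.to_datetime(['2016-01-03', '2016-02-11', '2016-03-07',
                              '2016-03-09', '2016-04-21']),
})

t = AutoTransform(max_values_numeric_categorical=3, date_cols=['signup'])
t.fit(df.copy())
out = t.transform(df.copy())
# Expected behaviour given the code above: 'age' is standard-scaled, 'city'
# becomes a categorical string feature, 'active' maps to 1/0 via bool_map,
# and 'signup' is expanded into cyclic hour/month/day-of-week plus year features.
print(out.columns.tolist())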
kjung/scikit-learn | doc/datasets/mldata_fixture.py | 367 | 1183 | """Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
| bsd-3-clause |
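# --- Illustrative sketch (not part of the fixture above) ---
# What the fixture above enables in the documentation doctests: with the mock
# installed, fetch_mldata serves fake arrays instead of downloading from
# mldata.org. This assumes the same (now long-deprecated) fetch_mldata and
# mock helpers imported above; the dataset name and arrays are made up.
import tempfile
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.utils.testing import install_mldata_mock, uninstall_mldata_mock

install_mldata_mock({
    'mock-dataset': {
        'data': np.arange(12.).reshape(3, 4),
        'label': np.array([0., 1., 0.]),
    },
})
try:
    bunch = fetch_mldata('mock-dataset', data_home=tempfile.mkdtemp())
    print(bunch.data.shape, bunch.target.shape)   # expected: (3, 4) (3,)
finally:
    uninstall_mldata_mock()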
petosegan/scikit-learn | sklearn/decomposition/tests/test_factor_analysis.py | 222 | 3055 | # Author: Christian Osendorfer <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD3
import numpy as np
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
def test_factor_analysis():
# Test FactorAnalysis ability to recover the data covariance structure
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 20, 5, 3
# Some random settings for the generative model
W = rng.randn(n_components, n_features)
# latent variable of dim 3, 20 of it
h = rng.randn(n_samples, n_components)
# using gamma to model different noise variance
# per component
noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)
# generate observations
# wlog, mean is 0
X = np.dot(h, W) + noise
assert_raises(ValueError, FactorAnalysis, svd_method='foo')
fa_fail = FactorAnalysis()
fa_fail.svd_method = 'foo'
assert_raises(ValueError, fa_fail.fit, X)
fas = []
for method in ['randomized', 'lapack']:
fa = FactorAnalysis(n_components=n_components, svd_method=method)
fa.fit(X)
fas.append(fa)
X_t = fa.transform(X)
assert_equal(X_t.shape, (n_samples, n_components))
assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))
diff = np.all(np.diff(fa.loglike_))
        assert_greater(diff, 0., 'Log likelihood did not increase')
# Sample Covariance
scov = np.cov(X, rowvar=0., bias=1.)
# Model Covariance
mcov = fa.get_covariance()
diff = np.sum(np.abs(scov - mcov)) / W.size
assert_less(diff, 0.1, "Mean absolute difference is %f" % diff)
fa = FactorAnalysis(n_components=n_components,
noise_variance_init=np.ones(n_features))
assert_raises(ValueError, fa.fit, X[:, :2])
f = lambda x, y: np.abs(getattr(x, y)) # sign will not be equal
fa1, fa2 = fas
for attr in ['loglike_', 'components_', 'noise_variance_']:
assert_almost_equal(f(fa1, attr), f(fa2, attr))
fa1.max_iter = 1
fa1.verbose = True
assert_warns(ConvergenceWarning, fa1.fit, X)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
fa.n_components = n_components
fa.fit(X)
cov = fa.get_covariance()
precision = fa.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
| bsd-3-clause |
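# --- Illustrative sketch (not part of the test module above) ---
# A compact version of the generative model used in the tests above: observed
# data is a linear map of a low-dimensional latent variable plus per-feature
# noise, which FactorAnalysis then tries to recover. Sizes are arbitrary.
import numpy as np
from sklearn.decomposition import FactorAnalysis

rng = np.random.RandomState(0)
n_samples, n_features, n_components = 500, 10, 3
W = rng.randn(n_components, n_features)              # loading matrix
h = rng.randn(n_samples, n_components)               # latent factors
noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)
X = np.dot(h, W) + noise

fa = FactorAnalysis(n_components=n_components, svd_method='lapack').fit(X)
X_t = fa.transform(X)                                # shape (n_samples, n_components)
model_cov = fa.get_covariance()                      # loadings' outer product + noise diagonal
print(X_t.shape, np.abs(np.cov(X, rowvar=False) - model_cov).mean())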
samuel1208/scikit-learn | examples/feature_selection/plot_feature_selection.py | 249 | 2827 | """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the first 4 are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='g')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight', color='r')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='b')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
| bsd-3-clause |
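# --- Illustrative sketch (not part of the example above) ---
# The docstring claims that applying univariate selection before the SVM
# improves classification on the noisy iris data; a quick cross-validated
# check of that claim, written against the same sklearn era as this example
# (sklearn.cross_validation became sklearn.model_selection in 0.18+).
import numpy as np
from sklearn import datasets, svm
from sklearn.cross_validation import cross_val_score
from sklearn.feature_selection import SelectPercentile, f_classif
from sklearn.pipeline import Pipeline

iris = datasets.load_iris()
E = np.random.RandomState(0).uniform(0, 0.1, size=(len(iris.data), 20))
X = np.hstack((iris.data, E))
y = iris.target

svm_only = svm.SVC(kernel='linear')
select_then_svm = Pipeline([
    ('select', SelectPercentile(f_classif, percentile=10)),
    ('svc', svm.SVC(kernel='linear')),
])
print("SVM alone:          %.3f" % cross_val_score(svm_only, X, y).mean())
print("selection then SVM: %.3f" % cross_val_score(select_then_svm, X, y).mean())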
rohanp/scikit-learn | examples/applications/svm_gui.py | 287 | 11161 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <[email protected]>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
ryanraaum/african-mtdna | popdata_sources/batai2013/process.py | 1 | 1598 | from oldowan.mtconvert import seq2sites, sites2seq, str2sites
from string import translate
import pandas as pd
import numpy as np
import sys
sys.path.append('../../scripts')
from utils import *
## load metadata
metadata = pd.read_csv('metadata.csv', index_col=0)
region = range2region(metadata.ix[0,'SeqRange'])
with open('batai2013.csv', 'rU') as f:
header = f.readline()
data = f.readlines()
counts = np.zeros((len(data), 2), dtype=np.int)
hids = []
sites = []
for i in range(len(data)):
x = data[i].strip().split(',')
hids.append("H%s" % str(i+1).zfill(3))
sites.append(x[0])
count = x[1:]
for j in range(2):
if count[j] == '':
count[j] = '0'
counts[i,] = [int(y) for y in count]
## Validate
passed_validation = True
for i in range(len(sites)):
curr_sites = str2sites(sites[i], add16k=True)
seq = sites2seq(curr_sites, region)
mysites = seq2sites(seq)
if not mysites == curr_sites:
myseq = translate(sites2seq(mysites, region), None, '-')
if not seq == myseq:
passed_validation = False
print i, hids[i]
if passed_validation:
counter = [0] * 2
with open('processed.csv', 'w') as f:
for i in range(len(sites)):
hid = hids[i]
curr_sites = str2sites(sites[i], add16k=True)
seq = sites2seq(curr_sites, region)
mysites = ' '.join([str(x) for x in seq2sites(seq)])
for j in range(2):
prefix = metadata.ix[j,'NewPrefix']
for k in range(counts[i,j]):
counter[j] += 1
num = str(counter[j]).zfill(3)
newid = prefix + num
f.write('%s,%s,%s\n' % (newid, hid, mysites)) | cc0-1.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/scipy/stats/_multivariate.py | 12 | 112182 | #
# Author: Joris Vankerschaver 2013
#
from __future__ import division, print_function, absolute_import
import math
import numpy as np
import scipy.linalg
from scipy.misc import doccer
from scipy.special import gammaln, psi, multigammaln, xlogy, entr
from scipy._lib._util import check_random_state
from scipy.linalg.blas import drot
from ._discrete_distns import binom
__all__ = ['multivariate_normal',
'matrix_normal',
'dirichlet',
'wishart',
'invwishart',
'multinomial',
'special_ortho_group',
'ortho_group',
'random_correlation']
_LOG_2PI = np.log(2 * np.pi)
_LOG_2 = np.log(2)
_LOG_PI = np.log(np.pi)
_doc_random_state = """\
random_state : None or int or np.random.RandomState instance, optional
If int or RandomState, use it for drawing the random variates.
If None (or np.random), the global np.random state is used.
Default is None.
"""
def _squeeze_output(out):
"""
Remove single-dimensional entries from array and convert to scalar,
if necessary.
"""
out = out.squeeze()
if out.ndim == 0:
out = out[()]
return out
def _eigvalsh_to_eps(spectrum, cond=None, rcond=None):
"""
Determine which eigenvalues are "small" given the spectrum.
This is for compatibility across various linear algebra functions
that should agree about whether or not a Hermitian matrix is numerically
singular and what is its numerical matrix rank.
This is designed to be compatible with scipy.linalg.pinvh.
Parameters
----------
spectrum : 1d ndarray
Array of eigenvalues of a Hermitian matrix.
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
Returns
-------
eps : float
Magnitude cutoff for numerical negligibility.
"""
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = spectrum.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
eps = cond * np.max(abs(spectrum))
return eps
def _pinv_1d(v, eps=1e-5):
"""
A helper function for computing the pseudoinverse.
Parameters
----------
v : iterable of numbers
This may be thought of as a vector of eigenvalues or singular values.
eps : float
Values with magnitude no greater than eps are considered negligible.
Returns
-------
v_pinv : 1d float ndarray
A vector of pseudo-inverted numbers.
"""
return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float)
class _PSD(object):
"""
Compute coordinated functions of a symmetric positive semidefinite matrix.
This class addresses two issues. Firstly it allows the pseudoinverse,
the logarithm of the pseudo-determinant, and the rank of the matrix
to be computed using one call to eigh instead of three.
Secondly it allows these functions to be computed in a way
that gives mutually compatible results.
All of the functions are computed with a common understanding as to
which of the eigenvalues are to be considered negligibly small.
The functions are designed to coordinate with scipy.linalg.pinvh()
but not necessarily with np.linalg.det() or with np.linalg.matrix_rank().
Parameters
----------
M : array_like
Symmetric positive semidefinite matrix (2-D).
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
lower : bool, optional
Whether the pertinent array data is taken from the lower
or upper triangle of M. (Default: lower)
check_finite : bool, optional
Whether to check that the input matrices contain only finite
numbers. Disabling may give a performance gain, but may result
in problems (crashes, non-termination) if the inputs do contain
infinities or NaNs.
allow_singular : bool, optional
Whether to allow a singular matrix. (Default: True)
Notes
-----
The arguments are similar to those of scipy.linalg.pinvh().
"""
def __init__(self, M, cond=None, rcond=None, lower=True,
check_finite=True, allow_singular=True):
# Compute the symmetric eigendecomposition.
# Note that eigh takes care of array conversion, chkfinite,
# and assertion that the matrix is square.
s, u = scipy.linalg.eigh(M, lower=lower, check_finite=check_finite)
eps = _eigvalsh_to_eps(s, cond, rcond)
if np.min(s) < -eps:
raise ValueError('the input matrix must be positive semidefinite')
d = s[s > eps]
if len(d) < len(s) and not allow_singular:
raise np.linalg.LinAlgError('singular matrix')
s_pinv = _pinv_1d(s, eps)
U = np.multiply(u, np.sqrt(s_pinv))
# Initialize the eagerly precomputed attributes.
self.rank = len(d)
self.U = U
self.log_pdet = np.sum(np.log(d))
# Initialize an attribute to be lazily computed.
self._pinv = None
@property
def pinv(self):
if self._pinv is None:
self._pinv = np.dot(self.U, self.U.T)
return self._pinv
class multi_rv_generic(object):
"""
Class which encapsulates common functionality between all multivariate
distributions.
"""
def __init__(self, seed=None):
super(multi_rv_generic, self).__init__()
self._random_state = check_random_state(seed)
@property
def random_state(self):
""" Get or set the RandomState object for generating random variates.
This can be either None or an existing RandomState object.
If None (or np.random), use the RandomState singleton used by np.random.
If already a RandomState instance, use it.
If an int, use a new RandomState instance seeded with seed.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def _get_random_state(self, random_state):
if random_state is not None:
return check_random_state(random_state)
else:
return self._random_state
class multi_rv_frozen(object):
"""
Class which encapsulates common functionality between all frozen
multivariate distributions.
"""
@property
def random_state(self):
return self._dist._random_state
@random_state.setter
def random_state(self, seed):
self._dist._random_state = check_random_state(seed)
_mvn_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
Whether to allow a singular covariance matrix. (Default: False)
"""
_mvn_doc_callparams_note = \
"""Setting the parameter `mean` to `None` is equivalent to having `mean`
be the zero-vector. The parameter `cov` can be a scalar, in which case
the covariance matrix is the identity times that value, a vector of
diagonal entries for the covariance matrix, or a two-dimensional
array_like.
"""
_mvn_doc_frozen_callparams = ""
_mvn_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
mvn_docdict_params = {
'_mvn_doc_default_callparams': _mvn_doc_default_callparams,
'_mvn_doc_callparams_note': _mvn_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
mvn_docdict_noparams = {
'_mvn_doc_default_callparams': _mvn_doc_frozen_callparams,
'_mvn_doc_callparams_note': _mvn_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multivariate_normal_gen(multi_rv_generic):
r"""
A multivariate normal random variable.
The `mean` keyword specifies the mean. The `cov` keyword specifies the
covariance matrix.
Methods
-------
``pdf(x, mean=None, cov=1, allow_singular=False)``
Probability density function.
``logpdf(x, mean=None, cov=1, allow_singular=False)``
Log of the probability density function.
``rvs(mean=None, cov=1, size=1, random_state=None)``
Draw random samples from a multivariate normal distribution.
``entropy()``
Compute the differential entropy of the multivariate normal.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the mean
and covariance parameters, returning a "frozen" multivariate normal
random variable:
rv = multivariate_normal(mean=None, cov=1, allow_singular=False)
- Frozen object with the same methods but holding the given
mean and covariance fixed.
Notes
-----
%(_mvn_doc_callparams_note)s
The covariance matrix `cov` must be a (symmetric) positive
semi-definite matrix. The determinant and inverse of `cov` are computed
as the pseudo-determinant and pseudo-inverse, respectively, so
that `cov` does not need to have full rank.
The probability density function for `multivariate_normal` is
.. math::
f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}}
\exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right),
where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix,
and :math:`k` is the dimension of the space where :math:`x` takes values.
.. versionadded:: 0.14.0
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import multivariate_normal
>>> x = np.linspace(0, 5, 10, endpoint=False)
>>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y
array([ 0.00108914, 0.01033349, 0.05946514, 0.20755375, 0.43939129,
0.56418958, 0.43939129, 0.20755375, 0.05946514, 0.01033349])
>>> fig1 = plt.figure()
>>> ax = fig1.add_subplot(111)
>>> ax.plot(x, y)
The input quantiles can be any shape of array, as long as the last
axis labels the components. This allows us for instance to
display the frozen pdf for a non-isotropic random variable in 2D as
follows:
>>> x, y = np.mgrid[-1:1:.01, -1:1:.01]
>>> pos = np.dstack((x, y))
>>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]])
>>> fig2 = plt.figure()
>>> ax2 = fig2.add_subplot(111)
>>> ax2.contourf(x, y, rv.pdf(pos))
"""
def __init__(self, seed=None):
super(multivariate_normal_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, mvn_docdict_params)
def __call__(self, mean=None, cov=1, allow_singular=False, seed=None):
"""
Create a frozen multivariate normal distribution.
See `multivariate_normal_frozen` for more information.
"""
return multivariate_normal_frozen(mean, cov,
allow_singular=allow_singular,
seed=seed)
def _process_parameters(self, dim, mean, cov):
"""
Infer dimensionality from mean or covariance matrix, ensure that
mean and covariance are full vector resp. matrix.
"""
# Try to infer dimensionality
if dim is None:
if mean is None:
if cov is None:
dim = 1
else:
cov = np.asarray(cov, dtype=float)
if cov.ndim < 2:
dim = 1
else:
dim = cov.shape[0]
else:
mean = np.asarray(mean, dtype=float)
dim = mean.size
else:
if not np.isscalar(dim):
raise ValueError("Dimension of random variable must be a scalar.")
# Check input sizes and return full arrays for mean and cov if necessary
if mean is None:
mean = np.zeros(dim)
mean = np.asarray(mean, dtype=float)
if cov is None:
cov = 1.0
cov = np.asarray(cov, dtype=float)
if dim == 1:
mean.shape = (1,)
cov.shape = (1, 1)
if mean.ndim != 1 or mean.shape[0] != dim:
raise ValueError("Array 'mean' must be a vector of length %d." % dim)
if cov.ndim == 0:
cov = cov * np.eye(dim)
elif cov.ndim == 1:
cov = np.diag(cov)
elif cov.ndim == 2 and cov.shape != (dim, dim):
rows, cols = cov.shape
if rows != cols:
msg = ("Array 'cov' must be square if it is two dimensional,"
" but cov.shape = %s." % str(cov.shape))
else:
msg = ("Dimension mismatch: array 'cov' is of shape %s,"
" but 'mean' is a vector of length %d.")
msg = msg % (str(cov.shape), len(mean))
raise ValueError(msg)
elif cov.ndim > 2:
raise ValueError("Array 'cov' must be at most two-dimensional,"
" but cov.ndim = %d" % cov.ndim)
return dim, mean, cov
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x[np.newaxis]
elif x.ndim == 1:
if dim == 1:
x = x[:, np.newaxis]
else:
x = x[np.newaxis, :]
return x
def _logpdf(self, x, mean, prec_U, log_det_cov, rank):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
mean : ndarray
Mean of the distribution
prec_U : ndarray
A decomposition such that np.dot(prec_U, prec_U.T)
is the precision matrix, i.e. inverse of the covariance matrix.
log_det_cov : float
Logarithm of the determinant of the covariance matrix
rank : int
Rank of the covariance matrix.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
dev = x - mean
maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1)
return -0.5 * (rank * _LOG_2PI + log_det_cov + maha)
def logpdf(self, x, mean=None, cov=1, allow_singular=False):
"""
Log of the multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank)
return _squeeze_output(out)
def pdf(self, x, mean=None, cov=1, allow_singular=False):
"""
Multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = np.exp(self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank))
return _squeeze_output(out)
def rvs(self, mean=None, cov=1, size=1, random_state=None):
"""
Draw random samples from a multivariate normal distribution.
Parameters
----------
%(_mvn_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
random_state = self._get_random_state(random_state)
out = random_state.multivariate_normal(mean, cov, size)
return _squeeze_output(out)
def entropy(self, mean=None, cov=1):
"""
Compute the differential entropy of the multivariate normal.
Parameters
----------
%(_mvn_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
_, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov)
return 0.5 * logdet
multivariate_normal = multivariate_normal_gen()
class multivariate_normal_frozen(multi_rv_frozen):
def __init__(self, mean=None, cov=1, allow_singular=False, seed=None):
"""
Create a frozen multivariate normal distribution.
Parameters
----------
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
If this flag is True then tolerate a singular
covariance matrix (default False).
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
Examples
--------
When called with the default parameters, this will create a 1D random
variable with mean 0 and covariance 1:
>>> from scipy.stats import multivariate_normal
>>> r = multivariate_normal()
>>> r.mean
array([ 0.])
>>> r.cov
array([[1.]])
"""
self._dist = multivariate_normal_gen(seed)
self.dim, self.mean, self.cov = self._dist._process_parameters(
None, mean, cov)
self.cov_info = _PSD(self.cov, allow_singular=allow_singular)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.mean, self.cov_info.U,
self.cov_info.log_pdet, self.cov_info.rank)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.mean, self.cov, size, random_state)
def entropy(self):
"""
Computes the differential entropy of the multivariate normal.
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
"""
log_pdet = self.cov_info.log_pdet
rank = self.cov_info.rank
return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
method = multivariate_normal_gen.__dict__[name]
method_frozen = multivariate_normal_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_params)
_matnorm_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default: `None`)
rowcov : array_like, optional
Among-row covariance matrix of the distribution (default: `1`)
colcov : array_like, optional
Among-column covariance matrix of the distribution (default: `1`)
"""
_matnorm_doc_callparams_note = \
"""If `mean` is set to `None` then a matrix of zeros is used for the mean.
The dimensions of this matrix are inferred from the shape of `rowcov` and
`colcov`, if these are provided, or set to `1` if ambiguous.
`rowcov` and `colcov` can be two-dimensional array_likes specifying the
covariance matrices directly. Alternatively, a one-dimensional array will
be interpreted as the entries of a diagonal matrix, and a scalar or
zero-dimensional array will be interpreted as this value times the
identity matrix.
"""
_matnorm_doc_frozen_callparams = ""
_matnorm_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
matnorm_docdict_params = {
'_matnorm_doc_default_callparams': _matnorm_doc_default_callparams,
'_matnorm_doc_callparams_note': _matnorm_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
matnorm_docdict_noparams = {
'_matnorm_doc_default_callparams': _matnorm_doc_frozen_callparams,
'_matnorm_doc_callparams_note': _matnorm_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class matrix_normal_gen(multi_rv_generic):
r"""
A matrix normal random variable.
The `mean` keyword specifies the mean. The `rowcov` keyword specifies the
among-row covariance matrix. The 'colcov' keyword specifies the
among-column covariance matrix.
Methods
-------
``pdf(X, mean=None, rowcov=1, colcov=1)``
Probability density function.
``logpdf(X, mean=None, rowcov=1, colcov=1)``
Log of the probability density function.
``rvs(mean=None, rowcov=1, colcov=1, size=1, random_state=None)``
Draw random samples.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the mean
and covariance parameters, returning a "frozen" matrix normal
random variable:
rv = matrix_normal(mean=None, rowcov=1, colcov=1)
- Frozen object with the same methods but holding the given
mean and covariance fixed.
Notes
-----
%(_matnorm_doc_callparams_note)s
The covariance matrices specified by `rowcov` and `colcov` must be
(symmetric) positive definite. If the samples in `X` are
:math:`m \times n`, then `rowcov` must be :math:`m \times m` and
`colcov` must be :math:`n \times n`. `mean` must be the same shape as `X`.
The probability density function for `matrix_normal` is
.. math::
f(X) = (2 \pi)^{-\frac{mn}{2}}|U|^{-\frac{n}{2}} |V|^{-\frac{m}{2}}
\exp\left( -\frac{1}{2} \mathrm{Tr}\left[ U^{-1} (X-M) V^{-1}
(X-M)^T \right] \right),
where :math:`M` is the mean, :math:`U` the among-row covariance matrix,
:math:`V` the among-column covariance matrix.
The `allow_singular` behaviour of the `multivariate_normal`
distribution is not currently supported. Covariance matrices must be
full rank.
The `matrix_normal` distribution is closely related to the
`multivariate_normal` distribution. Specifically, :math:`\mathrm{Vec}(X)`
(the vector formed by concatenating the columns of :math:`X`) has a
multivariate normal distribution with mean :math:`\mathrm{Vec}(M)`
and covariance :math:`V \otimes U` (where :math:`\otimes` is the Kronecker
product). Sampling and pdf evaluation are
:math:`\mathcal{O}(m^3 + n^3 + m^2 n + m n^2)` for the matrix normal, but
:math:`\mathcal{O}(m^3 n^3)` for the equivalent multivariate normal,
making this equivalent form algorithmically inefficient.
.. versionadded:: 0.17.0
Examples
--------
>>> from scipy.stats import matrix_normal
>>> M = np.arange(6).reshape(3,2); M
array([[0, 1],
[2, 3],
[4, 5]])
>>> U = np.diag([1,2,3]); U
array([[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> V = 0.3*np.identity(2); V
array([[ 0.3, 0. ],
[ 0. , 0.3]])
>>> X = M + 0.1; X
array([[ 0.1, 1.1],
[ 2.1, 3.1],
[ 4.1, 5.1]])
>>> matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V)
0.023410202050005054
>>> # Equivalent multivariate normal
>>> from scipy.stats import multivariate_normal
>>> vectorised_X = X.T.flatten()
>>> equiv_mean = M.T.flatten()
>>> equiv_cov = np.kron(V,U)
>>> multivariate_normal.pdf(vectorised_X, mean=equiv_mean, cov=equiv_cov)
0.023410202050005054
"""
def __init__(self, seed=None):
super(matrix_normal_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, matnorm_docdict_params)
def __call__(self, mean=None, rowcov=1, colcov=1, seed=None):
"""
Create a frozen matrix normal distribution.
See `matrix_normal_frozen` for more information.
"""
return matrix_normal_frozen(mean, rowcov, colcov, seed=seed)
def _process_parameters(self, mean, rowcov, colcov):
"""
Infer dimensionality from mean or covariance matrices. Handle
defaults. Ensure compatible dimensions.
"""
# Process mean
if mean is not None:
mean = np.asarray(mean, dtype=float)
meanshape = mean.shape
if len(meanshape) != 2:
raise ValueError("Array `mean` must be two dimensional.")
            if np.any(np.asarray(meanshape) == 0):
raise ValueError("Array `mean` has invalid shape.")
# Process among-row covariance
rowcov = np.asarray(rowcov, dtype=float)
if rowcov.ndim == 0:
if mean is not None:
rowcov = rowcov * np.identity(meanshape[0])
else:
rowcov = rowcov * np.identity(1)
elif rowcov.ndim == 1:
rowcov = np.diag(rowcov)
rowshape = rowcov.shape
if len(rowshape) != 2:
raise ValueError("`rowcov` must be a scalar or a 2D array.")
if rowshape[0] != rowshape[1]:
raise ValueError("Array `rowcov` must be square.")
if rowshape[0] == 0:
raise ValueError("Array `rowcov` has invalid shape.")
numrows = rowshape[0]
# Process among-column covariance
colcov = np.asarray(colcov, dtype=float)
if colcov.ndim == 0:
if mean is not None:
colcov = colcov * np.identity(meanshape[1])
else:
colcov = colcov * np.identity(1)
elif colcov.ndim == 1:
colcov = np.diag(colcov)
colshape = colcov.shape
if len(colshape) != 2:
raise ValueError("`colcov` must be a scalar or a 2D array.")
if colshape[0] != colshape[1]:
raise ValueError("Array `colcov` must be square.")
if colshape[0] == 0:
raise ValueError("Array `colcov` has invalid shape.")
numcols = colshape[0]
# Ensure mean and covariances compatible
if mean is not None:
if meanshape[0] != numrows:
raise ValueError("Arrays `mean` and `rowcov` must have the"
"same number of rows.")
if meanshape[1] != numcols:
raise ValueError("Arrays `mean` and `colcov` must have the"
"same number of columns.")
else:
mean = np.zeros((numrows,numcols))
dims = (numrows, numcols)
return dims, mean, rowcov, colcov
def _process_quantiles(self, X, dims):
"""
        Adjust quantiles array so that last two axes label the components of
each data point.
"""
X = np.asarray(X, dtype=float)
if X.ndim == 2:
X = X[np.newaxis, :]
if X.shape[-2:] != dims:
raise ValueError("The shape of array `X` is not compatible "
"with the distribution parameters.")
return X
def _logpdf(self, dims, X, mean, row_prec_rt, log_det_rowcov,
col_prec_rt, log_det_colcov):
"""
Parameters
----------
dims : tuple
Dimensions of the matrix variates
X : ndarray
Points at which to evaluate the log of the probability
density function
mean : ndarray
Mean of the distribution
row_prec_rt : ndarray
A decomposition such that np.dot(row_prec_rt, row_prec_rt.T)
is the inverse of the among-row covariance matrix
log_det_rowcov : float
Logarithm of the determinant of the among-row covariance matrix
col_prec_rt : ndarray
A decomposition such that np.dot(col_prec_rt, col_prec_rt.T)
is the inverse of the among-column covariance matrix
log_det_colcov : float
Logarithm of the determinant of the among-column covariance matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
numrows, numcols = dims
roll_dev = np.rollaxis(X-mean, axis=-1, start=0)
scale_dev = np.tensordot(col_prec_rt.T,
np.dot(roll_dev, row_prec_rt), 1)
maha = np.sum(np.sum(np.square(scale_dev), axis=-1), axis=0)
return -0.5 * (numrows*numcols*_LOG_2PI + numcols*log_det_rowcov
+ numrows*log_det_colcov + maha)
def logpdf(self, X, mean=None, rowcov=1, colcov=1):
"""
Log of the matrix normal probability density function.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
Returns
-------
logpdf : ndarray
Log of the probability density function evaluated at `X`
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
colcov)
X = self._process_quantiles(X, dims)
rowpsd = _PSD(rowcov, allow_singular=False)
colpsd = _PSD(colcov, allow_singular=False)
out = self._logpdf(dims, X, mean, rowpsd.U, rowpsd.log_pdet, colpsd.U,
colpsd.log_pdet)
return _squeeze_output(out)
def pdf(self, X, mean=None, rowcov=1, colcov=1):
"""
Matrix normal probability density function.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `X`
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
return np.exp(self.logpdf(X, mean, rowcov, colcov))
def rvs(self, mean=None, rowcov=1, colcov=1, size=1, random_state=None):
"""
Draw random samples from a matrix normal distribution.
Parameters
----------
%(_matnorm_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `dims`), where `dims` is the
dimension of the random matrices.
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
size = int(size)
dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
colcov)
rowchol = scipy.linalg.cholesky(rowcov, lower=True)
colchol = scipy.linalg.cholesky(colcov, lower=True)
random_state = self._get_random_state(random_state)
std_norm = random_state.standard_normal(size=(dims[1],size,dims[0]))
roll_rvs = np.tensordot(colchol, np.dot(std_norm, rowchol.T), 1)
out = np.rollaxis(roll_rvs.T, axis=1, start=0) + mean[np.newaxis,:,:]
if size == 1:
            # Return a single matrix rather than a stack with a leading unit axis
out = out.reshape(mean.shape)
return out
matrix_normal = matrix_normal_gen()
class matrix_normal_frozen(multi_rv_frozen):
def __init__(self, mean=None, rowcov=1, colcov=1, seed=None):
"""
Create a frozen matrix normal distribution.
Parameters
----------
%(_matnorm_doc_default_callparams)s
seed : None or int or np.random.RandomState instance, optional
If int or RandomState, use it for drawing the random variates.
If None (or np.random), the global np.random state is used.
Default is None.
Examples
--------
>>> from scipy.stats import matrix_normal
>>> distn = matrix_normal(mean=np.zeros((3,3)))
>>> X = distn.rvs(); X
array([[-0.02976962, 0.93339138, -0.09663178],
[ 0.67405524, 0.28250467, -0.93308929],
[-0.31144782, 0.74535536, 1.30412916]])
>>> distn.pdf(X)
2.5160642368346784e-05
>>> distn.logpdf(X)
-10.590229595124615
"""
self._dist = matrix_normal_gen(seed)
self.dims, self.mean, self.rowcov, self.colcov = \
self._dist._process_parameters(mean, rowcov, colcov)
self.rowpsd = _PSD(self.rowcov, allow_singular=False)
self.colpsd = _PSD(self.colcov, allow_singular=False)
def logpdf(self, X):
X = self._dist._process_quantiles(X, self.dims)
out = self._dist._logpdf(self.dims, X, self.mean, self.rowpsd.U,
self.rowpsd.log_pdet, self.colpsd.U,
self.colpsd.log_pdet)
return _squeeze_output(out)
def pdf(self, X):
return np.exp(self.logpdf(X))
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.mean, self.rowcov, self.colcov, size,
random_state)
# Set frozen generator docstrings from corresponding docstrings in
# matrix_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
method = matrix_normal_gen.__dict__[name]
method_frozen = matrix_normal_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_params)
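# Minimal usage sketch (illustrative helper, not part of the public API): it
# cross-checks the Vec/Kronecker relationship described in the `matrix_normal`
# docstring by comparing a matrix normal log-density against the equivalent
# multivariate normal log-density.  It assumes the module-level
# `multivariate_normal` instance defined earlier in this file.
def _matrix_normal_vec_check():
    M = np.arange(6.).reshape(3, 2)
    U = np.diag([1., 2., 3.])
    V = 0.3 * np.identity(2)
    X = M + 0.1
    lp_mat = matrix_normal.logpdf(X, mean=M, rowcov=U, colcov=V)
    # Vec(X) stacks the columns of X, i.e. it is the flattened transpose.
    lp_vec = multivariate_normal.logpdf(X.T.ravel(), mean=M.T.ravel(),
                                        cov=np.kron(V, U))
    return lp_mat, lp_vec  # the two values agree up to floating point error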
_dirichlet_doc_default_callparams = """\
alpha : array_like
The concentration parameters. The number of entries determines the
dimensionality of the distribution.
"""
_dirichlet_doc_frozen_callparams = ""
_dirichlet_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
dirichlet_docdict_params = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams,
'_doc_random_state': _doc_random_state
}
dirichlet_docdict_noparams = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams,
'_doc_random_state': _doc_random_state
}
def _dirichlet_check_parameters(alpha):
alpha = np.asarray(alpha)
if np.min(alpha) <= 0:
raise ValueError("All parameters must be greater than 0")
elif alpha.ndim != 1:
raise ValueError("Parameter vector 'a' must be one dimensional, "
"but a.shape = %s." % (alpha.shape, ))
return alpha
def _dirichlet_check_input(alpha, x):
x = np.asarray(x)
if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]:
raise ValueError("Vector 'x' must have either the same number "
"of entries as, or one entry fewer than, "
"parameter vector 'a', but alpha.shape = %s "
"and x.shape = %s." % (alpha.shape, x.shape))
if x.shape[0] != alpha.shape[0]:
xk = np.array([1 - np.sum(x, 0)])
if xk.ndim == 1:
x = np.append(x, xk)
elif xk.ndim == 2:
x = np.vstack((x, xk))
else:
raise ValueError("The input must be one dimensional or a two "
"dimensional matrix containing the entries.")
if np.min(x) <= 0:
raise ValueError("Each entry in 'x' must be greater than zero.")
if np.max(x) > 1:
raise ValueError("Each entry in 'x' must be smaller or equal one.")
if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any():
raise ValueError("The input vector 'x' must lie within the normal "
"simplex. but np.sum(x, 0) = %s." % np.sum(x, 0))
return x
def _lnB(alpha):
r"""
Internal helper function to compute the log of the useful quotient
.. math::
        B(\alpha) = \frac{\prod_{i=1}^{K}\Gamma(\alpha_i)}{\Gamma\left(\sum_{i=1}^{K}\alpha_i\right)}
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
B : scalar
Helper quotient, internal use only
"""
return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha))
class dirichlet_gen(multi_rv_generic):
r"""
A Dirichlet random variable.
The `alpha` keyword specifies the concentration parameters of the
distribution.
.. versionadded:: 0.15.0
Methods
-------
``pdf(x, alpha)``
Probability density function.
``logpdf(x, alpha)``
Log of the probability density function.
``rvs(alpha, size=1, random_state=None)``
Draw random samples from a Dirichlet distribution.
``mean(alpha)``
The mean of the Dirichlet distribution
``var(alpha)``
The variance of the Dirichlet distribution
``entropy(alpha)``
Compute the differential entropy of the Dirichlet distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix
concentration parameters, returning a "frozen" Dirichlet
random variable:
rv = dirichlet(alpha)
- Frozen object with the same methods but holding the given
concentration parameters fixed.
Notes
-----
    Each :math:`\alpha` entry must be positive. The distribution has support
    only on the simplex defined by
.. math::
\sum_{i=1}^{K} x_i \le 1
The probability density function for `dirichlet` is
.. math::
f(x) = \frac{1}{\mathrm{B}(\boldsymbol\alpha)} \prod_{i=1}^K x_i^{\alpha_i - 1}
where
.. math::
\mathrm{B}(\boldsymbol\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)}
{\Gamma\bigl(\sum_{i=1}^K \alpha_i\bigr)}
    and :math:`\boldsymbol\alpha=(\alpha_1,\ldots,\alpha_K)` are the
    concentration parameters, with :math:`K` the dimension of the space
    where :math:`x` takes values.
Note that the dirichlet interface is somewhat inconsistent.
The array returned by the rvs function is transposed
with respect to the format expected by the pdf and logpdf.
"""
def __init__(self, seed=None):
super(dirichlet_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params)
def __call__(self, alpha, seed=None):
return dirichlet_frozen(alpha, seed=seed)
def _logpdf(self, x, alpha):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
%(_dirichlet_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
lnB = _lnB(alpha)
return - lnB + np.sum((np.log(x.T) * (alpha - 1)).T, 0)
def logpdf(self, x, alpha):
"""
Log of the Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
        logpdf : ndarray
            Log of the probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = self._logpdf(x, alpha)
return _squeeze_output(out)
def pdf(self, x, alpha):
"""
The Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
pdf : ndarray
The probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = np.exp(self._logpdf(x, alpha))
return _squeeze_output(out)
def mean(self, alpha):
"""
Compute the mean of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
        mu : ndarray
Mean of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
out = alpha / (np.sum(alpha))
return _squeeze_output(out)
def var(self, alpha):
"""
Compute the variance of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
        v : ndarray
Variance of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1))
return out
def entropy(self, alpha):
"""
Compute the differential entropy of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
lnB = _lnB(alpha)
K = alpha.shape[0]
out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum(
(alpha - 1) * scipy.special.psi(alpha))
return _squeeze_output(out)
def rvs(self, alpha, size=1, random_state=None):
"""
Draw random samples from a Dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
size : int, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
"""
alpha = _dirichlet_check_parameters(alpha)
random_state = self._get_random_state(random_state)
return random_state.dirichlet(alpha, size=size)
dirichlet = dirichlet_gen()
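# Minimal Monte Carlo sketch (illustrative helper, not part of the public
# API): the sample mean of `dirichlet.rvs` draws should approach the
# closed-form `dirichlet.mean(alpha)`; the parameter values below are
# arbitrary.
def _dirichlet_moment_demo(alpha=(1.0, 2.0, 3.0), size=5000, seed=0):
    draws = dirichlet.rvs(alpha, size=size, random_state=seed)
    return draws.mean(axis=0), dirichlet.mean(alpha)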
class dirichlet_frozen(multi_rv_frozen):
def __init__(self, alpha, seed=None):
self.alpha = _dirichlet_check_parameters(alpha)
self._dist = dirichlet_gen(seed)
def logpdf(self, x):
return self._dist.logpdf(x, self.alpha)
def pdf(self, x):
return self._dist.pdf(x, self.alpha)
def mean(self):
return self._dist.mean(self.alpha)
def var(self):
return self._dist.var(self.alpha)
def entropy(self):
return self._dist.entropy(self.alpha)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.alpha, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'entropy']:
method = dirichlet_gen.__dict__[name]
method_frozen = dirichlet_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, dirichlet_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params)
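# Minimal usage sketch (illustrative helper, not part of the public API):
# `_dirichlet_check_input` reconstructs the last component of `x` from the
# remaining probability mass, so the density can be evaluated either on a
# full point of the simplex or on the reduced point with the last entry
# dropped; both calls below return the same value.
def _dirichlet_reduced_input_demo(alpha=(2.0, 3.0, 4.0)):
    x_full = np.array([0.2, 0.3, 0.5])   # lies on the simplex
    x_reduced = x_full[:-1]               # last entry inferred as 1 - sum
    return dirichlet.pdf(x_full, alpha), dirichlet.pdf(x_reduced, alpha)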
_wishart_doc_default_callparams = """\
df : int
Degrees of freedom, must be greater than or equal to dimension of the
scale matrix
scale : array_like
Symmetric positive definite scale matrix of the distribution
"""
_wishart_doc_callparams_note = ""
_wishart_doc_frozen_callparams = ""
_wishart_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
wishart_docdict_params = {
'_doc_default_callparams': _wishart_doc_default_callparams,
'_doc_callparams_note': _wishart_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
wishart_docdict_noparams = {
'_doc_default_callparams': _wishart_doc_frozen_callparams,
'_doc_callparams_note': _wishart_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class wishart_gen(multi_rv_generic):
r"""
A Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal precision matrix (the inverse of the covariance
matrix).
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from a Wishart distribution.
``entropy()``
Compute the differential entropy of the Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" Wishart random
variable:
rv = wishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
invwishart, chi2
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The Wishart distribution is often denoted
.. math::
W_p(\nu, \Sigma)
where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the
:math:`p \times p` scale matrix.
The probability density function for `wishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then
its PDF is given by:
.. math::
f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} }
|\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )}
\exp\left( -tr(\Sigma^{-1} S) / 2 \right)
If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then
:math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart).
If the scale matrix is 1-dimensional and equal to one, then the Wishart
distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)`
distribution.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate
Generator", Applied Statistics, vol. 21, pp. 341-345, 1972.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import wishart, chi2
>>> x = np.linspace(1e-5, 8, 100)
>>> w = wishart.pdf(x, df=3, scale=1); w[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> c = chi2.pdf(x, 3); c[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> plt.plot(x, w)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super(wishart_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""
Create a frozen Wishart distribution.
See `wishart_frozen` for more information.
"""
return wishart_frozen(df, scale, seed)
def _process_parameters(self, df, scale):
if scale is None:
scale = 1.0
scale = np.asarray(scale, dtype=float)
if scale.ndim == 0:
scale = scale[np.newaxis,np.newaxis]
elif scale.ndim == 1:
scale = np.diag(scale)
elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]:
raise ValueError("Array 'scale' must be square if it is two"
" dimensional, but scale.scale = %s."
% str(scale.shape))
elif scale.ndim > 2:
raise ValueError("Array 'scale' must be at most two-dimensional,"
" but scale.ndim = %d" % scale.ndim)
dim = scale.shape[0]
if df is None:
df = dim
elif not np.isscalar(df):
raise ValueError("Degrees of freedom must be a scalar.")
elif df < dim:
raise ValueError("Degrees of freedom cannot be less than dimension"
" of scale matrix, but df = %d" % df)
return dim, df, scale
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x * np.eye(dim)[:, :, np.newaxis]
if x.ndim == 1:
if dim == 1:
x = x[np.newaxis, np.newaxis, :]
else:
x = np.diag(x)[:, :, np.newaxis]
elif x.ndim == 2:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square if they are two"
" dimensional, but x.shape = %s."
% str(x.shape))
x = x[:, :, np.newaxis]
elif x.ndim == 3:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square in the first two"
" dimensions if they are three dimensional"
", but x.shape = %s." % str(x.shape))
elif x.ndim > 3:
raise ValueError("Quantiles must be at most two-dimensional with"
" an additional dimension for multiple"
"components, but x.ndim = %d" % x.ndim)
# Now we have 3-dim array; should have shape [dim, dim, *]
if not x.shape[0:2] == (dim, dim):
raise ValueError('Quantiles have incompatible dimensions: should'
' be %s, got %s.' % ((dim, dim), x.shape[0:2]))
return x
def _process_size(self, size):
size = np.asarray(size)
if size.ndim == 0:
size = size[np.newaxis]
elif size.ndim > 1:
            raise ValueError('Size must be an integer or tuple of integers;'
                             ' thus must have dimension <= 1.'
                             ' Got size = %s' % str(tuple(size)))
n = size.prod()
shape = tuple(size)
return n, shape
def _logpdf(self, x, dim, df, scale, log_det_scale, C):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
C : ndarray
            Cholesky factorization of the scale matrix, lower triangular.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
# log determinant of x
# Note: x has components along the last axis, so that x.T has
        # components along the 0-th axis. Then since det(A) = det(A'), this
# gives us a 1-dim vector of determinants
# Retrieve tr(scale^{-1} x)
log_det_x = np.zeros(x.shape[-1])
scale_inv_x = np.zeros(x.shape)
tr_scale_inv_x = np.zeros(x.shape[-1])
for i in range(x.shape[-1]):
_, log_det_x[i] = self._cholesky_logdet(x[:,:,i])
scale_inv_x[:,:,i] = scipy.linalg.cho_solve((C, True), x[:,:,i])
tr_scale_inv_x[i] = scale_inv_x[:,:,i].trace()
# Log PDF
out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) -
(0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale +
multigammaln(0.5*df, dim)))
return out
def logpdf(self, x, df, scale):
"""
Log of the Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
        logpdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
# Cholesky decomposition of scale, get log(det(scale))
C, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale, C)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""
Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
return df * scale
def mean(self, df, scale):
"""
Mean of the Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out)
def _mode(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
if df >= dim + 1:
out = (df-dim-1) * scale
else:
out = None
return out
def mode(self, df, scale):
"""
Mode of the Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float or None
The Mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _var(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
var = scale**2
diag = scale.diagonal() # 1 x dim array
var += np.outer(diag, diag)
var *= df
return var
def var(self, df, scale):
"""
Variance of the Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out)
def _standard_rvs(self, n, shape, dim, df, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
random_state : np.random.RandomState instance
RandomState used for drawing the random variates.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
# Random normal variates for off-diagonal elements
n_tril = dim * (dim-1) // 2
covariances = random_state.normal(
size=n*n_tril).reshape(shape+(n_tril,))
# Random chi-square variates for diagonal elements
variances = np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5
for i in range(dim)]].reshape((dim,) + shape[::-1]).T
# Create the A matri(ces) - lower triangular
A = np.zeros(shape + (dim, dim))
# Input the covariances
size_idx = tuple([slice(None,None,None)]*len(shape))
tril_idx = np.tril_indices(dim, k=-1)
A[size_idx + tril_idx] = covariances
# Input the variances
diag_idx = np.diag_indices(dim)
A[size_idx + diag_idx] = variances
return A
def _rvs(self, n, shape, dim, df, C, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
C : ndarray
Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Calculate the matrices A, which are actually lower triangular
# Cholesky factorizations of a matrix B such that B ~ W(df, I)
A = self._standard_rvs(n, shape, dim, df, random_state)
# Calculate SA = C A A' C', where SA ~ W(df, scale)
# Note: this is the product of a (lower) (lower) (lower)' (lower)'
# or, denoting B = AA', it is C B C' where C is the lower
# triangular Cholesky factorization of the scale matrix.
# this appears to conflict with the instructions in [1]_, which
# suggest that it should be D' B D where D is the lower
# triangular factorization of the scale matrix. However, it is
# meant to refer to the Bartlett (1933) representation of a
# Wishart random variate as L A A' L' where L is lower triangular
# so it appears that understanding D' to be upper triangular
# is either a typo in or misreading of [1]_.
for index in np.ndindex(shape):
CA = np.dot(C, A[index])
A[index] = np.dot(CA, CA.T)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""
Draw random samples from a Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
            Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Cholesky decomposition of scale
C = scipy.linalg.cholesky(scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def _entropy(self, dim, df, log_det_scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'entropy' instead.
"""
return (
0.5 * (dim+1) * log_det_scale +
0.5 * dim * (dim+1) * _LOG_2 +
multigammaln(0.5*df, dim) -
0.5 * (df - dim - 1) * np.sum(
[psi(0.5*(df + 1 - (i+1))) for i in range(dim)]
) +
0.5 * df * dim
)
def entropy(self, df, scale):
"""
Compute the differential entropy of the Wishart.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Wishart distribution
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
_, log_det_scale = self._cholesky_logdet(scale)
return self._entropy(dim, df, log_det_scale)
def _cholesky_logdet(self, scale):
"""
        Compute Cholesky decomposition and determine log(det(scale)).
Parameters
----------
scale : ndarray
Scale matrix.
Returns
-------
c_decomp : ndarray
The Cholesky decomposition of `scale`.
logdet : scalar
The log of the determinant of `scale`.
Notes
-----
This computation of ``logdet`` is equivalent to
``np.linalg.slogdet(scale)``. It is ~2x faster though.
"""
c_decomp = scipy.linalg.cholesky(scale, lower=True)
logdet = 2 * np.sum(np.log(c_decomp.diagonal()))
return c_decomp, logdet
wishart = wishart_gen()
class wishart_frozen(multi_rv_frozen):
"""
Create a frozen Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
"""
def __init__(self, df, scale, seed=None):
self._dist = wishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale)
self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale, self.C)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out)
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out)
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
return self._dist._entropy(self.dim, self.df, self.log_det_scale)
# Set frozen generator docstrings from corresponding docstrings in
# Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']:
method = wishart_gen.__dict__[name]
method_frozen = wishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
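# Minimal sketch (illustrative helper, not part of the public API): for a
# one-dimensional unit scale the Wishart density collapses to the chi-square
# density, which is written out directly with `gammaln` and `_LOG_2` so the
# check relies only on names already available in this module.
def _wishart_chi2_check(x=3.5, df=5):
    w_pdf = wishart.pdf(x, df=df, scale=1)
    chi2_pdf = np.exp((0.5*df - 1)*np.log(x) - 0.5*x
                      - 0.5*df*_LOG_2 - gammaln(0.5*df))
    return w_pdf, chi2_pdf  # equal up to floating point error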
from numpy import asarray_chkfinite, asarray
from scipy.linalg.misc import LinAlgError
from scipy.linalg.lapack import get_lapack_funcs
def _cho_inv_batch(a, check_finite=True):
"""
    Invert the matrices a_i, using a Cholesky factorization of each a_i, where
a_i resides in the last two dimensions of a and the other indices describe
the index i.
Overwrites the data in a.
Parameters
----------
a : array
Array of matrices to invert, where the matrices themselves are stored
in the last two dimensions.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : array
Array of inverses of the matrices ``a_i``.
See also
--------
scipy.linalg.cholesky : Cholesky factorization of a matrix
"""
if check_finite:
a1 = asarray_chkfinite(a)
else:
a1 = asarray(a)
if len(a1.shape) < 2 or a1.shape[-2] != a1.shape[-1]:
raise ValueError('expected square matrix in last two dimensions')
potrf, potri = get_lapack_funcs(('potrf','potri'), (a1,))
tril_idx = np.tril_indices(a.shape[-2], k=-1)
triu_idx = np.triu_indices(a.shape[-2], k=1)
for index in np.ndindex(a1.shape[:-2]):
# Cholesky decomposition
a1[index], info = potrf(a1[index], lower=True, overwrite_a=False,
clean=False)
if info > 0:
raise LinAlgError("%d-th leading minor not positive definite"
% info)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potrf' % -info)
# Inversion
a1[index], info = potri(a1[index], lower=True, overwrite_c=False)
if info > 0:
raise LinAlgError("the inverse could not be computed")
if info < 0:
            raise ValueError('illegal value in %d-th argument of internal'
                             ' potri' % -info)
# Make symmetric (dpotri only fills in the lower triangle)
a1[index][triu_idx] = a1[index][tril_idx]
return a1
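# Minimal sketch (illustrative helper, not part of the public API): invert a
# stack of symmetric positive definite matrices in one `_cho_inv_batch` call
# and compare against `np.linalg.inv` applied matrix by matrix.  Note that
# `_cho_inv_batch` overwrites its input, hence the copy.
def _cho_inv_batch_demo():
    rng = np.random.RandomState(0)
    a = rng.randn(4, 3, 3)
    spd = np.einsum('...ij,...kj->...ik', a, a) + 3*np.eye(3)  # make SPD
    inv_batch = _cho_inv_batch(spd.copy())
    inv_loop = np.array([np.linalg.inv(m) for m in spd])
    return np.allclose(inv_batch, inv_loop)  # expected to be True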
class invwishart_gen(wishart_gen):
r"""
An inverse Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal covariance matrix.
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from an inverse Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" inverse Wishart
random variable:
rv = invwishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
wishart
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The inverse Wishart distribution is often denoted
.. math::
W_p^{-1}(\nu, \Psi)
where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the
:math:`p \times p` scale matrix.
The probability density function for `invwishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Sigma)`,
then its PDF is given by:
.. math::
f(S) = \frac{|\Sigma|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} }
|S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)}
\exp\left( -tr(\Sigma S^{-1}) / 2 \right)
If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then
:math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart).
If the scale matrix is 1-dimensional and equal to one, then the inverse
    Wishart distribution :math:`W_1^{-1}(\nu, 1)` collapses to the
inverse Gamma distribution with parameters shape = :math:`\frac{\nu}{2}`
and scale = :math:`\frac{1}{2}`.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] M.C. Jones, "Generating Inverse Wishart Matrices", Communications in
Statistics - Simulation and Computation, vol. 14.2, pp.511-514, 1985.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import invwishart, invgamma
>>> x = np.linspace(0.01, 1, 100)
>>> iw = invwishart.pdf(x, df=6, scale=1)
>>> iw[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> ig = invgamma.pdf(x, 6/2., scale=1./2)
>>> ig[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> plt.plot(x, iw)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super(invwishart_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""
Create a frozen inverse Wishart distribution.
See `invwishart_frozen` for more information.
"""
return invwishart_frozen(df, scale, seed)
def _logpdf(self, x, dim, df, scale, log_det_scale):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function.
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
log_det_x = np.zeros(x.shape[-1])
#scale_x_inv = np.zeros(x.shape)
x_inv = np.copy(x).T
if dim > 1:
_cho_inv_batch(x_inv) # works in-place
else:
x_inv = 1./x_inv
tr_scale_x_inv = np.zeros(x.shape[-1])
for i in range(x.shape[-1]):
C, lower = scipy.linalg.cho_factor(x[:,:,i], lower=True)
log_det_x[i] = 2 * np.sum(np.log(C.diagonal()))
#scale_x_inv[:,:,i] = scipy.linalg.cho_solve((C, True), scale).T
tr_scale_x_inv[i] = np.dot(scale, x_inv[i]).trace()
# Log PDF
out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) -
(0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) -
multigammaln(0.5*df, dim))
return out
def logpdf(self, x, df, scale):
"""
Log of the inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
        logpdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
_, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""
Inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
if df > dim + 1:
out = scale / (df - dim - 1)
else:
out = None
return out
def mean(self, df, scale):
"""
Mean of the inverse Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus one.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float or None
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _mode(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
return scale / (df + dim + 1)
def mode(self, df, scale):
"""
Mode of the inverse Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float
The Mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out)
def _var(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
if df > dim + 3:
var = (df - dim + 1) * scale**2
diag = scale.diagonal() # 1 x dim array
var += (df - dim - 1) * np.outer(diag, diag)
var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3)
else:
var = None
return var
def var(self, df, scale):
"""
Variance of the inverse Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus three.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _rvs(self, n, shape, dim, df, C, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
C : ndarray
Cholesky factorization of the scale matrix, lower triagular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Get random draws A such that A ~ W(df, I)
A = super(invwishart_gen, self)._standard_rvs(n, shape, dim,
df, random_state)
# Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale)
eye = np.eye(dim)
trtrs = get_lapack_funcs(('trtrs'), (A,))
for index in np.ndindex(A.shape[:-2]):
# Calculate CA
CA = np.dot(C, A[index])
# Get (C A)^{-1} via triangular solver
if dim > 1:
CA, info = trtrs(CA, eye, lower=True)
if info > 0:
raise LinAlgError("Singular matrix.")
if info < 0:
raise ValueError('Illegal value in %d-th argument of'
' internal trtrs' % -info)
else:
CA = 1. / CA
# Get SA
A[index] = np.dot(CA.T, CA)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""
Draw random samples from an inverse Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
            Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Invert the scale
eye = np.eye(dim)
L, lower = scipy.linalg.cho_factor(scale, lower=True)
inv_scale = scipy.linalg.cho_solve((L, lower), eye)
# Cholesky decomposition of inverted scale
C = scipy.linalg.cholesky(inv_scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def entropy(self):
# Need to find reference for inverse Wishart entropy
raise AttributeError
invwishart = invwishart_gen()
class invwishart_frozen(multi_rv_frozen):
def __init__(self, df, scale, seed=None):
"""
Create a frozen inverse Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
"""
self._dist = invwishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale
)
# Get the determinant via Cholesky factorization
C, lower = scipy.linalg.cho_factor(self.scale, lower=True)
self.log_det_scale = 2 * np.sum(np.log(C.diagonal()))
# Get the inverse using the Cholesky factorization
eye = np.eye(self.dim)
self.inv_scale = scipy.linalg.cho_solve((C, lower), eye)
# Get the Cholesky factorization of the inverse scale
self.C = scipy.linalg.cholesky(self.inv_scale, lower=True)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out)
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
# Need to find reference for inverse Wishart entropy
raise AttributeError
# Set frozen generator docstrings from corresponding docstrings in
# inverse Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']:
method = invwishart_gen.__dict__[name]
    method_frozen = invwishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
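# Minimal Monte Carlo sketch (illustrative helper, not part of the public
# API): it illustrates the duality stated in the `invwishart` docstring --
# inverses of Wishart draws with scale ``inv(scale)`` are inverse Wishart
# distributed, so their sample mean should approach
# ``scale / (df - dim - 1)``, i.e. ``invwishart.mean(df, scale)``.
def _invwishart_duality_demo(df=10, size=2000, seed=123):
    scale = np.array([[2.0, 0.3], [0.3, 1.0]])
    draws = wishart.rvs(df, np.linalg.inv(scale), size=size,
                        random_state=seed)
    inv_draws = np.array([np.linalg.inv(d) for d in draws])
    return inv_draws.mean(axis=0), invwishart.mean(df, scale)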
_multinomial_doc_default_callparams = """\
n : int
Number of trials
p : array_like
Probability of a trial falling into each category; should sum to 1
"""
_multinomial_doc_callparams_note = \
"""`n` should be a positive integer. Each element of `p` should be in the
interval :math:`[0,1]` and the elements should sum to 1. If they do not sum to
1, the last element of the `p` array is not used and is replaced with the
remaining probability left over from the earlier elements.
"""
_multinomial_doc_frozen_callparams = ""
_multinomial_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
multinomial_docdict_params = {
'_doc_default_callparams': _multinomial_doc_default_callparams,
'_doc_callparams_note': _multinomial_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
multinomial_docdict_noparams = {
'_doc_default_callparams': _multinomial_doc_frozen_callparams,
'_doc_callparams_note': _multinomial_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multinomial_gen(multi_rv_generic):
r"""
A multinomial random variable.
Methods
-------
``pmf(x, n, p)``
Probability mass function.
``logpmf(x, n, p)``
Log of the probability mass function.
``rvs(n, p, size=1, random_state=None)``
Draw random samples from a multinomial distribution.
``entropy(n, p)``
Compute the entropy of the multinomial distribution.
``cov(n, p)``
Compute the covariance matrix of the multinomial distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Notes
-----
%(_doc_callparams_note)s
Alternatively, the object may be called (as a function) to fix the `n` and
`p` parameters, returning a "frozen" multinomial random variable:
The probability mass function for `multinomial` is
.. math::
f(x) = \frac{n!}{x_1! \cdots x_k!} p_1^{x_1} \cdots p_k^{x_k},
supported on :math:`x=(x_1, \ldots, x_k)` where each :math:`x_i` is a
nonnegative integer and their sum is :math:`n`.
.. versionadded:: 0.19.0
Examples
--------
>>> from scipy.stats import multinomial
>>> rv = multinomial(8, [0.3, 0.2, 0.5])
>>> rv.pmf([1, 3, 4])
0.042000000000000072
The multinomial distribution for :math:`k=2` is identical to the
corresponding binomial distribution (tiny numerical differences
notwithstanding):
>>> from scipy.stats import binom
>>> multinomial.pmf([3, 4], n=7, p=[0.4, 0.6])
0.29030399999999973
>>> binom.pmf(3, 7, 0.4)
0.29030400000000012
The functions ``pmf``, ``logpmf``, ``entropy``, and ``cov`` support
broadcasting, under the convention that the vector parameters (``x`` and
``p``) are interpreted as if each row along the last axis is a single
object. For instance:
>>> multinomial.pmf([[3, 4], [3, 5]], n=[7, 8], p=[.3, .7])
array([0.2268945, 0.25412184])
Here, ``x.shape == (2, 2)``, ``n.shape == (2,)``, and ``p.shape == (2,)``,
but following the rules mentioned above they behave as if the rows
``[3, 4]`` and ``[3, 5]`` in ``x`` and ``[.3, .7]`` in ``p`` were a single
object, and as if we had ``x.shape = (2,)``, ``n.shape = (2,)``, and
``p.shape = ()``. To obtain the individual elements without broadcasting,
we would do this:
>>> multinomial.pmf([3, 4], n=7, p=[.3, .7])
0.2268945
>>> multinomial.pmf([3, 5], 8, p=[.3, .7])
0.25412184
This broadcasting also works for ``cov``, where the output objects are
square matrices of size ``p.shape[-1]``. For example:
>>> multinomial.cov([4, 5], [[.3, .7], [.4, .6]])
array([[[ 0.84, -0.84],
[-0.84, 0.84]],
[[ 1.2 , -1.2 ],
[-1.2 , 1.2 ]]])
In this example, ``n.shape == (2,)`` and ``p.shape == (2, 2)``, and
following the rules above, these broadcast as if ``p.shape == (2,)``.
Thus the result should also be of shape ``(2,)``, but since each output is
a :math:`2 \times 2` matrix, the result in fact has shape ``(2, 2, 2)``,
where ``result[0]`` is equal to ``multinomial.cov(n=4, p=[.3, .7])`` and
``result[1]`` is equal to ``multinomial.cov(n=5, p=[.4, .6])``.
See also
--------
scipy.stats.binom : The binomial distribution.
numpy.random.multinomial : Sampling from the multinomial distribution.
"""
def __init__(self, seed=None):
super(multinomial_gen, self).__init__(seed)
self.__doc__ = \
doccer.docformat(self.__doc__, multinomial_docdict_params)
def __call__(self, n, p, seed=None):
"""
Create a frozen multinomial distribution.
See `multinomial_frozen` for more information.
"""
return multinomial_frozen(n, p, seed)
def _process_parameters(self, n, p):
"""
Return: n_, p_, npcond.
n_ and p_ are arrays of the correct shape; npcond is a boolean array
flagging values out of the domain.
"""
p = np.array(p, dtype=np.float64, copy=True)
p[...,-1] = 1. - p[...,:-1].sum(axis=-1)
# true for bad p
pcond = np.any(p <= 0, axis=-1)
pcond |= np.any(p > 1, axis=-1)
n = np.array(n, dtype=np.int, copy=True)
# true for bad n
ncond = n <= 0
return n, p, ncond | pcond
def _process_quantiles(self, x, n, p):
"""
Return: x_, xcond.
x_ is an int array; xcond is a boolean array flagging values out of the
domain.
"""
xx = np.asarray(x, dtype=np.int)
if xx.ndim == 0:
raise ValueError("x must be an array.")
if xx.size != 0 and not xx.shape[-1] == p.shape[-1]:
raise ValueError("Size of each quantile should be size of p: "
"received %d, but expected %d." % (xx.shape[-1], p.shape[-1]))
# true for x out of the domain
cond = np.any(xx != x, axis=-1)
cond |= np.any(xx < 0, axis=-1)
cond = cond | (np.sum(xx, axis=-1) != n)
return xx, cond
def _checkresult(self, result, cond, bad_value):
result = np.asarray(result)
if cond.ndim != 0:
result[cond] = bad_value
elif cond:
if result.ndim == 0:
return bad_value
result[...] = bad_value
return result
def _logpmf(self, x, n, p):
return gammaln(n+1) + np.sum(xlogy(x, p) - gammaln(x+1), axis=-1)
def logpmf(self, x, n, p):
"""
Log of the Multinomial probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
            Each quantile is a vector of nonnegative integer counts that sums to `n`.
%(_doc_default_callparams)s
Returns
-------
logpmf : ndarray or scalar
Log of the probability mass function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
n, p, npcond = self._process_parameters(n, p)
x, xcond = self._process_quantiles(x, n, p)
result = self._logpmf(x, n, p)
# replace values for which x was out of the domain; broadcast
# xcond to the right shape
xcond_ = xcond | np.zeros(npcond.shape, dtype=np.bool_)
result = self._checkresult(result, xcond_, np.NINF)
# replace values bad for n or p; broadcast npcond to the right shape
npcond_ = npcond | np.zeros(xcond.shape, dtype=np.bool_)
return self._checkresult(result, npcond_, np.NAN)
def pmf(self, x, n, p):
"""
Multinomial probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
            Each quantile is a vector of nonnegative integer counts that sums to `n`.
%(_doc_default_callparams)s
Returns
-------
pmf : ndarray or scalar
            Probability mass function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpmf(x, n, p))
def mean(self, n, p):
"""
Mean of the Multinomial distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
        mean : ndarray
The mean of the distribution
"""
n, p, npcond = self._process_parameters(n, p)
result = n[..., np.newaxis]*p
return self._checkresult(result, npcond, np.NAN)
def cov(self, n, p):
"""
Covariance matrix of the multinomial distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
cov : ndarray
The covariance matrix of the distribution
"""
n, p, npcond = self._process_parameters(n, p)
nn = n[..., np.newaxis, np.newaxis]
result = nn * np.einsum('...j,...k->...jk', -p, p)
# change the diagonal
for i in range(p.shape[-1]):
result[...,i, i] += n*p[..., i]
return self._checkresult(result, npcond, np.nan)
def entropy(self, n, p):
r"""
Compute the entropy of the multinomial distribution.
The entropy is computed using this expression:
.. math::
        H = -\log n! - n\sum_{i=1}^k p_i \log p_i +
\sum_{i=1}^k \sum_{x=0}^n \binom n x p_i^x(1-p_i)^{n-x} \log x!
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Multinomial distribution
Notes
-----
%(_doc_callparams_note)s
"""
n, p, npcond = self._process_parameters(n, p)
x = np.r_[1:np.max(n)+1]
term1 = n*np.sum(entr(p), axis=-1)
term1 -= gammaln(n+1)
n = n[..., np.newaxis]
new_axes_needed = max(p.ndim, n.ndim) - x.ndim + 1
x.shape += (1,)*new_axes_needed
term2 = np.sum(binom.pmf(x, n, p)*gammaln(x+1),
axis=(-1, -1-new_axes_needed))
return self._checkresult(term1 + term2, npcond, np.nan)
def rvs(self, n, p, size=None, random_state=None):
"""
Draw random samples from a Multinomial distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
            Number of samples to draw (default is ``None``, in which case a
            single variate is returned).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of shape (`size`, `len(p)`)
Notes
-----
%(_doc_callparams_note)s
"""
n, p, npcond = self._process_parameters(n, p)
random_state = self._get_random_state(random_state)
return random_state.multinomial(n, p, size)
multinomial = multinomial_gen()
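# Illustrative sketch (not part of the original module): the stable log-PMF
# above is just the log of the textbook multinomial formula,
#     log pmf(x) = log n! - sum_i log x_i! + sum_i x_i log p_i,
# so the two evaluations below should agree; the numbers are arbitrary.
def _multinomial_logpmf_selfcheck():
    import numpy as np
    from math import factorial, log
    x, n, p = [2, 3, 5], 10, [0.2, 0.3, 0.5]
    naive = (log(factorial(n)) - sum(log(factorial(xi)) for xi in x)
             + sum(xi * log(pi) for xi, pi in zip(x, p)))
    assert np.allclose(multinomial.logpmf(x, n, p), naive)
    return naive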
class multinomial_frozen(multi_rv_frozen):
r"""
Create a frozen Multinomial distribution.
Parameters
----------
n : int
number of trials
    p : array_like
probability of a trial falling into each category; should sum to 1
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
"""
def __init__(self, n, p, seed=None):
self._dist = multinomial_gen(seed)
self.n, self.p, self.npcond = self._dist._process_parameters(n, p)
# monkey patch self._dist
def _process_parameters(n, p):
return self.n, self.p, self.npcond
self._dist._process_parameters = _process_parameters
def logpmf(self, x):
return self._dist.logpmf(x, self.n, self.p)
def pmf(self, x):
return self._dist.pmf(x, self.n, self.p)
def mean(self):
return self._dist.mean(self.n, self.p)
def cov(self):
return self._dist.cov(self.n, self.p)
def entropy(self):
return self._dist.entropy(self.n, self.p)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.n, self.p, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multinomial and fill in default strings in class docstrings
for name in ['logpmf', 'pmf', 'mean', 'cov', 'rvs']:
method = multinomial_gen.__dict__[name]
method_frozen = multinomial_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, multinomial_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__,
multinomial_docdict_params)
class special_ortho_group_gen(multi_rv_generic):
r"""
A matrix-valued SO(N) random variable.
Return a random rotation matrix, drawn from the Haar distribution
(the only uniform distribution on SO(n)).
The `dim` keyword specifies the dimension N.
Methods
-------
``rvs(dim=None, size=1, random_state=None)``
Draw random samples from SO(N).
Parameters
----------
dim : scalar
Dimension of matrices
Notes
----------
This class is wrapping the random_rot code from the MDP Toolkit,
https://github.com/mdp-toolkit/mdp-toolkit
Return a random rotation matrix, drawn from the Haar distribution
(the only uniform distribution on SO(n)).
The algorithm is described in the paper
Stewart, G.W., "The efficient generation of random orthogonal
matrices with an application to condition estimators", SIAM Journal
on Numerical Analysis, 17(3), pp. 403-409, 1980.
For more information see
http://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization
See also the similar `ortho_group`.
Examples
--------
>>> from scipy.stats import special_ortho_group
>>> x = special_ortho_group.rvs(3)
>>> np.dot(x, x.T)
array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],
[ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],
[ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])
>>> import scipy.linalg
>>> scipy.linalg.det(x)
1.0
This generates one random matrix from SO(3). It is orthogonal and
has a determinant of 1.
"""
def __init__(self, seed=None):
super(special_ortho_group_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def __call__(self, dim=None, seed=None):
"""
Create a frozen SO(N) distribution.
See `special_ortho_group_frozen` for more information.
"""
return special_ortho_group_frozen(dim, seed=seed)
def _process_parameters(self, dim):
"""
Dimension N must be specified; it cannot be inferred.
"""
if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
raise ValueError("""Dimension of rotation must be specified,
and must be a scalar greater than 1.""")
return dim
def rvs(self, dim, size=1, random_state=None):
"""
Draw random samples from SO(N).
Parameters
----------
dim : integer
Dimension of rotation space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
rvs : ndarray or scalar
Random size N-dimensional matrices, dimension (size, dim, dim)
"""
size = int(size)
if size > 1:
return np.array([self.rvs(dim, size=1, random_state=random_state)
for i in range(size)])
dim = self._process_parameters(dim)
random_state = self._get_random_state(random_state)
H = np.eye(dim)
D = np.ones((dim,))
for n in range(1, dim):
x = random_state.normal(size=(dim-n+1,))
D[n-1] = np.sign(x[0])
x[0] -= D[n-1]*np.sqrt((x*x).sum())
# Householder transformation
Hx = (np.eye(dim-n+1)
- 2.*np.outer(x, x)/(x*x).sum())
mat = np.eye(dim)
mat[n-1:, n-1:] = Hx
H = np.dot(H, mat)
# Fix the last sign such that the determinant is 1
D[-1] = (-1)**(1-(dim % 2))*D.prod()
# Equivalent to np.dot(np.diag(D), H) but faster, apparently
H = (D*H.T).T
return H
special_ortho_group = special_ortho_group_gen()
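# Illustrative sketch (not part of the original module): each factor used in
# rvs() above is a Householder reflection H = I - 2*outer(x, x)/<x, x>, which
# is symmetric and orthogonal with determinant -1; the product of such factors
# (with the sign bookkeeping in D) is therefore a rotation.  The test vector
# below is arbitrary.
def _householder_factor_selfcheck():
    import numpy as np
    x = np.array([1.0, -2.0, 0.5])
    H = np.eye(3) - 2.0 * np.outer(x, x) / np.dot(x, x)
    assert np.allclose(H, H.T)
    assert np.allclose(np.dot(H, H.T), np.eye(3))
    assert np.allclose(np.linalg.det(H), -1.0)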
class special_ortho_group_frozen(multi_rv_frozen):
def __init__(self, dim=None, seed=None):
"""
Create a frozen SO(N) distribution.
Parameters
----------
dim : scalar
Dimension of matrices
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
Examples
--------
>>> from scipy.stats import special_ortho_group
>>> g = special_ortho_group(5)
>>> x = g.rvs()
"""
self._dist = special_ortho_group_gen(seed)
self.dim = self._dist._process_parameters(dim)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.dim, size, random_state)
class ortho_group_gen(multi_rv_generic):
r"""
A matrix-valued O(N) random variable.
Return a random orthogonal matrix, drawn from the O(N) Haar
distribution (the only uniform distribution on O(N)).
The `dim` keyword specifies the dimension N.
Methods
-------
``rvs(dim=None, size=1, random_state=None)``
Draw random samples from O(N).
Parameters
----------
dim : scalar
Dimension of matrices
Notes
----------
This class is closely related to `special_ortho_group`.
Some care is taken to avoid numerical error, as per the paper by Mezzadri.
References
----------
.. [1] F. Mezzadri, "How to generate random matrices from the classical
compact groups", :arXiv:`math-ph/0609050v2`.
Examples
--------
>>> from scipy.stats import ortho_group
>>> x = ortho_group.rvs(3)
>>> np.dot(x, x.T)
array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],
[ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],
[ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])
>>> import scipy.linalg
>>> np.fabs(scipy.linalg.det(x))
1.0
This generates one random matrix from O(3). It is orthogonal and
has a determinant of +1 or -1.
"""
def __init__(self, seed=None):
super(ortho_group_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def _process_parameters(self, dim):
"""
Dimension N must be specified; it cannot be inferred.
"""
if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
raise ValueError("Dimension of rotation must be specified,"
"and must be a scalar greater than 1.")
return dim
def rvs(self, dim, size=1, random_state=None):
"""
Draw random samples from O(N).
Parameters
----------
dim : integer
Dimension of rotation space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
rvs : ndarray or scalar
Random size N-dimensional matrices, dimension (size, dim, dim)
"""
size = int(size)
if size > 1:
return np.array([self.rvs(dim, size=1, random_state=random_state)
for i in range(size)])
dim = self._process_parameters(dim)
random_state = self._get_random_state(random_state)
H = np.eye(dim)
for n in range(1, dim):
x = random_state.normal(size=(dim-n+1,))
# random sign, 50/50, but chosen carefully to avoid roundoff error
D = np.sign(x[0])
x[0] += D*np.sqrt((x*x).sum())
# Householder transformation
Hx = -D*(np.eye(dim-n+1)
- 2.*np.outer(x, x)/(x*x).sum())
mat = np.eye(dim)
mat[n-1:, n-1:] = Hx
H = np.dot(H, mat)
return H
ortho_group = ortho_group_gen()
class random_correlation_gen(multi_rv_generic):
r"""
A random correlation matrix.
Return a random correlation matrix, given a vector of eigenvalues.
The `eigs` keyword specifies the eigenvalues of the correlation matrix,
and implies the dimension.
Methods
-------
``rvs(eigs=None, random_state=None)``
Draw random correlation matrices, all with eigenvalues eigs.
Parameters
----------
eigs : 1d ndarray
Eigenvalues of correlation matrix.
Notes
----------
Generates a random correlation matrix following a numerically stable
algorithm spelled out by Davies & Higham. This algorithm uses a single O(N)
similarity transformation to construct a symmetric positive semi-definite
matrix, and applies a series of Givens rotations to scale it to have ones
on the diagonal.
References
----------
.. [1] Davies, Philip I; Higham, Nicholas J; "Numerically stable generation
of correlation matrices and their factors", BIT 2000, Vol. 40,
           No. 4, pp. 640-651
Examples
--------
>>> from scipy.stats import random_correlation
>>> np.random.seed(514)
>>> x = random_correlation.rvs((.5, .8, 1.2, 1.5))
>>> x
array([[ 1. , -0.20387311, 0.18366501, -0.04953711],
[-0.20387311, 1. , -0.24351129, 0.06703474],
[ 0.18366501, -0.24351129, 1. , 0.38530195],
[-0.04953711, 0.06703474, 0.38530195, 1. ]])
>>> import scipy.linalg
>>> e, v = scipy.linalg.eigh(x)
>>> e
array([ 0.5, 0.8, 1.2, 1.5])
"""
def __init__(self, seed=None):
super(random_correlation_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def _process_parameters(self, eigs, tol):
eigs = np.asarray(eigs, dtype=float)
dim = eigs.size
if eigs.ndim != 1 or eigs.shape[0] != dim or dim <= 1:
raise ValueError("Array 'eigs' must be a vector of length greater than 1.")
if np.fabs(np.sum(eigs) - dim) > tol:
raise ValueError("Sum of eigenvalues must equal dimensionality.")
for x in eigs:
if x < -tol:
raise ValueError("All eigenvalues must be non-negative.")
return dim, eigs
def _givens_to_1(self, aii, ajj, aij):
"""Computes a 2x2 Givens matrix to put 1's on the diagonal for the input matrix.
The input matrix is a 2x2 symmetric matrix M = [ aii aij ; aij ajj ].
        The output matrix g is a 2x2 Givens rotation of the form [ c s ; -s c ];
the elements c and s are returned.
Applying the output matrix to the input matrix (as b=g.T M g)
results in a matrix with bii=1, provided tr(M) - det(M) >= 1
and floating point issues do not occur. Otherwise, some other
valid rotation is returned. When tr(M)==2, also bjj=1.
"""
aiid = aii - 1.
ajjd = ajj - 1.
if ajjd == 0:
# ajj==1, so swap aii and ajj to avoid division by zero
return 0., 1.
dd = math.sqrt(max(aij**2 - aiid*ajjd, 0))
        # The sign of t is chosen to avoid cancellation [1]
t = (aij + math.copysign(dd, aij)) / ajjd
c = 1. / math.sqrt(1. + t*t)
if c == 0:
# Underflow
s = 1.0
else:
s = c*t
return c, s
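    # Illustrative check (not part of the original class): for a 2x2 symmetric
    # block M = [[aii, aij], [aij, ajj]] with tr(M) - det(M) >= 1, the rotation
    # g = [[c, s], [-s, c]] built from the values returned above should give
    # (g.T M g)[0, 0] == 1.  The numbers below are arbitrary.
    def _givens_to_1_selfcheck(self):
        import numpy as np
        aii, ajj, aij = 1.5, 0.7, 0.3
        c, s = self._givens_to_1(aii, ajj, aij)
        g = np.array([[c, s], [-s, c]])
        M = np.array([[aii, aij], [aij, ajj]])
        b = g.T.dot(M).dot(g)
        assert np.allclose(b[0, 0], 1.0)
        return b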
def _to_corr(self, m):
"""
        Given a psd matrix m, rotate to put ones on the diagonal, turning it
        into a correlation matrix. This also requires the trace to equal the
        dimensionality. Note: modifies input matrix
"""
# Check requirements for in-place Givens
if not (m.flags.c_contiguous and m.dtype == np.float64 and m.shape[0] == m.shape[1]):
raise ValueError()
d = m.shape[0]
for i in range(d-1):
if m[i,i] == 1:
continue
elif m[i, i] > 1:
for j in range(i+1, d):
if m[j, j] < 1:
break
else:
for j in range(i+1, d):
if m[j, j] > 1:
break
c, s = self._givens_to_1(m[i,i], m[j,j], m[i,j])
# Use BLAS to apply Givens rotations in-place. Equivalent to:
# g = np.eye(d)
# g[i, i] = g[j,j] = c
# g[j, i] = -s; g[i, j] = s
# m = np.dot(g.T, np.dot(m, g))
mv = m.ravel()
drot(mv, mv, c, -s, n=d,
offx=i*d, incx=1, offy=j*d, incy=1,
overwrite_x=True, overwrite_y=True)
drot(mv, mv, c, -s, n=d,
offx=i, incx=d, offy=j, incy=d,
overwrite_x=True, overwrite_y=True)
return m
def rvs(self, eigs, random_state=None, tol=1e-13, diag_tol=1e-7):
"""
Draw random correlation matrices
Parameters
----------
eigs : 1d ndarray
Eigenvalues of correlation matrix
tol : float, optional
Tolerance for input parameter checks
diag_tol : float, optional
Tolerance for deviation of the diagonal of the resulting
matrix. Default: 1e-7
Raises
------
RuntimeError
Floating point error prevented generating a valid correlation
matrix.
Returns
-------
rvs : ndarray or scalar
Random size N-dimensional matrices, dimension (size, dim, dim),
each having eigenvalues eigs.
"""
dim, eigs = self._process_parameters(eigs, tol=tol)
random_state = self._get_random_state(random_state)
m = ortho_group.rvs(dim, random_state=random_state)
m = np.dot(np.dot(m, np.diag(eigs)), m.T) # Set the trace of m
m = self._to_corr(m) # Carefully rotate to unit diagonal
# Check diagonal
if abs(m.diagonal() - 1).max() > diag_tol:
raise RuntimeError("Failed to generate a valid correlation matrix")
return m
random_correlation = random_correlation_gen()
| mit |
zorroblue/scikit-learn | sklearn/feature_extraction/hashing.py | 29 | 6866 | # Author: Lars Buitinck
# License: BSD 3 clause
import numbers
import warnings
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
input_type : string, optional, default "dict"
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
dtype : numpy type, optional, default np.float64
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
alternate_sign : boolean, optional, default True
When True, an alternating sign is added to the features as to
approximately conserve the inner product in the hashed space even for
small n_features. This approach is similar to sparse random projection.
non_negative : boolean, optional, default False
When True, an absolute value is applied to the features matrix prior to
returning it. When used in conjunction with alternate_sign=True, this
significantly reduces the inner product preservation property.
.. deprecated:: 0.19
This option will be removed in 0.21.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, alternate_sign=True, non_negative=False):
self._validate_params(n_features, input_type)
if non_negative:
warnings.warn("the option non_negative=True has been deprecated"
" in 0.19 and will be removed"
" in version 0.21.", DeprecationWarning)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.alternate_sign = alternate_sign
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Parameters
----------
X : array-like
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
            Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype,
self.alternate_sign)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
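# Hypothetical usage sketch (not part of the original module): with
# input_type="string" every sample is just an iterable of feature names and
# each name contributes an implied value of 1.  The tokens, n_features value
# and helper name below are made up for illustration.
def _feature_hasher_string_input_demo():
    h = FeatureHasher(n_features=16, input_type="string")
    X = h.transform([["dog", "cat", "dog"], ["run"]])
    # The two "dog" tokens accumulate in the same hashed column (the stored
    # value may be negative because alternate_sign=True by default).
    return X.toarray()          # dense array of shape (2, 16)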
| bsd-3-clause |
lavakyan/mstm-spectrum | mstm_studio/mie_theory.py | 1 | 6127 | """
code from nanophotonics/npmie repository
url: https://github.com/nanophotonics/npmie
code by Alan Sanders
modified for use in MSTM-studio by L.Aavakyan
"""
import numpy as np
from mstm_studio.mstm_spectrum import Material
try:
from scipy.special import sph_jnyn
except:
from scipy.special import spherical_yn, spherical_jn
def sph_jnyn(maxn, z):
jn = []
djn = []
yn = []
dyn = []
for n in range(0, maxn+1):
jn.append (spherical_jn(n, z))
djn.append(spherical_jn(n, z, derivative=True))
yn.append (spherical_yn(n, z))
dyn.append(spherical_yn(n, z, derivative=True))
return np.array(jn), np.array(djn), np.array(yn), np.array(dyn)
def sph_hn(n, x):
# calculate spherical hankel, h(n,x) = j(n,x) + iy(n,x) #
jn, djn, yn, dyn = sph_jnyn(n, x)
hn = jn + 1j * yn
dhn = djn + 1j * dyn
return hn, dhn
def calculate_mie_coefficients(n_max, x, m):
"""
Calculates the Mie coefficients.
    :param n_max: number of terms to keep in the series expansion
    :param x: size parameter
    :param m: relative refractive index of the sphere (n_sph / n_med)
    :return: Mie coefficients a_n, b_n
"""
# calculate spherical bessels #
jn, djn, yn, dyn = sph_jnyn(n_max, x) # j(n, x), y(n, x)
jm, djm, ym, dym = sph_jnyn(n_max, m * x) # j(n, mx), y(n, mx)
# calculate spherical hankel #
hn, dhn = sph_hn(n_max, x) # h(n, x)
# calculate riccati bessel functions #
dpsi_n = [x * jn[n-1] - n * jn[n] for n in range(0, len(jn))]
dpsi_m = [m * x * jm[n-1] - n * jm[n] for n in range(0, len(jm))]
dzeta_n = [x * hn[n-1] - n * hn[n] for n in range(0, len(hn))]
a_n = (m**2 * jm * dpsi_n - jn * dpsi_m) / (m**2 * jm * dzeta_n - hn * dpsi_m)
b_n = (jm * dpsi_n - jn * dpsi_m) / (jm * dzeta_n - hn * dpsi_m)
return a_n, b_n
def calculate_mie_efficiencies(r, wavelength, n_sph, n_med):
"""
Calculates the mie efficiencies (q_scat, q_abs, q_ext, q_bscat)
for a sphere in a dielectric medium at a given wavelength.
:rtype : object
:param r: radius of the sphere
:param wavelength: wavelength of illumination
:param n_sph: complex refractive index of the sphere
:param n_med: real refractive index of the dielectric medium
    :return: efficiencies (q_scat, q_bscat, q_ext, q_abs)
"""
# calculate size parameter #
x = n_med * (2 * np.pi / wavelength) * r # x = n_med * kr, size parameter
m = n_sph / n_med
# n_max = int(np.ceil(x.real)+1) # number of terms in series expansion
n_max = int(x + 4 * x**(1.0 / 3.0) + 2) # number of terms in series expansion
q_scat = 0
q_bscat = 0
q_ext = 0
q_abs = 0
a_n, b_n = calculate_mie_coefficients(n_max, x, m)
a = 0
b = 0
for n in range(1, n_max):
a += a_n[n]
b += b_n[n]
q_scat += (2 * n + 1) * (abs(a_n[n])**2 + abs(b_n[n])**2)
q_bscat += (2 * n + 1) * ((-1)**n) * (abs(a_n[n])**2 + abs(b_n[n])**2)
q_ext += (2 * n + 1) * (a_n[n] + b_n[n]).real
q_scat *= 2 / x**2
q_bscat *= 2 / x**2
q_ext *= 2 / x**2
q_abs = q_ext - q_scat
return q_scat, q_bscat, q_ext, q_abs
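# Worked example (illustrative, not from the original code): the size
# parameter and truncation rule used above, evaluated for a hypothetical
# 50 nm-radius sphere in water (n_med ~ 1.33) illuminated at 500 nm.
def _size_parameter_example():
    import numpy as np
    r, wavelength, n_med = 50.0, 500.0, 1.33        # nm, nm, dimensionless
    x = n_med * (2 * np.pi / wavelength) * r        # ~0.84
    n_max = int(x + 4 * x**(1.0 / 3.0) + 2)         # ~6 terms in the series
    return x, n_max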
def calculate_mie_spectra(wavelengths, r, material, n_medium=1.):
"""
Calculates the mie scattering and extinction efficiency of spherical
    nanoparticles with radius r and given material surrounded by a medium n_medium
for a set of given wavelengths.
:rtype : object
:param wavelengths: array of wavelengths to calculate spectra from
:param r: radius of the sphere
:param material: instance of Material class
    :param n_medium: refractive index of the surrounding dielectric medium
"""
mie_scattering = []
mie_backscattering = []
mie_extinction = []
mie_absorption = []
for wl in wavelengths:
n_sph = material.get_n(wl) + 1j * material.get_k(wl)
q_scat, q_bscat, q_ext, q_abs = calculate_mie_efficiencies(
r, wl, n_sph, n_medium
)
mie_scattering.append(q_scat)
mie_backscattering.append(q_bscat)
mie_extinction.append(q_ext)
mie_absorption.append(q_abs)
return (np.array(mie_scattering), np.array(mie_backscattering),
np.array(mie_extinction), np.array(mie_absorption))
if __name__ == '__main__':
import matplotlib.pyplot as plt
#~ diameter_np = raw_input('Enter nanoparticle diameter (nm): ')
#~ material = raw_input("Enter nanoparticle material: ")
#~ medium = raw_input("Enter surrounding medium: ")
diameter_np = material = medium = '' # test
if diameter_np == '':
diameter_np = 140.
else:
diameter_np = float(diameter_np)
if material == '':
material = 'Au'
if medium == '':
medium = 1.
else:
medium = float(medium)
mat_dict = {'Au': 'etaGold.txt', 'Ag': 'etaSilver.txt'}
material_object = Material(3) # Material(mat_dict[material])
wavelength = np.arange(300, 1000, 0.1)
mie_scattering, mie_backscattering, mie_extinction, \
mie_absorption = calculate_mie_spectra(
wavelength, diameter_np / 2.0, material_object, medium
)
# save to file
data = np.stack([wavelength, mie_scattering, mie_backscattering, \
mie_extinction, mie_absorption])
np.savetxt('MIE.dat', np.transpose(data), header='wl\tscatt\tbscatt\text\tabs')
fig = plt.figure()
# wavelength plots #
ax = fig.add_subplot(411)
ax.plot(wavelength, mie_scattering, 'r', label='scattering')
ax.set_xticklabels(ax.get_xticklabels(), visible=False)
ax.set_ylabel('scattering')
ax = fig.add_subplot(412)
ax.plot(wavelength, mie_backscattering, 'k', label='back-scattering')
ax.set_xticklabels(ax.get_xticklabels(), visible=False)
ax.set_ylabel('back-scattering')
ax = fig.add_subplot(413)
ax.plot(wavelength, mie_extinction, 'b', label='extinction')
ax.set_xticklabels(ax.get_xticklabels(), visible=False)
ax.set_ylabel('extinction')
ax = fig.add_subplot(414)
ax.plot(wavelength, mie_absorption, 'g', label='absorption')
ax.set_ylabel('absorption')
ax.set_xlabel('wavelength (nm)')
plt.tight_layout()
plt.show()
| gpl-3.0 |
lisette-espin/mrqap | libs/profiling.py | 1 | 4473 | __author__ = 'espin'
#######################################################################################
### Dependences
### Reference:
### http://fa.bianp.net/blog/2013/different-ways-to-get-memory-consumption-or-lessons-learned-from-memory_profiler/
#######################################################################################
import resource
import psutil
import sys
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from libs import utils
import copy
from threading import Thread
import time
from collections import OrderedDict
#######################################################################################
# FUNCTIONS
#######################################################################################
class Profiling():
def __init__(self, title=None, fn=None, perm=False):
self.mem_resource = OrderedDict()
self.mem_psutil = OrderedDict()
self.cpu_usage = OrderedDict()
self.virtual_memory_usage = OrderedDict()
self.swap_memory_usage = OrderedDict()
self.title = title
self.fn = fn
self.perm = perm
def check_memory(self, key):
self.memory_usage_resource(key)
self.memory_usage_psutil(key)
self.memory_cpu_usage(key)
self.plot()
def kill_if_necessary(self, vm, sm):
if vm.percent >= 85. or sm.percent >= 85.:
print('FULL MEMORY: \n- Virtual Memory: {}\n- Swap Memory: {}'.format(vm, sm))
def memory_cpu_usage(self, key):
cpu = psutil.cpu_percent(interval=None)
vm = psutil.virtual_memory()
sm = psutil.swap_memory()
self.cpu_usage[key] = int(255 * cpu / 100)
self.virtual_memory_usage[key] = vm.percent
self.swap_memory_usage[key] = sm.percent
self.kill_if_necessary(vm, sm)
def memory_usage_psutil(self, key):
# return the memory usage in MB
process = psutil.Process(os.getpid())
self.mem_psutil[key] = process.memory_info()[0] / float(2 ** 20)
def memory_usage_resource(self, key):
rusage_denom = 1024.
if sys.platform == 'darwin':
# ... it seems that in OSX the output is different units ...
rusage_denom = rusage_denom * rusage_denom
self.mem_resource[key] = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / rusage_denom
def copy(self, mem):
        # OrderedDict has no deepcopy() method; use the copy module instead
        self.mem_resource = copy.deepcopy(mem.mem_resource)
        self.mem_psutil = copy.deepcopy(mem.mem_psutil)
        self.cpu_usage = copy.deepcopy(mem.cpu_usage)
        self.virtual_memory_usage = copy.deepcopy(mem.virtual_memory_usage)
        self.swap_memory_usage = copy.deepcopy(mem.swap_memory_usage)
def plot(self):
'''
Plots the Memory usage in MB
:return:
'''
if self.fn is not None and self.title is not None:
labels = self.mem_resource.keys()
x = range(len(labels))
plt.figure(1)
ax1 = plt.subplot(211)
ax1.plot(x, self.mem_resource.values(), color='red', marker='o', label='mem_resource')
ax1.plot(x, self.mem_psutil.values(), color='blue', marker='o', label='mem_psutil')
ax1.set_ylabel('Memory usage in MB')
ax1.grid(True)
ax1.set_xticks(x)
ax1.set_xticklabels(labels, rotation=20, fontsize=7)
box = ax1.get_position()
ax1.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax1.legend(loc='center left', bbox_to_anchor=(1, 0.5), prop={'size':10})
ax2 = plt.subplot(212)
ax2.plot(x, self.cpu_usage.values(), color='black', marker='o', label='cpu_usage')
ax2.plot(x, self.virtual_memory_usage.values(), color='orange', marker='o', label='virtual_memory')
ax2.plot(x, self.swap_memory_usage.values(), color='green', marker='o', label='swap_memory')
#ax2.set_xlabel(self.xlabel)
ax2.set_ylabel('Percentage (usage)')
ax2.grid(True)
ax2.set_xticks(x)
ax2.set_xticklabels(labels, rotation=20, fontsize=7)
box = ax2.get_position()
ax2.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax2.legend(loc='center left', prop={'size':10}) # bbox_to_anchor=(1, 0.5),
plt.suptitle('Profiling - {}'.format(self.title))
plt.tight_layout()
plt.savefig(self.fn)
plt.close()
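# Hypothetical usage sketch (not part of the original module): record a few
# checkpoints and write the two-panel figure to disk.  The output path and
# checkpoint labels are made-up values for illustration.
if __name__ == '__main__':
    prof = Profiling(title='demo run', fn='/tmp/profiling-demo.png')
    for step in ['load', 'compute', 'save']:
        prof.check_memory(step)
        time.sleep(0.1)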
| cc0-1.0 |
potash/scikit-learn | sklearn/tests/test_dummy.py | 186 | 17778 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
@ignore_warnings
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
# We know that we can have division by zero
assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_and_prior_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
if strategy == "prior":
assert_array_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)))
else:
assert_array_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)) > 0.5)
def test_most_frequent_and_prior_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
for strategy in ("prior", "most_frequent"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_invalid():
X = [[0]] * 5 # ignored
y = [0] * 5 # ignored
est = DummyRegressor(strategy="quantile")
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=None)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=[0])
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=-0.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=1.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile='abc')
assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategy_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1 / 3, decimal=1)
assert_almost_equal(p[2], 1 / 3, decimal=1)
assert_almost_equal(p[4], 1 / 3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_and_prior_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
y_expected = np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), y_expected)
def test_dummy_regressor_sample_weight(n_samples=10):
random_state = np.random.RandomState(seed=1)
X = [[0]] * n_samples
y = random_state.rand(n_samples)
sample_weight = random_state.rand(n_samples)
est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
assert_equal(est.constant_, np.average(y, weights=sample_weight))
est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 50.))
est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 95.))
| bsd-3-clause |
PatrickChrist/scikit-learn | examples/text/document_classification_20newsgroups.py | 222 | 10500 | """
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the accuracy, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
print('data loaded')
categories = data_train.target_names # for case categories == None
def size_mb(docs):
return sum(len(s.encode('utf-8')) for s in docs) / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = vectorizer.get_feature_names()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
if feature_names:
# keep selected feature names
feature_names = [feature_names[i] for i
in ch2.get_support(indices=True)]
print("done in %fs" % (time() - t0))
print()
if feature_names:
feature_names = np.asarray(feature_names)
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if opts.print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, category in enumerate(categories):
top10 = np.argsort(clf.coef_[i])[-10:]
print(trim("%s: %s"
% (category, " ".join(feature_names[top10]))))
print()
if opts.print_report:
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=categories))
if opts.print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(clf).split('(')[0]
return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
dual=False, tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)),
('classification', LinearSVC())
])))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='r')
plt.barh(indices + .3, training_time, .2, label="training time", color='g')
plt.barh(indices + .6, test_time, .2, label="test time", color='b')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
| bsd-3-clause |
tum-ens/rivus | rivus/utils/pandaspyomo.py | 2 | 7016 | """ pandaspyomo: read data from coopr.pyomo models to pandas DataFrames
Pyomo is a GAMS-like model description language for mathematical
optimization problems. This module provides functions to read data from
Pyomo model instances and result objects. Use list_entities to get a list
of all entities (sets, params, variables, objectives or constraints) inside a
pyomo instance, before get its contents by get_entity (or get_entities).
Usage:
import pandaspyomo as pdpo
pdpo.list_entities(instance, 'var')
[('EprOut', ['time', 'process', 'commodity', 'commodity']), ...
('EprIn', ['time', 'process', 'commodity', 'commodity'])]
    epr = pdpo.get_entities(instance, ['EprOut', 'EprIn'])
...
"""
import coopr.pyomo as pyomo
import pandas as pd
def get_entity(instance, name):
""" Return a DataFrame for an entity in model instance.
Args:
instance: a Pyomo ConcreteModel instance
name: name of a Set, Param, Var, Constraint or Objective
Returns:
a single-columned Pandas DataFrame with domain as index
"""
# retrieve entity, its type and its onset names
entity = instance.__getattribute__(name)
labels = _get_onset_names(entity)
# extract values
if isinstance(entity, pyomo.Set):
# Pyomo sets don't have values, only elements
results = pd.DataFrame([(v, 1) for v in entity.value])
# for unconstrained sets, the column label is identical to their index
# hence, make index equal to entity name and append underscore to name
# (=the later column title) to preserve identical index names for both
# unconstrained supersets
if not labels:
labels = [name]
name = name+'_'
elif isinstance(entity, pyomo.Param):
if entity.dim() > 1:
results = pd.DataFrame([v[0]+(v[1],) for v in entity.iteritems()])
else:
results = pd.DataFrame(entity.iteritems())
else:
# create DataFrame
if entity.dim() > 1:
# concatenate index tuples with value if entity has
# multidimensional indices v[0]
results = pd.DataFrame(
[v[0]+(v[1].value,) for v in entity.iteritems()])
else:
# otherwise, create tuple from scalar index v[0]
results = pd.DataFrame(
[(v[0], v[1].value) for v in entity.iteritems()])
# check for duplicate onset names and append one to several "_" to make
# them unique, e.g. ['sit', 'sit', 'com'] becomes ['sit', 'sit_', 'com']
for k, label in enumerate(labels):
if label in labels[:k]:
labels[k] = labels[k] + "_"
if not results.empty:
# name columns according to labels + entity name
results.columns = labels + [name]
results.set_index(labels, inplace=True)
return results
def get_entities(instance, names):
""" Return one DataFrame with entities in columns and a common index.
Works only on entities that share a common domain (set or set_tuple), which
is used as index of the returned DataFrame.
Args:
instance: a Pyomo ConcreteModel instance
names: list of entity names (as returned by list_entities)
Returns:
a Pandas DataFrame with entities as columns and domains as index
"""
df = pd.DataFrame()
for name in names:
other = get_entity(instance, name)
if df.empty:
df = other
else:
index_names_before = df.index.names
df = df.join(other, how='outer')
if index_names_before != df.index.names:
df.index.names = index_names_before
return df
def list_entities(instance, entity_type):
""" Return list of sets, params, variables, constraints or objectives
Args:
instance: a Pyomo ConcreteModel object
entity_type: "set", "par", "var", "con" or "obj"
Returns:
DataFrame of entities
Example:
>>> data = read_excel('mimo-example.xlsx')
>>> model = create_model(data, range(1,25))
>>> list_entities(model, 'obj') #doctest: +NORMALIZE_WHITESPACE
Description Domain
Name
obj minimize(cost = sum of all cost types) []
"""
# helper function to discern entities by type
def filter_by_type(entity, entity_type):
if entity_type == 'set':
return isinstance(entity, pyomo.Set) and not entity.virtual
elif entity_type == 'par':
return isinstance(entity, pyomo.Param)
elif entity_type == 'var':
return isinstance(entity, pyomo.Var)
elif entity_type == 'con':
return isinstance(entity, pyomo.Constraint)
elif entity_type == 'obj':
return isinstance(entity, pyomo.Objective)
else:
raise ValueError("Unknown entity_type '{}'".format(entity_type))
# iterate through all model components and keep only
iter_entities = instance.__dict__.iteritems()
entities = sorted(
(name, entity.doc, _get_onset_names(entity))
for (name, entity) in iter_entities
if filter_by_type(entity, entity_type))
# if something was found, wrap tuples in DataFrame, otherwise return empty
if entities:
entities = pd.DataFrame(entities,
columns=['Name', 'Description', 'Domain'])
entities.set_index('Name', inplace=True)
else:
entities = pd.DataFrame()
return entities
def _get_onset_names(entity):
"""
Example:
>>> data = read_excel('mimo-example.xlsx')
>>> model = create_model(data, range(1,25))
>>> _get_onset_names(model.e_co_stock)
['t', 'sit', 'com', 'com_type']
"""
# get column titles for entities from domain set names
labels = []
if isinstance(entity, pyomo.Set):
if entity.dimen > 1:
# N-dimensional set tuples, possibly with nested set tuples within
if entity.domain:
domains = entity.domain.set_tuple
else:
domains = entity.set_tuple
for domain_set in domains:
labels.extend(_get_onset_names(domain_set))
elif entity.dimen == 1:
if entity.domain:
# 1D subset; add domain name
labels.append(entity.domain.name)
else:
# unrestricted set; add entity name
labels.append(entity.name)
else:
# no domain, so no labels needed
pass
elif isinstance(entity, (pyomo.Param, pyomo.Var, pyomo.Constraint,
pyomo.Objective)):
if entity.dim() > 0 and entity._index:
labels = _get_onset_names(entity._index)
else:
# zero dimensions, so no onset labels
pass
else:
raise ValueError("Unknown entity type!")
return labels | gpl-3.0 |
mdanielwork/intellij-community | python/helpers/pydev/pydev_ipython/matplotlibtools.py | 15 | 6107 |
import sys
backends = {'tk': 'TkAgg',
'gtk': 'GTKAgg',
'wx': 'WXAgg',
'qt': 'Qt4Agg', # qt3 not supported
'qt4': 'Qt4Agg',
'qt5': 'Qt5Agg',
'osx': 'MacOSX'}
# We also need a reverse backends2guis mapping that will properly choose which
# GUI support to activate based on the desired matplotlib backend. For the
# most part it's just a reverse of the above dict, but we also need to add a
# few others that map to the same GUI manually:
backend2gui = dict(zip(backends.values(), backends.keys()))
backend2gui['Qt4Agg'] = 'qt4'
backend2gui['Qt5Agg'] = 'qt5'
# In the reverse mapping, there are a few extra valid matplotlib backends that
# map to the same GUI support
backend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk'
backend2gui['WX'] = 'wx'
backend2gui['CocoaAgg'] = 'osx'
def do_enable_gui(guiname):
from _pydev_bundle.pydev_versioncheck import versionok_for_gui
if versionok_for_gui():
try:
from pydev_ipython.inputhook import enable_gui
enable_gui(guiname)
except:
sys.stderr.write("Failed to enable GUI event loop integration for '%s'\n" % guiname)
import traceback
traceback.print_exc()
elif guiname not in ['none', '', None]:
# Only print a warning if the guiname was going to do something
sys.stderr.write("Debug console: Python version does not support GUI event loop integration for '%s'\n" % guiname)
# Return value does not matter, so return back what was sent
return guiname
def find_gui_and_backend():
"""Return the gui and mpl backend."""
matplotlib = sys.modules['matplotlib']
# WARNING: this assumes matplotlib 1.1 or newer!!
backend = matplotlib.rcParams['backend']
# In this case, we need to find what the appropriate gui selection call
# should be for IPython, so we can activate inputhook accordingly
gui = backend2gui.get(backend, None)
return gui, backend
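# Quick illustration of the lookup performed above (values follow directly
# from the backends/backend2gui dicts defined at the top of this module):
#
#     backend2gui['TkAgg']    # -> 'tk'
#     backend2gui['Qt5Agg']   # -> 'qt5'
#     backend2gui.get('Agg')  # -> None (non-interactive, no GUI loop needed)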
def is_interactive_backend(backend):
""" Check if backend is interactive """
matplotlib = sys.modules['matplotlib']
from matplotlib.rcsetup import interactive_bk, non_interactive_bk # @UnresolvedImport
if backend in interactive_bk:
return True
elif backend in non_interactive_bk:
return False
else:
return matplotlib.is_interactive()
def patch_use(enable_gui_function):
""" Patch matplotlib function 'use' """
matplotlib = sys.modules['matplotlib']
def patched_use(*args, **kwargs):
matplotlib.real_use(*args, **kwargs)
gui, backend = find_gui_and_backend()
enable_gui_function(gui)
matplotlib.real_use = matplotlib.use
matplotlib.use = patched_use
def patch_is_interactive():
""" Patch matplotlib function 'use' """
matplotlib = sys.modules['matplotlib']
def patched_is_interactive():
return matplotlib.rcParams['interactive']
matplotlib.real_is_interactive = matplotlib.is_interactive
matplotlib.is_interactive = patched_is_interactive
def _get_major_version(module):
return int(module.__version__.split('.')[0])
def activate_matplotlib(enable_gui_function):
"""Set interactive to True for interactive backends.
enable_gui_function - Function which enables gui, should be run in the main thread.
"""
matplotlib = sys.modules['matplotlib']
if not hasattr(matplotlib, 'rcParams'):
# matplotlib module wasn't fully imported, try later
return False
if _get_major_version(matplotlib) >= 3:
        # since matplotlib 3.0, accessing `matplotlib.rcParams` leads to a pyplot import,
        # so we need to wait until the necessary pyplot attributes have been imported as well
if 'matplotlib.pyplot' not in sys.modules:
return False
pyplot = sys.modules['matplotlib.pyplot']
if not hasattr(pyplot, 'switch_backend'):
return False
gui, backend = find_gui_and_backend()
is_interactive = is_interactive_backend(backend)
if is_interactive:
enable_gui_function(gui)
if not matplotlib.is_interactive():
sys.stdout.write("Backend %s is interactive backend. Turning interactive mode on.\n" % backend)
matplotlib.interactive(True)
else:
if matplotlib.is_interactive():
sys.stdout.write("Backend %s is non-interactive backend. Turning interactive mode off.\n" % backend)
matplotlib.interactive(False)
patch_use(enable_gui_function)
patch_is_interactive()
return True
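# Wiring sketch (hedged, illustrative only): a debug console that imports this
# module would typically pass do_enable_gui (defined above) as the callback and
# retry until matplotlib is fully imported:
#
#     if 'matplotlib' in sys.modules:
#         activated = activate_matplotlib(do_enable_gui)
#         # activate_matplotlib returns False while matplotlib/pyplot are only
#         # partially imported, so the caller simply tries again later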
def flag_calls(func):
"""Wrap a function to detect and flag when it gets called.
This is a decorator which takes a function and wraps it in a function with
a 'called' attribute. wrapper.called is initialized to False.
The wrapper.called attribute is set to False right before each call to the
wrapped function, so if the call fails it remains False. After the call
completes, wrapper.called is set to True and the output is returned.
Testing for truth in wrapper.called allows you to determine if a call to
func() was attempted and succeeded."""
# don't wrap twice
if hasattr(func, 'called'):
return func
def wrapper(*args,**kw):
wrapper.called = False
out = func(*args,**kw)
wrapper.called = True
return out
wrapper.called = False
wrapper.__doc__ = func.__doc__
return wrapper
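# Usage sketch for flag_calls, following its docstring:
#
#     def draw_if_interactive():
#         pass
#     draw_if_interactive = flag_calls(draw_if_interactive)
#     draw_if_interactive.called   # False until the wrapped function runs
#     draw_if_interactive()
#     draw_if_interactive.called   # True after a successful call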
def activate_pylab():
pylab = sys.modules['pylab']
pylab.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
pylab.draw_if_interactive = flag_calls(pylab.draw_if_interactive)
return True
def activate_pyplot():
pyplot = sys.modules['matplotlib.pyplot']
pyplot.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
pyplot.draw_if_interactive = flag_calls(pyplot.draw_if_interactive)
return True
| apache-2.0 |
alexandrebarachant/Grasp-and-lift-EEG-challenge | ensembling/XGB.py | 4 | 3853 | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 15 21:19:51 2015.
@author: rc, alex
"""
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from progressbar import Bar, ETA, Percentage, ProgressBar, RotatingMarker
from preprocessing.aux import delay_preds
import xgboost as xgb
class XGB(BaseEstimator, ClassifierMixin):
"""Ensembling with eXtreme Gradient Boosting."""
def __init__(self, ensemble, n_estimators=100, max_depth=5, subsample=0.7,
nthread=12,delay=None,skip=None,subsample_data=1,partsTest=1, jump=None):
"""Init."""
self.ensemble = ensemble
self.n_estimators = n_estimators
self.max_depth = max_depth
self.subsample = subsample
self.nthread = nthread
### timecourse history parameters ###
        # whether to append time-delayed copies of the predictions
        # (requires both delay and skip to be set)
self.applyPreds = delay is not None and skip is not None
# how many past time samples to include along with the most recent sample
self.delay = delay
# subsample above samples
self.skip = skip
# here can be set a custom subsampling scheme, it overrides previous params
self.jump = jump
# due to RAM limitations testing data has to be split into 'partsTest' parts
self.partsTest = partsTest
# subsampling input data as an efficient form of regularization
self.subsample_data = subsample_data
# used in bagging to set different starting points when subsampling the data
self.mdlNr = 0
self.clf = []
def fit(self, X, y):
"""Fit."""
X = X[(self.mdlNr*5 % self.subsample_data)::self.subsample_data]
y = y[(self.mdlNr*5 % self.subsample_data)::self.subsample_data]
if self.applyPreds:
if self.jump is not None:
X = delay_preds(X, delay=self.delay/self.subsample_data, skip=self.skip/self.subsample_data, jump=self.jump/self.subsample_data)
else:
X = delay_preds(X, delay=self.delay/self.subsample_data, skip=self.skip/self.subsample_data)
self.clf = []
widgets = ['Training : ', Percentage(), ' ', Bar(marker=RotatingMarker()),
' ', ETA(), ' ']
pbar = ProgressBar(widgets=widgets, maxval=6)
pbar.start()
# training separate models for each event
for col in range(6):
self.clf.append(xgb.XGBClassifier(n_estimators=self.n_estimators,
max_depth=self.max_depth,
subsample=self.subsample,
nthread=self.nthread))
self.clf[col].fit(X, y[:, col])
pbar.update(col)
def _predict_proba(self,X):
"""Predict probability for each event separately, then concatenate results."""
pred = []
for col in range(6):
pred.append(self.clf[col].predict_proba(X)[:, 1])
pred = np.vstack(pred).transpose()
return pred
def predict_proba(self, X):
"""Predict probability."""
if self.applyPreds:
p = np.zeros((X.shape[0],6))
for part in range(self.partsTest):
start = part*X.shape[0]//self.partsTest-self.delay*(part>0)
stop = (part+1)*X.shape[0]//self.partsTest
X_delayed = delay_preds(X[slice(start,stop)], delay=self.delay, skip=self.skip, jump=self.jump)[self.delay*(part>0):]
start += self.delay*(part>0)
p[slice(start,stop)] += self._predict_proba(X_delayed)
X_delayed = None
return p
else:
return self._predict_proba(X)
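# Usage sketch (hedged; all parameter values below are illustrative): X holds
# the level-one model predictions and y the 6-column event matrix.
#
#     clf = XGB(ensemble, n_estimators=100, max_depth=5,
#               delay=2000, skip=200, partsTest=2)
#     clf.fit(X_train, y_train)          # one booster per event column
#     p = clf.predict_proba(X_valid)     # shape (n_samples, 6)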
| bsd-3-clause |
shirtsgroup/pygo | analysis/figure_generation/Tab2_plot.py | 1 | 1101 | import numpy
import matplotlib
import matplotlib.pyplot as plt
def main():
plt.rc('text',usetex=True)
matplotlib.rc('font', family = 'serif')
font = {'family' : 'serif',
'size' : 'larger'}
adsorbed = numpy.load('adsorbed_rate.out.npy')
desorbed = numpy.load('desorbed_rate.out.npy')
N = len(adsorbed)
width = 0.35
ind = numpy.arange(N)
fig,ax = plt.subplots()
rects1 = ax.bar(ind,adsorbed[:,0],width,yerr=adsorbed[:,1]/N**.5)
rects2 = ax.bar(ind+width,desorbed[:,0],width,color='g',yerr=desorbed[:,1]/N**.5)
lam = numpy.arange(0.1,0.65,.05)
ax.set_ylabel('Number of folding and unfolding transitions')
ax.set_xlabel(r'$\lambda$')
ax.set_xticks(ind+width)
ax.set_xticklabels([str(x) for x in lam])
ax.legend((rects1[0],rects2[0]),('adsorbed states','desorbed states'))
plt.xlim((-.3,N))
plt.savefig('/home/edz3fz/proteinmontecarlo/manuscripts/figures/Tab2.pdf')
plt.savefig('/home/edz3fz/proteinmontecarlo/manuscripts/figures/Tab2.eps')
plt.show()
if __name__ == '__main__':
main()
| gpl-2.0 |
SmartCheckCentrale/Fatigue | Code/main.py | 1 | 8706 | # -*- coding: utf-8 -*-
"""
Created on Tue May 2 02:23:37 2017
@author: Houssam
"""
import sys
import matplotlib.pyplot as plt
from PyQt5 import uic
from matplotlib.figure import Figure
from PyQt5.QtWidgets import QApplication, QMainWindow, QSizePolicy, QWidget
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from datapreparing import DataPreparing
import random
initial_path='Features'
class App(QWidget):
"""Launches the main window, and is the main user interface.
It has a 3 QPushButton, that are herited from uic.loadUI
which calls 'GUI/mainwindow.ui'. Open the previous file in Qt
Creator to see all the widgets. It also creates plots from itself,
that are supposed to be placed with move method.(FigureCanvas object).
This class calls Plot class in order to plot the charts.
"""
def __init__(self):
QWidget.__init__(self)
self.UI = uic.loadUi('GUI/mainwindow_.ui', self,
package='GUI')
self.list_diseases = ['ArtG','ArtH','AVC','CER','Genou','LCA','LEC','LER','NC','NEUP','PAR','T-']
self.diseases_selected = []
#self.completed = False
self.initUI()
def initUI(self):
#----------------------
# Working on ACP button
#----------------------
acp = Plot(self, width=5, height=4, mode='acp') # Creating the plot object for acp
acp.move(1200,0)
        # Inheriting ACP_Button from the mainwindow.ui file, see Qt Creator
ACP_Button = self.UI.ACP_Button
# Information when clicking on the button
ACP_Button.setToolTip('Effectuer une analyse en composantes principales')
# Signal/Slot, connecting the click to the function that will display the chart
ACP_Button.clicked.connect(lambda : App.on_click(self,acp))
#----------------------
# Working on Kernel button
#----------------------
kernel = Plot(self, width=5, height=4, mode='kernel') # Creating the plot object for kernel
kernel.move(1200,0)
        # Inheriting AKERNEL_Button from the mainwindow.ui file, see Qt Creator
AKERNEL_Button = self.UI.AKERNEL_Button
# Information when clicking on the button
AKERNEL_Button.setToolTip('Effectuer une analyse en composantes principales à noyaux')
# Signal/Slot, connecting the click to the function that will display the chart
AKERNEL_Button.clicked.connect(lambda : App.on_click(self,kernel))
#----------------------
# Working on Sparse button
#----------------------
sparse = Plot(self, width=5, height=4, mode='sparse')
sparse.move(1200,0)
        # Inheriting ASPARSE_Button from the mainwindow.ui file, see Qt Creator
ASPARSE_Button = self.UI.ASPARSE_Button
# Information when clicking on the button
ASPARSE_Button.setToolTip('Effectuer une analyse en composantes principales sparse')
# Signal/Slot, connecting the click to the function that will display the chart
ASPARSE_Button.clicked.connect(lambda : App.on_click(self,sparse))
# See the mainwindow.ui file on QT creator
# Adding possible disease to the possible choices of the user
self.UI.Disease_1.addItems(['Faire un choix'] + self.list_diseases + ['None'])
self.UI.Disease_2.addItems(['Faire un choix'] + self.list_diseases + ['None'])
self.UI.Disease_3.addItems(['Faire un choix'] + self.list_diseases + ['None'])
self.UI.Disease_4.addItems(['Faire un choix'] + self.list_diseases + ['None'])
# Connecting the choices of the user with the slot function
self.UI.Disease_1.activated[str].connect(self.disease_activated)
self.UI.Disease_2.activated[str].connect(self.disease_activated)
self.UI.Disease_3.activated[str].connect(self.disease_activated)
self.UI.Disease_4.activated[str].connect(self.disease_activated)
        self.showMaximized() # Maximizing the size of the window
self.show() # Showing the window
def disease_activated(self,text):
if text != 'None' and text != 'Faire un choix':
self.diseases_selected += [text]
if len(self.diseases_selected) == 4:
self.completed = True
def on_click(self,m):
m.plot()
#m.plot_test() #if you want to test with the random file.
class Plot(FigureCanvas):
"""FigureCanvas object, see https://matplotlib.org/users/artists.html
It has a 3 QPushButton, that are herited from uic.loadUI
which calls 'GUI/mainwindow.ui'. Open the previous file in Qt
Creator to see all the widgets. It also creates plots from itself,
that are supposed to be placed with move method.(FigureCanvas object).
This class calls Plot class in order to plot the charts.
"""
def __init__(self, parent=None, width=5, height=4, dpi=100, mode='pca'):
""" Pris sur internet : https://pythonspot.com/en/pyqt5-matplotlib/
"""
self.visu = False
self.fig = plt.figure()
self.matplotlibWidget = MatplotlibWidget(self.fig)
fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = fig.add_subplot(111)
FigureCanvas.__init__(self, fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self,
QSizePolicy.Expanding,
QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
self.mode = mode # Je l'ai rajouté
def plot_test(self):
""" Taken from the internet
"""
data = [random.random() for i in range(25)]
ax = self.figure.add_subplot(111)
ax.plot(data, 'r-')
ax.set_title('PyQt Matplotlib Example')
self.draw()
def plot(self) :
""" Plotting the charts
- To plot the charts, you have to select the diseases,
for example AVC, PAR, Témoin,etc...
- Then you have to prepare the data, that is to say,
only selecting patients who have those diseases,
and only sorting the data, and preparing it for the
method you want to use when visualizing the dataset.
- DataPreparing.prepare() returns a list c with the
occurences of each diseases. You can select at most
4 diseases.
- The mode is fixed when you press on one of the QPushButtons
above. Therefore self.mode is known. X_plot_method applies
the method.
- Then, data_plot_preparation gives the "ordonnées" and
"absisses" (je sais pas comment on dit en anglais et il est
2h du mat je suis KO) for every disease to be displayed.
"""
diseases_selected = ex.diseases_selected
process = DataPreparing(initial_path,diseases_selected)
tab_std,c = DataPreparing.prepare(process)
X_a = DataPreparing.X_plot_method(process,tab_std,self.mode)
X1,Y1,X2,Y2,X3,Y3,X4,Y4 = DataPreparing.data_plot_preparation(process,X_a,c)
        #ax = self.figure.add_subplot(111) # Adds the plot to the place you tell when using move (see above).
self.fig.clf()
self.subplot = self.fig.add_subplot(111)
self.subplot.plot(X1, Y1, "ro", color='red', picker=4.0)
self.subplot.plot(X2, Y2, "ro", color='blue', picker=4.0)
self.subplot.plot(X3, Y3, "ro", color='green', picker=4.0)
self.subplot.plot(X4, Y4, "ro", color='yellow', picker=4.0)
self.subplot.legend()
self.matplotlibWidget.draw()
self.show()
print(X1,Y1)
"""
        ax.plot(X1,Y1,cmap=plt.cm.Paired,c='orange') # Not sure scatter is the right function
        ax.plot(X2,Y2,cmap=plt.cm.Paired,c='red') # to use here, to be checked
ax.plot(X3,Y3,cmap=plt.cm.Paired,c='green')
ax.plot(X4,Y4,cmap=plt.cm.Paired,c='blue')
ax.set_title('Analysis - ' + self.mode) # sets the title
self.draw # draws the plot.
print('coucou2')
"""
class MatplotlibWidget(FigureCanvas):
"""
    Initialization of the matplotlib canvas for the plot
"""
def __init__(self, fig):
super(MatplotlibWidget, self).__init__(fig)
# Launching the general application
if __name__ == '__main__':
app=0
app = QApplication(sys.argv)
ex = App()
app.exec_()
| mit |
Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/IPython/core/tests/test_completer.py | 3 | 24991 | # encoding: utf-8
"""Tests for the IPython tab-completion machinery."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import sys
import unittest
from contextlib import contextmanager
import nose.tools as nt
from traitlets.config.loader import Config
from IPython.core import completer
from IPython.external.decorators import knownfailureif
from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
from IPython.utils.generics import complete_object
from IPython.utils import py3compat
from IPython.utils.py3compat import string_types, unicode_type
from IPython.testing import decorators as dec
#-----------------------------------------------------------------------------
# Test functions
#-----------------------------------------------------------------------------
@contextmanager
def greedy_completion():
ip = get_ipython()
greedy_original = ip.Completer.greedy
try:
ip.Completer.greedy = True
yield
finally:
ip.Completer.greedy = greedy_original
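# Usage sketch: the tests below toggle greedy completion for a single block,
# and the original Completer.greedy value is restored on exit even if the
# body raises, e.g.
#
#     with greedy_completion():
#         _, c = ip.complete('.', line='a[0].')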
def test_protect_filename():
pairs = [ ('abc','abc'),
(' abc',r'\ abc'),
('a bc',r'a\ bc'),
('a bc',r'a\ \ bc'),
(' bc',r'\ \ bc'),
]
# On posix, we also protect parens and other special characters
if sys.platform != 'win32':
pairs.extend( [('a(bc',r'a\(bc'),
('a)bc',r'a\)bc'),
('a( )bc',r'a\(\ \)bc'),
('a[1]bc', r'a\[1\]bc'),
('a{1}bc', r'a\{1\}bc'),
('a#bc', r'a\#bc'),
('a?bc', r'a\?bc'),
('a=bc', r'a\=bc'),
('a\\bc', r'a\\bc'),
('a|bc', r'a\|bc'),
('a;bc', r'a\;bc'),
('a:bc', r'a\:bc'),
("a'bc", r"a\'bc"),
('a*bc', r'a\*bc'),
('a"bc', r'a\"bc'),
('a^bc', r'a\^bc'),
('a&bc', r'a\&bc'),
] )
# run the actual tests
for s1, s2 in pairs:
s1p = completer.protect_filename(s1)
nt.assert_equal(s1p, s2)
def check_line_split(splitter, test_specs):
for part1, part2, split in test_specs:
cursor_pos = len(part1)
line = part1+part2
out = splitter.split_line(line, cursor_pos)
nt.assert_equal(out, split)
def test_line_split():
"""Basic line splitter test with default specs."""
sp = completer.CompletionSplitter()
# The format of the test specs is: part1, part2, expected answer. Parts 1
# and 2 are joined into the 'line' sent to the splitter, as if the cursor
# was at the end of part1. So an empty part2 represents someone hitting
# tab at the end of the line, the most common case.
t = [('run some/scrip', '', 'some/scrip'),
('run scripts/er', 'ror.py foo', 'scripts/er'),
('echo $HOM', '', 'HOM'),
('print sys.pa', '', 'sys.pa'),
('print(sys.pa', '', 'sys.pa'),
("execfile('scripts/er", '', 'scripts/er'),
('a[x.', '', 'x.'),
('a[x.', 'y', 'x.'),
('cd "some_file/', '', 'some_file/'),
]
check_line_split(sp, t)
# Ensure splitting works OK with unicode by re-running the tests with
# all inputs turned into unicode
check_line_split(sp, [ map(unicode_type, p) for p in t] )
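# Concrete illustration of the splitter semantics exercised above (cursor at
# the end of part1 in both cases):
#
#     sp = completer.CompletionSplitter()
#     sp.split_line('run some/scrip', 14)   # -> 'some/scrip'
#     sp.split_line('print sys.pa', 12)     # -> 'sys.pa'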
def test_custom_completion_error():
"""Test that errors from custom attribute completers are silenced."""
ip = get_ipython()
class A(object): pass
ip.user_ns['a'] = A()
@complete_object.when_type(A)
def complete_A(a, existing_completions):
raise TypeError("this should be silenced")
ip.complete("a.")
def test_unicode_completions():
ip = get_ipython()
# Some strings that trigger different types of completion. Check them both
# in str and unicode forms
s = ['ru', '%ru', 'cd /', 'floa', 'float(x)/']
for t in s + list(map(unicode_type, s)):
# We don't need to check exact completion values (they may change
# depending on the state of the namespace, but at least no exceptions
# should be thrown and the return value should be a pair of text, list
# values.
text, matches = ip.complete(t)
nt.assert_true(isinstance(text, string_types))
nt.assert_true(isinstance(matches, list))
@dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
def test_latex_completions():
from IPython.core.latex_symbols import latex_symbols
import random
ip = get_ipython()
# Test some random unicode symbols
keys = random.sample(latex_symbols.keys(), 10)
for k in keys:
text, matches = ip.complete(k)
nt.assert_equal(len(matches),1)
nt.assert_equal(text, k)
nt.assert_equal(matches[0], latex_symbols[k])
# Test a more complex line
text, matches = ip.complete(u'print(\\alpha')
nt.assert_equals(text, u'\\alpha')
nt.assert_equals(matches[0], latex_symbols['\\alpha'])
# Test multiple matching latex symbols
text, matches = ip.complete(u'\\al')
nt.assert_in('\\alpha', matches)
nt.assert_in('\\aleph', matches)
@dec.onlyif(sys.version_info[0] >= 3, 'This test only apply on python3')
def test_back_latex_completion():
ip = get_ipython()
    # do not return more than 1 match for \beta, only the latex one.
name, matches = ip.complete('\\β')
nt.assert_equal(len(matches), 1)
nt.assert_equal(matches[0], '\\beta')
@dec.onlyif(sys.version_info[0] >= 3, 'This test only apply on python3')
def test_back_unicode_completion():
ip = get_ipython()
name, matches = ip.complete('\\Ⅴ')
nt.assert_equal(len(matches), 1)
nt.assert_equal(matches[0], '\\ROMAN NUMERAL FIVE')
@dec.onlyif(sys.version_info[0] >= 3, 'This test only apply on python3')
def test_forward_unicode_completion():
ip = get_ipython()
name, matches = ip.complete('\\ROMAN NUMERAL FIVE')
nt.assert_equal(len(matches), 1)
nt.assert_equal(matches[0], 'Ⅴ')
@dec.onlyif(sys.version_info[0] >= 3, 'This test only apply on python3')
def test_no_ascii_back_completion():
ip = get_ipython()
with TemporaryWorkingDirectory(): # Avoid any filename completions
        # single ascii letters that don't have completions yet
for letter in 'fjqyJMQVWY' :
name, matches = ip.complete('\\'+letter)
nt.assert_equal(matches, [])
class CompletionSplitterTestCase(unittest.TestCase):
def setUp(self):
self.sp = completer.CompletionSplitter()
def test_delim_setting(self):
self.sp.delims = ' '
nt.assert_equal(self.sp.delims, ' ')
nt.assert_equal(self.sp._delim_expr, '[\ ]')
def test_spaces(self):
"""Test with only spaces as split chars."""
self.sp.delims = ' '
t = [('foo', '', 'foo'),
('run foo', '', 'foo'),
('run foo', 'bar', 'foo'),
]
check_line_split(self.sp, t)
def test_has_open_quotes1():
for s in ["'", "'''", "'hi' '"]:
nt.assert_equal(completer.has_open_quotes(s), "'")
def test_has_open_quotes2():
for s in ['"', '"""', '"hi" "']:
nt.assert_equal(completer.has_open_quotes(s), '"')
def test_has_open_quotes3():
for s in ["''", "''' '''", "'hi' 'ipython'"]:
nt.assert_false(completer.has_open_quotes(s))
def test_has_open_quotes4():
for s in ['""', '""" """', '"hi" "ipython"']:
nt.assert_false(completer.has_open_quotes(s))
@knownfailureif(sys.platform == 'win32', "abspath completions fail on Windows")
def test_abspath_file_completions():
ip = get_ipython()
with TemporaryDirectory() as tmpdir:
prefix = os.path.join(tmpdir, 'foo')
suffixes = ['1', '2']
names = [prefix+s for s in suffixes]
for n in names:
open(n, 'w').close()
# Check simple completion
c = ip.complete(prefix)[1]
nt.assert_equal(c, names)
# Now check with a function call
cmd = 'a = f("%s' % prefix
c = ip.complete(prefix, cmd)[1]
comp = [prefix+s for s in suffixes]
nt.assert_equal(c, comp)
def test_local_file_completions():
ip = get_ipython()
with TemporaryWorkingDirectory():
prefix = './foo'
suffixes = ['1', '2']
names = [prefix+s for s in suffixes]
for n in names:
open(n, 'w').close()
# Check simple completion
c = ip.complete(prefix)[1]
nt.assert_equal(c, names)
# Now check with a function call
cmd = 'a = f("%s' % prefix
c = ip.complete(prefix, cmd)[1]
comp = [prefix+s for s in suffixes]
nt.assert_equal(c, comp)
def test_greedy_completions():
ip = get_ipython()
ip.ex('a=list(range(5))')
_,c = ip.complete('.',line='a[0].')
nt.assert_false('a[0].real' in c,
"Shouldn't have completed on a[0]: %s"%c)
with greedy_completion():
_,c = ip.complete('.',line='a[0].')
nt.assert_true('a[0].real' in c, "Should have completed on a[0]: %s"%c)
def test_omit__names():
# also happens to test IPCompleter as a configurable
ip = get_ipython()
ip._hidden_attr = 1
ip._x = {}
c = ip.Completer
ip.ex('ip=get_ipython()')
cfg = Config()
cfg.IPCompleter.omit__names = 0
c.update_config(cfg)
s,matches = c.complete('ip.')
nt.assert_in('ip.__str__', matches)
nt.assert_in('ip._hidden_attr', matches)
cfg = Config()
cfg.IPCompleter.omit__names = 1
c.update_config(cfg)
s,matches = c.complete('ip.')
nt.assert_not_in('ip.__str__', matches)
nt.assert_in('ip._hidden_attr', matches)
cfg = Config()
cfg.IPCompleter.omit__names = 2
c.update_config(cfg)
s,matches = c.complete('ip.')
nt.assert_not_in('ip.__str__', matches)
nt.assert_not_in('ip._hidden_attr', matches)
s,matches = c.complete('ip._x.')
nt.assert_in('ip._x.keys', matches)
del ip._hidden_attr
def test_limit_to__all__False_ok():
ip = get_ipython()
c = ip.Completer
ip.ex('class D: x=24')
ip.ex('d=D()')
cfg = Config()
cfg.IPCompleter.limit_to__all__ = False
c.update_config(cfg)
s, matches = c.complete('d.')
nt.assert_in('d.x', matches)
def test_limit_to__all__True_ok():
ip = get_ipython()
c = ip.Completer
ip.ex('class D: x=24')
ip.ex('d=D()')
ip.ex("d.__all__=['z']")
cfg = Config()
cfg.IPCompleter.limit_to__all__ = True
c.update_config(cfg)
s, matches = c.complete('d.')
nt.assert_in('d.z', matches)
nt.assert_not_in('d.x', matches)
def test_get__all__entries_ok():
class A(object):
__all__ = ['x', 1]
words = completer.get__all__entries(A())
nt.assert_equal(words, ['x'])
def test_get__all__entries_no__all__ok():
class A(object):
pass
words = completer.get__all__entries(A())
nt.assert_equal(words, [])
def test_func_kw_completions():
ip = get_ipython()
c = ip.Completer
ip.ex('def myfunc(a=1,b=2): return a+b')
s, matches = c.complete(None, 'myfunc(1,b')
nt.assert_in('b=', matches)
# Simulate completing with cursor right after b (pos==10):
s, matches = c.complete(None, 'myfunc(1,b)', 10)
nt.assert_in('b=', matches)
s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
nt.assert_in('b=', matches)
#builtin function
s, matches = c.complete(None, 'min(k, k')
nt.assert_in('key=', matches)
def test_default_arguments_from_docstring():
doc = min.__doc__
ip = get_ipython()
c = ip.Completer
kwd = c._default_arguments_from_docstring(
'min(iterable[, key=func]) -> value')
nt.assert_equal(kwd, ['key'])
#with cython type etc
kwd = c._default_arguments_from_docstring(
'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
#white spaces
kwd = c._default_arguments_from_docstring(
'\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
def test_line_magics():
ip = get_ipython()
c = ip.Completer
s, matches = c.complete(None, 'lsmag')
nt.assert_in('%lsmagic', matches)
s, matches = c.complete(None, '%lsmag')
nt.assert_in('%lsmagic', matches)
def test_cell_magics():
from IPython.core.magic import register_cell_magic
@register_cell_magic
def _foo_cellm(line, cell):
pass
ip = get_ipython()
c = ip.Completer
s, matches = c.complete(None, '_foo_ce')
nt.assert_in('%%_foo_cellm', matches)
s, matches = c.complete(None, '%%_foo_ce')
nt.assert_in('%%_foo_cellm', matches)
def test_line_cell_magics():
from IPython.core.magic import register_line_cell_magic
@register_line_cell_magic
def _bar_cellm(line, cell):
pass
ip = get_ipython()
c = ip.Completer
# The policy here is trickier, see comments in completion code. The
# returned values depend on whether the user passes %% or not explicitly,
# and this will show a difference if the same name is both a line and cell
# magic.
s, matches = c.complete(None, '_bar_ce')
nt.assert_in('%_bar_cellm', matches)
nt.assert_in('%%_bar_cellm', matches)
s, matches = c.complete(None, '%_bar_ce')
nt.assert_in('%_bar_cellm', matches)
nt.assert_in('%%_bar_cellm', matches)
s, matches = c.complete(None, '%%_bar_ce')
nt.assert_not_in('%_bar_cellm', matches)
nt.assert_in('%%_bar_cellm', matches)
def test_magic_completion_order():
ip = get_ipython()
c = ip.Completer
# Test ordering of magics and non-magics with the same name
# We want the non-magic first
# Before importing matplotlib, there should only be one option:
text, matches = c.complete('mat')
nt.assert_equal(matches, ["%matplotlib"])
ip.run_cell("matplotlib = 1") # introduce name into namespace
# After the import, there should be two options, ordered like this:
text, matches = c.complete('mat')
nt.assert_equal(matches, ["matplotlib", "%matplotlib"])
ip.run_cell("timeit = 1") # define a user variable called 'timeit'
# Order of user variable and line and cell magics with same name:
text, matches = c.complete('timeit')
nt.assert_equal(matches, ["timeit", "%timeit","%%timeit"])
def test_dict_key_completion_string():
"""Test dictionary key completion for string keys"""
ip = get_ipython()
complete = ip.Completer.complete
ip.user_ns['d'] = {'abc': None}
# check completion at different stages
_, matches = complete(line_buffer="d[")
nt.assert_in("'abc'", matches)
nt.assert_not_in("'abc']", matches)
_, matches = complete(line_buffer="d['")
nt.assert_in("abc", matches)
nt.assert_not_in("abc']", matches)
_, matches = complete(line_buffer="d['a")
nt.assert_in("abc", matches)
nt.assert_not_in("abc']", matches)
# check use of different quoting
_, matches = complete(line_buffer="d[\"")
nt.assert_in("abc", matches)
nt.assert_not_in('abc\"]', matches)
_, matches = complete(line_buffer="d[\"a")
nt.assert_in("abc", matches)
nt.assert_not_in('abc\"]', matches)
# check sensitivity to following context
_, matches = complete(line_buffer="d[]", cursor_pos=2)
nt.assert_in("'abc'", matches)
_, matches = complete(line_buffer="d['']", cursor_pos=3)
nt.assert_in("abc", matches)
nt.assert_not_in("abc'", matches)
nt.assert_not_in("abc']", matches)
# check multiple solutions are correctly returned and that noise is not
ip.user_ns['d'] = {'abc': None, 'abd': None, 'bad': None, object(): None,
5: None}
_, matches = complete(line_buffer="d['a")
nt.assert_in("abc", matches)
nt.assert_in("abd", matches)
nt.assert_not_in("bad", matches)
assert not any(m.endswith((']', '"', "'")) for m in matches), matches
# check escaping and whitespace
ip.user_ns['d'] = {'a\nb': None, 'a\'b': None, 'a"b': None, 'a word': None}
_, matches = complete(line_buffer="d['a")
nt.assert_in("a\\nb", matches)
nt.assert_in("a\\'b", matches)
nt.assert_in("a\"b", matches)
nt.assert_in("a word", matches)
assert not any(m.endswith((']', '"', "'")) for m in matches), matches
# - can complete on non-initial word of the string
_, matches = complete(line_buffer="d['a w")
nt.assert_in("word", matches)
# - understands quote escaping
_, matches = complete(line_buffer="d['a\\'")
nt.assert_in("b", matches)
# - default quoting should work like repr
_, matches = complete(line_buffer="d[")
nt.assert_in("\"a'b\"", matches)
# - when opening quote with ", possible to match with unescaped apostrophe
_, matches = complete(line_buffer="d[\"a'")
nt.assert_in("b", matches)
# need to not split at delims that readline won't split at
if '-' not in ip.Completer.splitter.delims:
ip.user_ns['d'] = {'before-after': None}
_, matches = complete(line_buffer="d['before-af")
nt.assert_in('before-after', matches)
def test_dict_key_completion_contexts():
"""Test expression contexts in which dict key completion occurs"""
ip = get_ipython()
complete = ip.Completer.complete
d = {'abc': None}
ip.user_ns['d'] = d
class C:
data = d
ip.user_ns['C'] = C
ip.user_ns['get'] = lambda: d
def assert_no_completion(**kwargs):
_, matches = complete(**kwargs)
nt.assert_not_in('abc', matches)
nt.assert_not_in('abc\'', matches)
nt.assert_not_in('abc\']', matches)
nt.assert_not_in('\'abc\'', matches)
nt.assert_not_in('\'abc\']', matches)
def assert_completion(**kwargs):
_, matches = complete(**kwargs)
nt.assert_in("'abc'", matches)
nt.assert_not_in("'abc']", matches)
# no completion after string closed, even if reopened
assert_no_completion(line_buffer="d['a'")
assert_no_completion(line_buffer="d[\"a\"")
assert_no_completion(line_buffer="d['a' + ")
assert_no_completion(line_buffer="d['a' + '")
# completion in non-trivial expressions
assert_completion(line_buffer="+ d[")
assert_completion(line_buffer="(d[")
assert_completion(line_buffer="C.data[")
# greedy flag
def assert_completion(**kwargs):
_, matches = complete(**kwargs)
nt.assert_in("get()['abc']", matches)
assert_no_completion(line_buffer="get()[")
with greedy_completion():
assert_completion(line_buffer="get()[")
assert_completion(line_buffer="get()['")
assert_completion(line_buffer="get()['a")
assert_completion(line_buffer="get()['ab")
assert_completion(line_buffer="get()['abc")
@dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
def test_dict_key_completion_bytes():
"""Test handling of bytes in dict key completion"""
ip = get_ipython()
complete = ip.Completer.complete
ip.user_ns['d'] = {'abc': None, b'abd': None}
_, matches = complete(line_buffer="d[")
nt.assert_in("'abc'", matches)
nt.assert_in("b'abd'", matches)
if False: # not currently implemented
_, matches = complete(line_buffer="d[b")
nt.assert_in("b'abd'", matches)
nt.assert_not_in("b'abc'", matches)
_, matches = complete(line_buffer="d[b'")
nt.assert_in("abd", matches)
nt.assert_not_in("abc", matches)
_, matches = complete(line_buffer="d[B'")
nt.assert_in("abd", matches)
nt.assert_not_in("abc", matches)
_, matches = complete(line_buffer="d['")
nt.assert_in("abc", matches)
nt.assert_not_in("abd", matches)
@dec.onlyif(sys.version_info[0] < 3, 'This test only applies in Py<3')
def test_dict_key_completion_unicode_py2():
"""Test handling of unicode in dict key completion"""
ip = get_ipython()
complete = ip.Completer.complete
ip.user_ns['d'] = {u'abc': None,
u'a\u05d0b': None}
_, matches = complete(line_buffer="d[")
nt.assert_in("u'abc'", matches)
nt.assert_in("u'a\\u05d0b'", matches)
_, matches = complete(line_buffer="d['a")
nt.assert_in("abc", matches)
nt.assert_not_in("a\\u05d0b", matches)
_, matches = complete(line_buffer="d[u'a")
nt.assert_in("abc", matches)
nt.assert_in("a\\u05d0b", matches)
_, matches = complete(line_buffer="d[U'a")
nt.assert_in("abc", matches)
nt.assert_in("a\\u05d0b", matches)
# query using escape
_, matches = complete(line_buffer=u"d[u'a\\u05d0")
nt.assert_in("u05d0b", matches) # tokenized after \\
# query using character
_, matches = complete(line_buffer=u"d[u'a\u05d0")
nt.assert_in(u"a\u05d0b", matches)
with greedy_completion():
_, matches = complete(line_buffer="d[")
nt.assert_in("d[u'abc']", matches)
nt.assert_in("d[u'a\\u05d0b']", matches)
_, matches = complete(line_buffer="d['a")
nt.assert_in("d['abc']", matches)
nt.assert_not_in("d[u'a\\u05d0b']", matches)
_, matches = complete(line_buffer="d[u'a")
nt.assert_in("d[u'abc']", matches)
nt.assert_in("d[u'a\\u05d0b']", matches)
_, matches = complete(line_buffer="d[U'a")
nt.assert_in("d[U'abc']", matches)
nt.assert_in("d[U'a\\u05d0b']", matches)
# query using escape
_, matches = complete(line_buffer=u"d[u'a\\u05d0")
nt.assert_in("d[u'a\\u05d0b']", matches) # tokenized after \\
# query using character
_, matches = complete(line_buffer=u"d[u'a\u05d0")
nt.assert_in(u"d[u'a\u05d0b']", matches)
@dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
def test_dict_key_completion_unicode_py3():
"""Test handling of unicode in dict key completion"""
ip = get_ipython()
complete = ip.Completer.complete
ip.user_ns['d'] = {u'a\u05d0': None}
# query using escape
_, matches = complete(line_buffer="d['a\\u05d0")
nt.assert_in("u05d0", matches) # tokenized after \\
# query using character
_, matches = complete(line_buffer="d['a\u05d0")
nt.assert_in(u"a\u05d0", matches)
with greedy_completion():
# query using escape
_, matches = complete(line_buffer="d['a\\u05d0")
nt.assert_in("d['a\\u05d0']", matches) # tokenized after \\
# query using character
_, matches = complete(line_buffer="d['a\u05d0")
nt.assert_in(u"d['a\u05d0']", matches)
@dec.skip_without('numpy')
def test_struct_array_key_completion():
"""Test dict key completion applies to numpy struct arrays"""
import numpy
ip = get_ipython()
complete = ip.Completer.complete
ip.user_ns['d'] = numpy.array([], dtype=[('hello', 'f'), ('world', 'f')])
_, matches = complete(line_buffer="d['")
nt.assert_in("hello", matches)
nt.assert_in("world", matches)
# complete on the numpy struct itself
dt = numpy.dtype([('my_head', [('my_dt', '>u4'), ('my_df', '>u4')]),
('my_data', '>f4', 5)])
x = numpy.zeros(2, dtype=dt)
ip.user_ns['d'] = x[1]
_, matches = complete(line_buffer="d['")
nt.assert_in("my_head", matches)
nt.assert_in("my_data", matches)
# complete on a nested level
with greedy_completion():
ip.user_ns['d'] = numpy.zeros(2, dtype=dt)
_, matches = complete(line_buffer="d[1]['my_head']['")
nt.assert_true(any(["my_dt" in m for m in matches]))
nt.assert_true(any(["my_df" in m for m in matches]))
@dec.skip_without('pandas')
def test_dataframe_key_completion():
"""Test dict key completion applies to pandas DataFrames"""
import pandas
ip = get_ipython()
complete = ip.Completer.complete
ip.user_ns['d'] = pandas.DataFrame({'hello': [1], 'world': [2]})
_, matches = complete(line_buffer="d['")
nt.assert_in("hello", matches)
nt.assert_in("world", matches)
def test_dict_key_completion_invalids():
"""Smoke test cases dict key completion can't handle"""
ip = get_ipython()
complete = ip.Completer.complete
ip.user_ns['no_getitem'] = None
ip.user_ns['no_keys'] = []
ip.user_ns['cant_call_keys'] = dict
ip.user_ns['empty'] = {}
ip.user_ns['d'] = {'abc': 5}
_, matches = complete(line_buffer="no_getitem['")
_, matches = complete(line_buffer="no_keys['")
_, matches = complete(line_buffer="cant_call_keys['")
_, matches = complete(line_buffer="empty['")
_, matches = complete(line_buffer="name_error['")
_, matches = complete(line_buffer="d['\\") # incomplete escape
| artistic-2.0 |
jamesblunt/sympy | sympy/external/tests/test_importtools.py | 91 | 1215 | from sympy.external import import_module
# fixes issue that arose in addressing issue 6533
def test_no_stdlib_collections():
'''
make sure we get the right collections when it is not part of a
larger list
'''
import collections
matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['cm', 'collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
assert collections != matplotlib.collections
def test_no_stdlib_collections2():
'''
make sure we get the right collections when it is not part of a
larger list
'''
import collections
matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
assert collections != matplotlib.collections
def test_no_stdlib_collections3():
'''make sure we get the right collections with no catch'''
import collections
matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['cm', 'collections']},
min_module_version='1.1.0')
if matplotlib:
assert collections != matplotlib.collections
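# Behaviour relied on above (hedged summary): import_module returns the
# imported module, or None when the import fails or the version requirement
# is not met -- hence the `if matplotlib:` guards in each test, e.g.
#
#     numpy = import_module('numpy')   # module object, or None if unavailable
#     if numpy:
#         pass                         # only runs when numpy is importable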
| bsd-3-clause |
vortex-ape/scikit-learn | sklearn/tests/test_isotonic.py | 31 | 15103 | import warnings
import numpy as np
import pickle
import copy
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
# check that fit is permutation invariant.
# regression test of missing sorting of sample-weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_small_number_of_samples():
x = [0, 1, 2]
y = [1, 1.1, 1.05]
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
assert_array_equal(y_, isotonic_regression(y))
y = np.array([10, 0, 2])
y_ = np.array([4, 4, 4])
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
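# Worked example for the pooling asserted above: with increasing order the
# fitted values may never decrease, so pool-adjacent-violators merges the
# offending block into its (equal-weight) mean:
#
#     [10, 0, 2] -> pool (10, 0)   -> [5, 5, 2]
#                -> pool (5, 5, 2) -> [4, 4, 4]   # (10 + 0 + 2) / 3 = 4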
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [1, 1, 2, 3, 4, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_with_ties_in_differently_sized_groups():
"""
Non-regression test to handle issue 9432:
https://github.com/scikit-learn/scikit-learn/issues/9432
Compare against output in R:
> library("isotone")
> x <- c(0, 1, 1, 2, 3, 4)
> y <- c(0, 0, 1, 0, 0, 1)
> res1 <- gpava(x, y, ties="secondary")
> res1$x
`isotone` version: 1.1-0, 2015-07-24
R version: R version 3.3.2 (2016-10-31)
"""
x = np.array([0, 1, 1, 2, 3, 4])
y = np.array([0, 0, 1, 0, 0, 1])
y_true = np.array([0., 0.25, 0.25, 0.25, 0.25, 1.])
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true)
assert_array_almost_equal(ir.fit_transform(x, y), y_true)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
y_ = ir.fit_transform(x, y)
# work-around for pearson divide warnings in scipy <= 0.17.0
assert_true(all(["invalid value encountered in "
in str(warn.message) for warn in w]))
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
# Set y and x for decreasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
y_ = ir.fit_transform(x, y)
# work-around for pearson divide warnings in scipy <= 0.17.0
assert_true(all(["invalid value encountered in "
in str(warn.message) for warn in w]))
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
# check if min value is used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_ymin_ymax():
# Test from @NelleV's issue:
# https://github.com/scikit-learn/scikit-learn/issues/6921
x = np.array([1.263, 1.318, -0.572, 0.307, -0.707, -0.176, -1.599, 1.059,
1.396, 1.906, 0.210, 0.028, -0.081, 0.444, 0.018, -0.377,
-0.896, -0.377, -1.327, 0.180])
y = isotonic_regression(x, y_min=0., y_max=0.1)
assert(np.all(y >= 0))
assert(np.all(y <= 0.1))
# Also test decreasing case since the logic there is different
y = isotonic_regression(x, y_min=0., y_max=0.1, increasing=False)
assert(np.all(y >= 0))
assert(np.all(y <= 0.1))
# Finally, test with only one bound
y = isotonic_regression(x, y_min=0., increasing=False)
assert(np.all(y >= 0))
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
def test_fast_predict():
# test that the faster prediction change doesn't
# affect out-of-sample predictions:
# https://github.com/scikit-learn/scikit-learn/pull/6206
rng = np.random.RandomState(123)
n_samples = 10 ** 3
# X values over the -10,10 range
X_train = 20.0 * rng.rand(n_samples) - 10
y_train = np.less(
rng.rand(n_samples),
1.0 / (1.0 + np.exp(-X_train))
).astype('int64')
weights = rng.rand(n_samples)
# we also want to test that everything still works when some weights are 0
weights[rng.rand(n_samples) < 0.1] = 0
slow_model = IsotonicRegression(y_min=0, y_max=1, out_of_bounds="clip")
fast_model = IsotonicRegression(y_min=0, y_max=1, out_of_bounds="clip")
# Build interpolation function with ALL input data, not just the
# non-redundant subset. The following 2 lines are taken from the
# .fit() method, without removing unnecessary points
X_train_fit, y_train_fit = slow_model._build_y(X_train, y_train,
sample_weight=weights,
trim_duplicates=False)
slow_model._build_f(X_train_fit, y_train_fit)
# fit with just the necessary data
fast_model.fit(X_train, y_train, sample_weight=weights)
X_test = 20.0 * rng.rand(n_samples) - 10
y_pred_slow = slow_model.predict(X_test)
y_pred_fast = fast_model.predict(X_test)
assert_array_equal(y_pred_slow, y_pred_fast)
def test_isotonic_copy_before_fit():
# https://github.com/scikit-learn/scikit-learn/issues/6628
ir = IsotonicRegression()
copy.copy(ir)
| bsd-3-clause |
BeiLuoShiMen/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_qt.py | 69 | 16846 | from __future__ import division
import math
import os
import sys
import matplotlib
from matplotlib import verbose
from matplotlib.cbook import is_string_like, onetrue
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, cursors
from matplotlib._pylab_helpers import Gcf
from matplotlib.figure import Figure
from matplotlib.mathtext import MathTextParser
from matplotlib.widgets import SubplotTool
try:
import qt
except ImportError:
raise ImportError("Qt backend requires pyqt to be installed.")
backend_version = "0.9.1"
def fn_name(): return sys._getframe(1).f_code.co_name
DEBUG = False
cursord = {
cursors.MOVE : qt.Qt.PointingHandCursor,
cursors.HAND : qt.Qt.WaitCursor,
cursors.POINTER : qt.Qt.ArrowCursor,
cursors.SELECT_REGION : qt.Qt.CrossCursor,
}
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager != None:
figManager.canvas.draw()
def _create_qApp():
"""
Only one qApp can exist at a time, so check before creating one
"""
if qt.QApplication.startingUp():
if DEBUG: print "Starting up QApplication"
global qApp
qApp = qt.QApplication( [" "] )
qt.QObject.connect( qApp, qt.SIGNAL( "lastWindowClosed()" ),
qApp, qt.SLOT( "quit()" ) )
#remember that matplotlib created the qApp - will be used by show()
_create_qApp.qAppCreatedHere = True
_create_qApp.qAppCreatedHere = False
def show():
"""
Show all the figures and enter the qt main loop
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.window.show()
if DEBUG: print 'Inside show'
figManager = Gcf.get_active()
if figManager != None:
figManager.canvas.draw()
if _create_qApp.qAppCreatedHere:
qt.qApp.exec_loop()
def new_figure_manager( num, *args, **kwargs ):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass( *args, **kwargs )
canvas = FigureCanvasQT( thisFig )
manager = FigureManagerQT( canvas, num )
return manager
class FigureCanvasQT( qt.QWidget, FigureCanvasBase ):
keyvald = { qt.Qt.Key_Control : 'control',
qt.Qt.Key_Shift : 'shift',
qt.Qt.Key_Alt : 'alt',
}
# left 1, middle 2, right 3
buttond = {1:1, 2:3, 4:2}
def __init__( self, figure ):
if DEBUG: print 'FigureCanvasQt: ', figure
_create_qApp()
qt.QWidget.__init__( self, None, "QWidget figure" )
FigureCanvasBase.__init__( self, figure )
self.figure = figure
self.setMouseTracking( True )
w,h = self.get_width_height()
self.resize( w, h )
def enterEvent(self, event):
FigureCanvasBase.enter_notify_event(self, event)
def leaveEvent(self, event):
FigureCanvasBase.leave_notify_event(self, event)
def mousePressEvent( self, event ):
x = event.pos().x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.pos().y()
button = self.buttond[event.button()]
FigureCanvasBase.button_press_event( self, x, y, button )
if DEBUG: print 'button pressed:', event.button()
def mouseMoveEvent( self, event ):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
FigureCanvasBase.motion_notify_event( self, x, y )
if DEBUG: print 'mouse move'
def mouseReleaseEvent( self, event ):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
button = self.buttond[event.button()]
FigureCanvasBase.button_release_event( self, x, y, button )
if DEBUG: print 'button released'
def keyPressEvent( self, event ):
key = self._get_key( event )
FigureCanvasBase.key_press_event( self, key )
if DEBUG: print 'key press', key
def keyReleaseEvent( self, event ):
key = self._get_key(event)
FigureCanvasBase.key_release_event( self, key )
if DEBUG: print 'key release', key
def resizeEvent( self, event ):
if DEBUG: print 'resize (%d x %d)' % (event.size().width(), event.size().height())
qt.QWidget.resizeEvent( self, event )
w = event.size().width()
h = event.size().height()
if DEBUG: print "FigureCanvasQt.resizeEvent(", w, ",", h, ")"
dpival = self.figure.dpi
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches( winch, hinch )
self.draw()
def resize( self, w, h ):
# Pass through to Qt to resize the widget.
qt.QWidget.resize( self, w, h )
# Resize the figure by converting pixels to inches.
pixelPerInch = self.figure.dpi
wInch = w / pixelPerInch
hInch = h / pixelPerInch
self.figure.set_size_inches( wInch, hInch )
# Redraw everything.
self.draw()
def sizeHint( self ):
w, h = self.get_width_height()
return qt.QSize( w, h )
    def minimumSizeHint( self ):
return qt.QSize( 10, 10 )
def _get_key( self, event ):
if event.key() < 256:
key = event.text().latin1()
        elif event.key() in self.keyvald:
key = self.keyvald[ event.key() ]
else:
key = None
return key
def flush_events(self):
qt.qApp.processEvents()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerQT( FigureManagerBase ):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The qt.QToolBar
window : The qt.QMainWindow
"""
def __init__( self, canvas, num ):
if DEBUG: print 'FigureManagerQT.%s' % fn_name()
FigureManagerBase.__init__( self, canvas, num )
self.canvas = canvas
self.window = qt.QMainWindow( None, None, qt.Qt.WDestructiveClose )
self.window.closeEvent = self._widgetCloseEvent
centralWidget = qt.QWidget( self.window )
self.canvas.reparent( centralWidget, qt.QPoint( 0, 0 ) )
# Give the keyboard focus to the figure instead of the manager
self.canvas.setFocusPolicy( qt.QWidget.ClickFocus )
self.canvas.setFocus()
self.window.setCaption( "Figure %d" % num )
self.window._destroying = False
self.toolbar = self._get_toolbar(self.canvas, centralWidget)
# Use a vertical layout for the plot and the toolbar. Set the
# stretch to all be in the plot so the toolbar doesn't resize.
self.layout = qt.QVBoxLayout( centralWidget )
self.layout.addWidget( self.canvas, 1 )
if self.toolbar:
self.layout.addWidget( self.toolbar, 0 )
self.window.setCentralWidget( centralWidget )
# Reset the window height so the canvas will be the right
# size. This ALMOST works right. The first issue is that the
# height w/ a toolbar seems to be off by just a little bit (so
# we add 4 pixels). The second is that the total width/height
# is slightly smaller that we actually want. It seems like
# the border of the window is being included in the size but
# AFAIK there is no way to get that size.
w = self.canvas.width()
h = self.canvas.height()
if self.toolbar:
h += self.toolbar.height() + 4
self.window.resize( w, h )
if matplotlib.is_interactive():
self.window.show()
# attach a show method to the figure for pylab ease of use
self.canvas.figure.show = lambda *args: self.window.show()
def notify_axes_change( fig ):
# This will be called whenever the current axes is changed
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver( notify_axes_change )
def _widgetclosed( self ):
if self.window._destroying: return
self.window._destroying = True
Gcf.destroy(self.num)
def _widgetCloseEvent( self, event ):
self._widgetclosed()
qt.QWidget.closeEvent( self.window, event )
def _get_toolbar(self, canvas, parent):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar'] == 'classic':
print "Classic toolbar is not yet supported"
elif matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2QT(canvas, parent)
else:
toolbar = None
return toolbar
def resize(self, width, height):
'set the canvas size in pixels'
self.window.resize(width, height)
def destroy( self, *args ):
if self.window._destroying: return
self.window._destroying = True
if self.toolbar: self.toolbar.destroy()
if DEBUG: print "destroy figure manager"
self.window.close(True)
def set_window_title(self, title):
self.window.setCaption(title)
class NavigationToolbar2QT( NavigationToolbar2, qt.QWidget ):
# list of toolitems to add to the toolbar, format is:
# text, tooltip_text, image_file, callback(str)
toolitems = (
('Home', 'Reset original view', 'home.ppm', 'home'),
('Back', 'Back to previous view','back.ppm', 'back'),
('Forward', 'Forward to next view','forward.ppm', 'forward'),
(None, None, None, None),
('Pan', 'Pan axes with left mouse, zoom with right', 'move.ppm', 'pan'),
('Zoom', 'Zoom to rectangle','zoom_to_rect.ppm', 'zoom'),
(None, None, None, None),
('Subplots', 'Configure subplots','subplots.png', 'configure_subplots'),
('Save', 'Save the figure','filesave.ppm', 'save_figure'),
)
def __init__( self, canvas, parent ):
self.canvas = canvas
self.buttons = {}
qt.QWidget.__init__( self, parent )
# Layout toolbar buttons horizontally.
self.layout = qt.QHBoxLayout( self )
self.layout.setMargin( 2 )
NavigationToolbar2.__init__( self, canvas )
def _init_toolbar( self ):
basedir = os.path.join(matplotlib.rcParams[ 'datapath' ],'images')
for text, tooltip_text, image_file, callback in self.toolitems:
if text == None:
self.layout.addSpacing( 8 )
continue
fname = os.path.join( basedir, image_file )
image = qt.QPixmap()
image.load( fname )
button = qt.QPushButton( qt.QIconSet( image ), "", self )
qt.QToolTip.add( button, tooltip_text )
self.buttons[ text ] = button
# The automatic layout doesn't look that good - it's too close
# to the images so add a margin around it.
margin = 4
button.setFixedSize( image.width()+margin, image.height()+margin )
qt.QObject.connect( button, qt.SIGNAL( 'clicked()' ),
getattr( self, callback ) )
self.layout.addWidget( button )
self.buttons[ 'Pan' ].setToggleButton( True )
self.buttons[ 'Zoom' ].setToggleButton( True )
# Add the x,y location widget at the right side of the toolbar
# The stretch factor is 1 which means any resizing of the toolbar
# will resize this label instead of the buttons.
self.locLabel = qt.QLabel( "", self )
self.locLabel.setAlignment( qt.Qt.AlignRight | qt.Qt.AlignVCenter )
self.locLabel.setSizePolicy(qt.QSizePolicy(qt.QSizePolicy.Ignored,
qt.QSizePolicy.Ignored))
self.layout.addWidget( self.locLabel, 1 )
# reference holder for subplots_adjust window
self.adj_window = None
def destroy( self ):
for text, tooltip_text, image_file, callback in self.toolitems:
if text is not None:
qt.QObject.disconnect( self.buttons[ text ],
qt.SIGNAL( 'clicked()' ),
getattr( self, callback ) )
def pan( self, *args ):
self.buttons[ 'Zoom' ].setOn( False )
NavigationToolbar2.pan( self, *args )
def zoom( self, *args ):
self.buttons[ 'Pan' ].setOn( False )
NavigationToolbar2.zoom( self, *args )
def dynamic_update( self ):
self.canvas.draw()
def set_message( self, s ):
self.locLabel.setText( s )
def set_cursor( self, cursor ):
if DEBUG: print 'Set cursor' , cursor
qt.QApplication.restoreOverrideCursor()
qt.QApplication.setOverrideCursor( qt.QCursor( cursord[cursor] ) )
def draw_rubberband( self, event, x0, y0, x1, y1 ):
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
        rect = [ int(val) for val in (min(x0, x1), min(y0, y1), w, h) ]
self.canvas.drawRectangle( rect )
def configure_subplots(self):
self.adj_window = qt.QMainWindow(None, None, qt.Qt.WDestructiveClose)
win = self.adj_window
win.setCaption("Subplot Configuration Tool")
toolfig = Figure(figsize=(6,3))
toolfig.subplots_adjust(top=0.9)
w = int (toolfig.bbox.width)
h = int (toolfig.bbox.height)
canvas = self._get_canvas(toolfig)
tool = SubplotTool(self.canvas.figure, toolfig)
centralWidget = qt.QWidget(win)
canvas.reparent(centralWidget, qt.QPoint(0, 0))
win.setCentralWidget(centralWidget)
layout = qt.QVBoxLayout(centralWidget)
layout.addWidget(canvas, 1)
win.resize(w, h)
canvas.setFocus()
win.show()
def _get_canvas(self, fig):
return FigureCanvasQT(fig)
def save_figure( self ):
filetypes = self.canvas.get_supported_filetypes_grouped()
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
default_filetype = self.canvas.get_default_filetype()
start = "image." + default_filetype
filters = []
selectedFilter = None
for name, exts in sorted_filetypes:
exts_list = " ".join(['*.%s' % ext for ext in exts])
filter = '%s (%s)' % (name, exts_list)
if default_filetype in exts:
selectedFilter = filter
filters.append(filter)
filters = ';;'.join(filters)
fname = qt.QFileDialog.getSaveFileName(
start, filters, self, "Save image", "Choose a filename to save to",
selectedFilter)
if fname:
try:
self.canvas.print_figure( unicode(fname) )
except Exception, e:
qt.QMessageBox.critical(
self, "Error saving file", str(e),
qt.QMessageBox.Ok, qt.QMessageBox.NoButton)
def set_history_buttons( self ):
canBackward = ( self._views._pos > 0 )
canForward = ( self._views._pos < len( self._views._elements ) - 1 )
self.buttons[ 'Back' ].setEnabled( canBackward )
self.buttons[ 'Forward' ].setEnabled( canForward )
# set icon used when windows are minimized
try:
# TODO: This is badly broken
qt.window_set_default_icon_from_file (
os.path.join( matplotlib.rcParams['datapath'], 'images', 'matplotlib.svg' ) )
except:
verbose.report( 'Could not load matplotlib icon: %s' % sys.exc_info()[1] )
def error_msg_qt( msg, parent=None ):
if not is_string_like( msg ):
msg = ','.join( map( str,msg ) )
qt.QMessageBox.warning( None, "Matplotlib", msg, qt.QMessageBox.Ok )
def exception_handler( type, value, tb ):
"""Handle uncaught exceptions
It does not catch SystemExit
"""
msg = ''
# get the filename attribute if available (for IOError)
if hasattr(value, 'filename') and value.filename != None:
msg = value.filename + ': '
if hasattr(value, 'strerror') and value.strerror != None:
msg += value.strerror
else:
msg += str(value)
if len( msg ) : error_msg_qt( msg )
FigureManager = FigureManagerQT
| agpl-3.0 |
jreback/pandas | pandas/tests/io/pytables/common.py | 4 | 2054 | from contextlib import contextmanager
import os
import tempfile
import pytest
from pandas.io.pytables import HDFStore
tables = pytest.importorskip("tables")
# set these parameters so we don't have file sharing
tables.parameters.MAX_NUMEXPR_THREADS = 1
tables.parameters.MAX_BLOSC_THREADS = 1
tables.parameters.MAX_THREADS = 1
def safe_remove(path):
if path is not None:
try:
os.remove(path)
except OSError:
pass
def safe_close(store):
try:
if store is not None:
store.close()
except OSError:
pass
def create_tempfile(path):
""" create an unopened named temporary file """
return os.path.join(tempfile.gettempdir(), path)
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean_store(path, mode="a", complevel=None, complib=None, fletcher32=False):
try:
# put in the temporary path if we don't have one already
if not len(os.path.dirname(path)):
path = create_tempfile(path)
store = HDFStore(
            path, mode=mode, complevel=complevel, complib=complib, fletcher32=fletcher32
)
yield store
finally:
safe_close(store)
if mode == "w" or mode == "a":
safe_remove(path)
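# Illustrative usage sketch (not part of the original test suite; assumes
# pandas is imported as pd in the calling test module):
#
#   with ensure_clean_store("tmp_example.h5") as store:
#       store.put("df", pd.DataFrame({"a": [1, 2, 3]}))
#       assert "df" in store
#   # the backing file is removed automatically on exit for mode "w"/"a"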
@contextmanager
def ensure_clean_path(path):
"""
return essentially a named temporary file that is not opened
and deleted on exiting; if path is a list, then create and
return list of filenames
"""
try:
if isinstance(path, list):
filenames = [create_tempfile(p) for p in path]
yield filenames
else:
filenames = [create_tempfile(path)]
yield filenames[0]
finally:
for f in filenames:
safe_remove(f)
def _maybe_remove(store, key):
"""
For tests using tables, try removing the table to be sure there is
no content from previous tests using the same table name.
"""
try:
store.remove(key)
except (ValueError, KeyError):
pass
| bsd-3-clause |
dankolbman/NumericalAnalysis | Homeworks/HW2/Problem5ii.py | 1 | 3007 | import math
import scipy.interpolate as intrp
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
font = {'family' : 'normal',
'size' : 24}
rc('font', **font)
### The function
def f(t):
return 1/(1+t**2)
# Spline
def spline(xpts, ypts):
n = len(xpts)
mat = np.zeros(( n, n))
rhs = np.zeros(( n,1 ))
for i in range(1,n-1):
rhs[i] = 6 * ( (ypts[i+1]-ypts[i]) / (xpts[i+1]-xpts[i]) \
-(ypts[i]-ypts[i-1]) / (xpts[i]-xpts[i-1]) )
for j in range(0,n-1):
# Set triagonal elements
if(j==i-1): mat[i][j] += xpts[i] - xpts[i-1]
elif(j==i): mat[i][j] += 2*(xpts[i+1]-xpts[i-1])
elif(j==i+1): mat[i][j] += xpts[i+1]-xpts[i]
# BCs
mat[0][0] = 1
mat[-1][-1] = 1
rhs[0] = 0
rhs[-1] = 0
# Solve it
x_vec = np.linalg.solve(mat, rhs)
return x_vec
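# The linear system assembled in spline() is the standard tridiagonal system
# for the second derivatives M_i of a natural cubic spline (M_0 = M_{n-1} = 0);
# the returned x_vec holds those M_i and feeds the piecewise evaluation below.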
#######
# The function
x = [ i/100 for i in range(-500,500) ]
fx = [ f(i) for i in x ]
plt.plot(x,fx, 'k--',label='f(t)', linewidth=5)
### 5 points
xpts = np.linspace(-5, 5, 5)
ypts = [ f(t) for t in xpts ]
sol = spline(xpts, ypts)
n = len(xpts)
x = []
fx = []
t = 1000
for i in range(0,n-1):
dx = xpts[i+1]-xpts[i]
for j in range(t):
bb = 1*j/(t)
aa = 1 - bb
x.append(xpts[i]+bb*dx)
cc = dx**2*aa*(aa**2-1)/6
dd = dx**2*bb*(bb**2-1)/6
fx.append(aa*ypts[i]+bb*ypts[i+1]+cc*sol[i]+dd*sol[i+1])
plt.plot(x,fx, 'r', label='5 Points')
diffs = [ f( x[i] ) - fx[i] for i in range(len(x)) ]
rmse=np.linalg.norm( diffs )/np.sqrt(len(fx))
print('Error for 5 Points:', rmse)
### 10 points
xpts = np.linspace(-5, 5, 10)
ypts = [ f(t) for t in xpts ]
sol = spline(xpts, ypts)
n = len(xpts)
x = []
fx = []
t = 1000
for i in range(0,n-1):
dx = xpts[i+1]-xpts[i]
for j in range(t):
bb = 1*j/(t)
aa = 1 - bb
x.append(xpts[i]+bb*dx)
cc = dx**2*aa*(aa**2-1)/6
dd = dx**2*bb*(bb**2-1)/6
fx.append(aa*ypts[i]+bb*ypts[i+1]+cc*sol[i]+dd*sol[i+1])
plt.plot(x,fx, 'b', label='10 Points')
diffs = [ f( x[i] ) - fx[i] for i in range(len(x)) ]
rmse=np.linalg.norm( diffs )/np.sqrt(len(fx))
print('Error for 10 Points:', rmse)
### 15 points
xpts = np.linspace(-5, 5, 15)
ypts = [ f(t) for t in xpts ]
sol = spline(xpts, ypts)
n = len(xpts)
x = []
fx = []
t = 1000
for i in range(0,n-1):
dx = xpts[i+1]-xpts[i]
for j in range(t):
bb = 1*j/(t)
aa = 1 - bb
x.append(xpts[i]+bb*dx)
cc = dx**2*aa*(aa**2-1)/6
dd = dx**2*bb*(bb**2-1)/6
fx.append(aa*ypts[i]+bb*ypts[i+1]+cc*sol[i]+dd*sol[i+1])
plt.plot(x,fx, 'g', label='15 Points',linewidth=3)
diffs = [ f( x[i] ) - fx[i] for i in range(len(x)) ]
rmse=np.linalg.norm( diffs )/np.sqrt(len(fx))
print('Error for 15 Points:', rmse)
plt.legend(fontsize=16)
plt.ylim( [-0.2, 1.1] )
plt.title('Natural Cubic Splines for $f(t)$')
plt.savefig('Problem5ii.png')
plt.show()
| mit |
samueljackson92/NDImage | ndimage/gui/mpl_canvas.py | 1 | 2950 | from __future__ import unicode_literals
from PyQt4 import QtGui
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt4agg import (
FigureCanvasQTAgg as FigureCanvas,
NavigationToolbar2QT as NavigationToolbar)
from matplotlib.figure import Figure
class PandasMplWidget(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.pmc = PandasMplCanvas(width=2, height=2, dpi=100)
self.toolbar = NavigationToolbar(self.pmc.figure.canvas, self)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Fixed)
sizePolicy.setHeightForWidth(True)
self.toolbar.setSizePolicy(sizePolicy)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.pmc)
vbox.addWidget(self.toolbar)
self.setLayout(vbox)
def get_figure_canvas(self):
return self.pmc
def plot(self, data):
self.pmc.plot_data_frame(data)
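# Illustrative usage sketch (hypothetical; assumes a running QApplication and
# a DataFrame `df` whose first two columns are labelled 0 and 1, as indexed in
# PandasMplCanvas.plot_data_frame below):
#
#   widget = PandasMplWidget()
#   widget.plot(df)
#   widget.show()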
class PandasMplCanvas(FigureCanvas):
def __init__(self, parent=None, width=5, height=4, dpi=100):
self.figure = Figure(figsize=(width, height), dpi=dpi)
self.axes = self.figure.add_subplot(111)
# We want the axes cleared every time plot() is called
self.axes.hold(False)
FigureCanvas.__init__(self, self.figure)
self.setParent(parent)
FigureCanvas.setSizePolicy(self,
QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
def plot_data_frame(self, dataFrame):
x = dataFrame[[0]]
y = dataFrame[[1]]
self.points = self.axes.scatter(x, y, picker=True)
self.reset_face_color()
self.draw()
def set_class_label(self, labels):
categories = np.unique(labels)
colors = np.linspace(0, 1, len(categories))
colordict = dict(zip(categories, colors))
colour_weights = labels.apply(lambda x: colordict[x])
color_map = plt.get_cmap("Spectral")
class_colors = color_map(colour_weights)
# copy existing alpha values
class_colors[:, -1] = self.face_color[:, -1]
self.face_color = class_colors
self.points.set_facecolors(self.face_color)
self.draw()
def reset_face_color(self):
npts = len(self.points.get_offsets())
self.face_color = self.points.get_facecolors()
self.face_color = np.tile(self.face_color, npts).reshape(npts, -1)
def reset_color(self):
self.face_color = self.points.get_facecolors()
self.face_color[:, -1] = 1
self.points.set_facecolors(self.face_color)
self.draw()
def highlight_points(self, idx, alpha=0.2):
self.face_color[:, -1] = alpha
self.face_color[idx, -1] = 1
self.points.set_facecolors(self.face_color)
| mit |
natashabatalha/PandExo | pandexo/engine/create_input.py | 1 | 18989 | import numpy as np
import pickle
import pandas as pd
from sqlalchemy import *
import astropy.units as u
import astropy.constants as c
import os
from astropy.modeling import blackbody as bb
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
import pysynphot as psyn
def outTrans(input) :
"""Compute out of transit spectra
Computes the out of transit spectra by normalizing flux to specified
magnitude and convert to specified Pandeia units of milliJy and microns.
Parameters
----------
input : dict
stellar scene which includes parameters to extract phoenix database or a
filename which points to a stellar spectrum
Return
------
dict
contains wave and flux_out_trans
"""
ref_wave = float(input['ref_wave'])
mag = float(input['mag'])
################# USER ####################################
if input['type'] == 'user':
if isinstance(input['starpath'], dict):
star = input['starpath']
else: #if isinstance(input['starpath'], str):
star = np.genfromtxt(input['starpath'], dtype=(float, float),
names='w, f')
#get flux
flux = star['f'] #star.field(input['logg'])
#get wavelength and reference wavelength for mag normalization
wave = star['w'] #star.field('WAVELENGTH')
#sort if not in ascending order
sort = np.array([wave,flux]).T
sort= sort[sort[:,0].argsort()]
wave = sort[:,0]
flux = sort[:,1]
if input['w_unit'] == 'um':
PANDEIA_WAVEUNITS = 'um'
elif input['w_unit'] == 'nm':
PANDEIA_WAVEUNITS = 'nm'
elif input['w_unit'] == 'cm' :
PANDEIA_WAVEUNITS = 'cm'
elif input['w_unit'] == 'Angs' :
PANDEIA_WAVEUNITS = 'angstrom'
elif input['w_unit'] == 'Hz' :
PANDEIA_WAVEUNITS = 'Hz'
else:
raise Exception('Units are not correct. Pick um, nm, cm, hz, or Angs')
#convert to photons/s/nm/m^2 for flux normalization based on
#http://www.gemini.edu/sciops/instruments/integration-time-calculators/itc-help/source-definition
if input['f_unit'] == 'Jy':
PANDEIA_FLUXUNITS = 'jy'
elif input['f_unit'] == 'FLAM' :
PANDEIA_FLUXUNITS = 'FLAM'
elif input['f_unit'] == 'erg/cm2/s/Hz':
flux = flux*1e23
PANDEIA_FLUXUNITS = 'jy'
else:
raise Exception('Units are not correct. Pick FLAM or Jy or erg/cm2/s/Hz')
        sp = psyn.ArraySpectrum(wave, flux, waveunits=PANDEIA_WAVEUNITS, fluxunits=PANDEIA_FLUXUNITS) #Convert everything to nanometers for conversion based on gemini.edu
sp.convert("nm")
sp.convert('jy')
############ PHOENIX ################################################
elif input['type'] =='phoenix':
#make sure metal is not out of bounds
if input['metal'] > 0.5: input['metal'] = 0.5
sp = psyn.Icat("phoenix", input['temp'], input['metal'], input['logg'])
sp.convert("nm")
sp.convert("jy")
wave = sp.wave
flux = sp.flux
input['w_unit'] ='nm'
input['f_unit'] = 'jy'
else:
raise Exception('Wrong input type for stellar spectra')
############ NORMALIZATION ################################################
refdata = os.environ.get("pandeia_refdata")
all_bps = {"H": 'bessell_h_004_syn.fits',
"J":'bessell_j_003_syn.fits' ,
"K": 'bessell_k_003_syn.fits'}
if (ref_wave <= 1.3) & (ref_wave >= 1.2):
filt = 'J'
elif (ref_wave <= 1.7) & (ref_wave >= 1.6):
filt = 'H'
elif (ref_wave <= 2.3) & (ref_wave >= 2.1):
filt = 'K'
else:
raise Exception('Only J H and K zeropoints are included')
bp_path = os.path.join(refdata, "normalization", "bandpass", all_bps[filt])
bp = psyn.FileBandpass(bp_path)
sp.convert('angstroms')
bp.convert('angstroms')
rn_sp = sp.renorm(mag, 'vegamag', bp)
rn_sp.convert("microns")
rn_sp.convert("mjy")
flux_out_trans = rn_sp.flux
wave = rn_sp.wave
return {'flux_out_trans': flux_out_trans, 'wave': wave,'phoenix':sp}
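# Illustrative input sketch for outTrans (keys taken from the code above; the
# numeric values are hypothetical):
#
#   stellar_scene = {'type': 'phoenix', 'temp': 5800., 'metal': 0.0,
#                    'logg': 4.4, 'mag': 8.0, 'ref_wave': 1.25}
#   out_trans = outTrans(stellar_scene)
#   # -> dict with 'flux_out_trans', 'wave', 'phoenix'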
def bothTrans(out_trans, planet,star=None) :
"""Calculates in transit flux
Takes output from `outTrans`, which is the normalized stellar flux, and
creates either a transit transmission spectrum, phase curve or emission spectrum.
Parameters
----------
out_trans: dict
includes dictionary from `outTrans` output.
planet: dict
dictionary with direction to planet spectra, wavelength and flux units
star: dict
(Optional) dictionary within exo_input with stellar information. Only
used when scaling Fortney Grid spectra to get (rp/r*)^2
Return
------
dict
dictionary with out of transit flux, in transit flux, original model
and corresponding wavelengths
"""
if planet['type'] =='user':
if isinstance(planet['exopath'], dict):
load_file = planet['exopath']
else: #if isinstance(planet['exopath'], str):
load_file = np.genfromtxt(planet['exopath'], dtype=(float, float),
names='w, f')
#get wavelength
wave_planet = load_file['w']
#get planet flux
flux_planet = load_file['f']
#sort if not in ascending order
sort = np.array([wave_planet,flux_planet]).T
sort= sort[sort[:,0].argsort()]
wave_planet = sort[:,0]
flux_planet = sort[:,1]
############## IF USER SELECTS CONSTANT VALUE ##################
elif planet['type'] == 'constant':
rplan = (planet['radius']*u.Unit(planet['r_unit'])).to(u.km)
rstar = (star['radius']*u.Unit(star['r_unit'])).to(u.km)
#constant transit depth
if planet['f_unit'] == 'rp^2/r*^2':
wave_planet = np.linspace(0.5,15,1000)
planet['depth'] = float(rplan**2 / rstar**2)
flux_planet = np.linspace(0.5,15,1000)*0 + planet['depth']
planet['w_unit'] = 'um'
#constant fp/f* (using out_trans from user)
elif planet['f_unit'] == 'fp/f*':
planet['w_unit'] = 'um'
wave_planet = out_trans['wave'][(out_trans['wave']>0.5) & (out_trans['wave']<15)]
flux_star = (out_trans['phoenix'].flux*(u.Jy)).to(u.mJy)[(out_trans['wave']>0.5) & (out_trans['wave']<15)]
            #MAKING SURE TO ADD IN STUPID PI FOR PER STERADIAN!!!!
flux_planet = (bb.blackbody_nu(wave_planet*u.micron, planet['temp']*u.K)*np.pi*u.sr).to(u.mJy)
            # ( bb planet / phoenix sed ) * (rp/r*)^2
flux_planet = np.array((flux_planet/flux_star) * (rplan/rstar)**2.0)
############## IF USER SELECTS TO PULL FROM GRID ##################
elif planet['type'] =='grid':
try:
db = create_engine('sqlite:///'+os.environ.get('FORTGRID_DIR'))
header= pd.read_sql_table('header',db)
except:
raise Exception('Fortney Grid File Path is incorrect, or not initialized')
#radius of star
try:
rstar = (star['radius']*u.Unit(star['r_unit'])).to(u.km)
except:
raise Exception("Radius of Star not supplied for scaling. Check exo_input['star']['radius']")
#radius of planet
try:
rplan = (planet['radius']*u.Unit(planet['r_unit'])).to(u.km)
except:
planet['radius'] = (1.25*c.R_jup).to(u.km)
rplan = planet['radius']
print('Default Planet Radius of 1.25 Rj given')
#clouds
if planet['cloud'].find('flat') != -1:
planet['flat'] = int(planet['cloud'][4:])
planet['ray'] = 0
elif planet['cloud'].find('ray') != -1:
planet['ray'] = int(planet['cloud'][3:])
planet['flat'] = 0
elif int(planet['cloud']) == 0:
planet['flat'] = 0
planet['ray'] = 0
else:
planet['flat'] = 0
planet['ray'] = 0
            print('No cloud parameter specified, default no clouds added')
#chemistry
if planet['chem'] == 'noTiO':
planet['noTiO'] = True
planet['eqchem'] = True
if planet['chem'] == 'eqchem':
planet['noTiO'] = False
planet['eqchem'] = True
#grid does not allow clouds for cases with TiO
planet['flat'] = 0
planet['ray'] = 0
#we are only using gravity of 25 and scaling by mass from there
fort_grav = 25.0*u.m/u.s/u.s
df = header.loc[(header.gravity==fort_grav) & (header.temp==planet['temp'])
& (header.noTiO==planet['noTiO']) & (header.ray==planet['ray']) &
(header.flat==planet['flat'])]
wave_planet=np.array(pd.read_sql_table(df['name'].values[0],db)['wavelength'])[::-1]
r_lambda=np.array(pd.read_sql_table(df['name'].values[0],db)['radius'])*u.km
z_lambda = r_lambda- (1.25*u.R_jup).to(u.km) #all fortney models have fixed 1.25 radii
#scale with planetary mass
try:
mass = (planet['mass']*u.Unit(planet['m_unit'])).to(u.kg)
gravity = c.G*(mass)/(rplan.to(u.m))**2.0 #convert radius to m for gravity units
            #scale lambda (this technically ignores the fact that scaleheight is altitude dependent)
            #therefore, it will not be valid for very very low gravities
z_lambda = z_lambda*fort_grav/gravity
except:
#keep original z lambda
gravity=25.0
z_lambda = z_lambda*fort_grav/fort_grav
print('Default Planet Gravity of 25 m/s2 given')
        #create new wavelength dependent R based on scaled gravity
r_lambda = z_lambda + rplan
#finally compute (rp/r*)^2
flux_planet = np.array(r_lambda**2/rstar**2)[::-1]
planet['w_unit'] = 'um'
planet['f_unit'] = 'rp^2/r*^2'
else:
raise Exception("Incorrect Planet Type. Options are 'user','constant','grid'")
#Convert wave to micron
if planet['w_unit'] == 'um':
wave_planet = wave_planet
elif planet['w_unit'] == 'nm':
wave_planet = wave_planet*1e-3
elif planet['w_unit'] == 'cm':
wave_planet = wave_planet*1e4
elif planet['w_unit'] == 'Angs' :
wave_planet = wave_planet*1e-4
elif planet['w_unit'] == 'Hz' :
wave_planet = 3e17/wave_planet
elif planet['w_unit'] == 'sec' :
wave_planet = wave_planet
else:
raise Exception('Units are not correct. Pick um, nm, cm, Angs or sec.')
if planet['w_unit'] == 'sec' :
#star flux to feed into pandeia
time = wave_planet
flux_star = out_trans['flux_out_trans']
wave_star = out_trans['wave']
if planet['f_unit'] == 'fp/f*' :
flux_planet = flux_planet
else:
print("Seconds with rp^2/r*^2 units not an option. Switch to Fp/F*")
return
return {'time':time, 'wave':wave_star,'flux_out_trans':flux_star, 'planet_phase':flux_planet,
'model_wave':time, 'model_spec':flux_planet, 'frac':(1.+flux_planet)}
else:
#star flux to calc transit depth
flux_star = out_trans['flux_out_trans']
wave_star = out_trans['wave']
#give them same wave min and wave max
wavemin = max([min(wave_planet), min(wave_star),0.5])
wavemax = min([max(wave_planet),max(wave_star),15])
flux_planet = flux_planet[(wave_planet>wavemin) & (wave_planet<wavemax)]
wave_planet = wave_planet[(wave_planet>wavemin) & (wave_planet<wavemax)]
flux_out_trans = np.interp(wave_planet, wave_star, flux_star)
#convert to 1-depth
if planet['f_unit'] == 'rp^2/r*^2' :
depth_fraction = 1.-flux_planet
flux_in_trans = depth_fraction*flux_out_trans
elif planet['f_unit'] == 'fp/f*':
depth_fraction = (1.0 + flux_planet)
flux_in_trans = flux_out_trans*(1.0 + flux_planet)
else:
raise Exception('Units are not correct. Pick rp^2/r*^2 or fp/f*')
results= {'wave':wave_planet, 'flux_in_trans': flux_in_trans, 'flux_out_trans':flux_out_trans,
'model_wave':wave_planet, 'model_spec': flux_planet, 'frac':depth_fraction}
return results
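# Illustrative call sketch (keys mirror the 'constant' branch above; the
# numeric values and the out_trans input are hypothetical):
#
#   planet = {'type': 'constant', 'f_unit': 'rp^2/r*^2',
#             'radius': 71492., 'r_unit': 'km'}
#   star = {'radius': 695700., 'r_unit': 'km'}
#   spec = bothTrans(out_trans, planet, star=star)   # out_trans from outTrans()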
def hst_spec(planet,star) :
"""Calculates in transit flux
Takes output from `outTrans`, which is the normalized stellar flux, and
creates either a transit transmission spectrum, phase curve or emission spectrum.
Parameters
----------
planet: dict
dictionary with direction to planet spectra, wavelength and flux units
star: dict
dictionary within exo_input with stellar information. Only
used when scaling Fortney Grid spectra to get (rp/r*)^2
Return
------
dict
dictionary with out of transit flux, in transit flux, original model
and corresponding wavelengths
"""
if planet['type'] =='user':
load_file = np.genfromtxt(planet['exopath'], dtype=(float, float), names='w, f')
#get wavelength
wave_planet = load_file['w']
#get planet flux
flux_planet = load_file['f']
#sort if not in ascending order
sort = np.array([wave_planet,flux_planet]).T
sort= sort[sort[:,0].argsort()]
wave_planet = sort[:,0]
flux_planet = sort[:,1]
############## IF USER SELECTS CONSTANT VALUE ##################
elif planet['type'] == 'constant':
rplan = (planet['radius']*u.Unit(planet['r_unit'])).to(u.km)
rstar = (star['radius']*u.Unit(star['r_unit'])).to(u.km)
#constant transit depth
if planet['f_unit'] == 'rp^2/r*^2':
wave_planet = np.linspace(0.1,3,500)
planet['depth'] = float(rplan**2 / rstar**2)
flux_planet = np.linspace(0.1,3,500)*0 + planet['depth']
planet['w_unit'] = 'um'
#constant fp/f* (using out_trans from user)
elif planet['f_unit'] == 'fp/f*':
planet['w_unit'] = 'um'
wave_planet = np.linspace(0.1,3,500)
flux_star = (bb.blackbody_nu(wave_planet*u.micron, star['temp']*u.K)*np.pi*u.sr).to(u.mJy)
            #MAKING SURE TO ADD IN STUPID PI FOR PER STERADIAN!!!!
flux_planet = (bb.blackbody_nu(wave_planet*u.micron, planet['temp']*u.K)*np.pi*u.sr).to(u.mJy)
# ( bb planet / pheonix sed ) * (rp/r*)^2
flux_planet = np.array((flux_planet/flux_star) * (rplan/rstar)**2.0)
############## IF USER SELECTS TO PULL FROM GRID ##################
elif planet['type'] =='grid':
try:
db = create_engine('sqlite:///'+os.environ.get('FORTGRID_DIR'))
header= pd.read_sql_table('header',db)
except:
raise Exception('Fortney Grid File Path is incorrect, or not initialized')
#radius of star
try:
rstar = (star['radius']*u.Unit(star['r_unit'])).to(u.km)
except:
raise Exception("Radius of Star not supplied for scaling. Check exo_input['star']['radius']")
#radius of planet
try:
rplan = (planet['radius']*u.Unit(planet['r_unit'])).to(u.km)
except:
planet['radius'] = (1.25*c.R_jup).to(u.km)
rplan = planet['radius']
print('Default Planet Radius of 1.25 Rj given')
#clouds
if planet['cloud'].find('flat') != -1:
planet['flat'] = int(planet['cloud'][4:])
planet['ray'] = 0
elif planet['cloud'].find('ray') != -1:
planet['ray'] = int(planet['cloud'][3:])
planet['flat'] = 0
elif int(planet['cloud']) == 0:
planet['flat'] = 0
planet['ray'] = 0
else:
planet['flat'] = 0
planet['ray'] = 0
            print('No cloud parameter specified, default no clouds added')
#chemistry
if planet['chem'] == 'noTiO':
planet['noTiO'] = True
planet['eqchem'] = True
if planet['chem'] == 'eqchem':
planet['noTiO'] = False
planet['eqchem'] = True
#grid does not allow clouds for cases with TiO
planet['flat'] = 0
planet['ray'] = 0
#we are only using gravity of 25 and scaling by mass from there
fort_grav = 25.0*u.m/u.s/u.s
df = header.loc[(header.gravity==fort_grav) & (header.temp==planet['temp'])
& (header.noTiO==planet['noTiO']) & (header.ray==planet['ray']) &
(header.flat==planet['flat'])]
wave_planet=np.array(pd.read_sql_table(df['name'].values[0],db)['wavelength'])[::-1]
r_lambda=np.array(pd.read_sql_table(df['name'].values[0],db)['radius'])*u.km
z_lambda = r_lambda- (1.25*u.R_jup).to(u.km) #all fortney models have fixed 1.25 radii
#scale with planetary mass
try:
mass = (planet['mass']*u.Unit(planet['m_unit'])).to(u.kg)
gravity = c.G*(mass)/(rplan.to(u.m))**2.0 #convert radius to m for gravity units
            #scale lambda (this technically ignores the fact that scaleheight is altitude dependent)
            #therefore, it will not be valid for very very low gravities
z_lambda = z_lambda*fort_grav/gravity
except:
#keep original z lambda
gravity=25.0
z_lambda = z_lambda*fort_grav/fort_grav
print('Default Planet Gravity of 25 m/s2 given')
        #create new wavelength dependent R based on scaled gravity
r_lambda = z_lambda + rplan
#finally compute (rp/r*)^2
flux_planet = np.array(r_lambda**2/rstar**2)[::-1]
planet['w_unit'] = 'um'
planet['f_unit'] = 'rp^2/r*^2'
else:
raise Exception("Incorrect Planet Type. Options are 'user','constant','grid'")
#Convert wave to micron
if planet['w_unit'] == 'um':
wave_planet = wave_planet
elif planet['w_unit'] == 'nm':
wave_planet = wave_planet*1e-3
elif planet['w_unit'] == 'cm':
wave_planet = wave_planet*1e4
elif planet['w_unit'] == 'Angs' :
wave_planet = wave_planet*1e-4
elif planet['w_unit'] == 'Hz' :
wave_planet = 3e17/wave_planet
else:
raise Exception('Units are not correct. Pick um, nm, cm, Angs or sec.')
return wave_planet, flux_planet
| gpl-3.0 |
X-martin/robot_quant | frame/filter_method.py | 1 | 1740 | import pandas as pd
from datetime import datetime
from datetime import timedelta
def cross(factor_list, stock_list, date, args):
dt = timedelta(days=1)
df1 = factor_list[0].get_val(stock_list, date)
df2 = factor_list[1].get_val(stock_list, date)
df1_0 = factor_list[0].get_val(stock_list, date - dt)
df2_0 = factor_list[1].get_val(stock_list, date - dt)
id = (df1_0['FACTOR_VALUE'] < df2_0['FACTOR_VALUE']) & (df1['FACTOR_VALUE'] > df2['FACTOR_VALUE'])
df1 = df1[id]
return df1
def greater(factor_list, stock_list, date, args):
nf = len(factor_list)
if nf == 1:
df = factor_list[0].get_val(stock_list, date)
df = df[df['FACTOR_VALUE'] > args[0]]
else:
df1 = factor_list[0].get_val(stock_list, date)
df2 = factor_list[1].get_val(stock_list, date)
df = df1[df1['FACTOR_VALUE'] > df2['FACTOR_VALUE']]
return df
def sort(factor_list, stock_list, date, args):
args = args[0]
df = factor_list[0].get_val(stock_list, date)
if args[0] == 'asc':
df = df.sort_values(['FACTOR_VALUE'], ascending=True)
nn = float(args[1])
if nn < 1:
nn = int(len(df) * nn)
nn = int(nn)
df = df.iloc[:nn, :]
else:
df = df.sort_values(['FACTOR_VALUE'], ascending=False)
nn = float(args[1])
if nn < 1:
nn = int(len(df) * nn)
nn = int(nn)
df = df.iloc[:nn, :]
return df
__d_filter_method = {'CROSS': cross,
'GREATER': greater,
'SORT': sort}
def apply_filter_method(method_name, factor_list, stock_list, date, args):
df = __d_filter_method[method_name](factor_list, stock_list, date, args)
return df
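# Illustrative usage sketch (hypothetical objects: each factor is assumed to
# expose get_val(stock_list, date) returning a DataFrame with a 'FACTOR_VALUE'
# column, as used above):
#
#   picked = apply_filter_method('SORT', [momentum_factor], stock_list,
#                                datetime(2017, 1, 3), [['desc', 0.1]])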
| mit |
valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/pandas/tseries/plotting.py | 9 | 9293 | """
Period formatters and locators adapted from scikits.timeseries by
Pierre GF Gerard-Marchant & Matt Knox
"""
#!!! TODO: Use the fact that axis can have units to simplify the process
import numpy as np
from matplotlib import pylab
from pandas.tseries.period import Period
from pandas.tseries.offsets import DateOffset
import pandas.tseries.frequencies as frequencies
from pandas.tseries.index import DatetimeIndex
import pandas.core.common as com
import pandas.compat as compat
from pandas.tseries.converter import (TimeSeries_DateLocator,
TimeSeries_DateFormatter)
#----------------------------------------------------------------------
# Plotting functions and monkey patches
def tsplot(series, plotf, ax=None, **kwargs):
"""
Plots a Series on the given Matplotlib axes or the current axes
Parameters
----------
    series : Series
    plotf : function
        Plotting function applied to the axes
    ax : Axes, optional
    Notes
    -----
Supports same kwargs as Axes.plot
"""
    # Use inferred freq if possible, need a test case for inferred
if ax is None:
import matplotlib.pyplot as plt
ax = plt.gca()
freq, series = _maybe_resample(series, ax, kwargs)
# Set ax with freq info
_decorate_axes(ax, freq, kwargs)
ax._plot_data.append((series, plotf, kwargs))
lines = plotf(ax, series.index._mpl_repr(), series.values, **kwargs)
# set date formatter, locators and rescale limits
format_dateaxis(ax, ax.freq)
return lines
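# Illustrative usage sketch (not from the original module); `ts` is assumed to
# be a pandas Series with a DatetimeIndex or PeriodIndex:
#
#   import matplotlib.pyplot as plt
#   from matplotlib.axes import Axes
#   fig, ax = plt.subplots()
#   tsplot(ts, Axes.plot, ax=ax)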
def _maybe_resample(series, ax, kwargs):
# resample against axes freq if necessary
freq, ax_freq = _get_freq(ax, series)
if freq is None: # pragma: no cover
raise ValueError('Cannot use dynamic axis without frequency info')
# Convert DatetimeIndex to PeriodIndex
if isinstance(series.index, DatetimeIndex):
series = series.to_period(freq=freq)
if ax_freq is not None and freq != ax_freq:
if frequencies.is_superperiod(freq, ax_freq): # upsample input
series = series.copy()
series.index = series.index.asfreq(ax_freq, how='s')
freq = ax_freq
elif _is_sup(freq, ax_freq): # one is weekly
how = kwargs.pop('how', 'last')
series = series.resample('D', how=how).dropna()
series = series.resample(ax_freq, how=how).dropna()
freq = ax_freq
elif frequencies.is_subperiod(freq, ax_freq) or _is_sub(freq, ax_freq):
_upsample_others(ax, freq, kwargs)
ax_freq = freq
else: # pragma: no cover
raise ValueError('Incompatible frequency conversion')
return freq, series
def _is_sub(f1, f2):
return ((f1.startswith('W') and frequencies.is_subperiod('D', f2)) or
(f2.startswith('W') and frequencies.is_subperiod(f1, 'D')))
def _is_sup(f1, f2):
return ((f1.startswith('W') and frequencies.is_superperiod('D', f2)) or
(f2.startswith('W') and frequencies.is_superperiod(f1, 'D')))
def _upsample_others(ax, freq, kwargs):
legend = ax.get_legend()
lines, labels = _replot_ax(ax, freq, kwargs)
_replot_ax(ax, freq, kwargs)
other_ax = None
if hasattr(ax, 'left_ax'):
other_ax = ax.left_ax
if hasattr(ax, 'right_ax'):
other_ax = ax.right_ax
if other_ax is not None:
rlines, rlabels = _replot_ax(other_ax, freq, kwargs)
lines.extend(rlines)
labels.extend(rlabels)
if (legend is not None and kwargs.get('legend', True) and
len(lines) > 0):
title = legend.get_title().get_text()
if title == 'None':
title = None
ax.legend(lines, labels, loc='best', title=title)
def _replot_ax(ax, freq, kwargs):
data = getattr(ax, '_plot_data', None)
# clear current axes and data
ax._plot_data = []
ax.clear()
_decorate_axes(ax, freq, kwargs)
lines = []
labels = []
if data is not None:
for series, plotf, kwds in data:
series = series.copy()
idx = series.index.asfreq(freq, how='S')
series.index = idx
ax._plot_data.append((series, plotf, kwds))
# for tsplot
if isinstance(plotf, compat.string_types):
from pandas.tools.plotting import _plot_klass
plotf = _plot_klass[plotf]._plot
lines.append(plotf(ax, series.index._mpl_repr(), series.values, **kwds)[0])
labels.append(com.pprint_thing(series.name))
return lines, labels
def _decorate_axes(ax, freq, kwargs):
"""Initialize axes for time-series plotting"""
if not hasattr(ax, '_plot_data'):
ax._plot_data = []
ax.freq = freq
xaxis = ax.get_xaxis()
xaxis.freq = freq
if not hasattr(ax, 'legendlabels'):
ax.legendlabels = [kwargs.get('label', None)]
else:
ax.legendlabels.append(kwargs.get('label', None))
ax.view_interval = None
ax.date_axis_info = None
def _get_freq(ax, series):
# get frequency from data
freq = getattr(series.index, 'freq', None)
if freq is None:
freq = getattr(series.index, 'inferred_freq', None)
ax_freq = getattr(ax, 'freq', None)
if ax_freq is None:
if hasattr(ax, 'left_ax'):
ax_freq = getattr(ax.left_ax, 'freq', None)
elif hasattr(ax, 'right_ax'):
ax_freq = getattr(ax.right_ax, 'freq', None)
# use axes freq if no data freq
if freq is None:
freq = ax_freq
# get the period frequency
if isinstance(freq, DateOffset):
freq = freq.rule_code
else:
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
return freq, ax_freq
def _use_dynamic_x(ax, data):
freq = _get_index_freq(data)
ax_freq = getattr(ax, 'freq', None)
if freq is None: # convert irregular if axes has freq info
freq = ax_freq
else: # do not use tsplot if irregular was plotted first
if (ax_freq is None) and (len(ax.get_lines()) > 0):
return False
if freq is None:
return False
if isinstance(freq, DateOffset):
freq = freq.rule_code
else:
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
if freq is None:
return False
# hack this for 0.10.1, creating more technical debt...sigh
if isinstance(data.index, DatetimeIndex):
base = frequencies.get_freq(freq)
x = data.index
if (base <= frequencies.FreqGroup.FR_DAY):
return x[:1].is_normalized
return Period(x[0], freq).to_timestamp(tz=x.tz) == x[0]
return True
def _get_index_freq(data):
freq = getattr(data.index, 'freq', None)
if freq is None:
freq = getattr(data.index, 'inferred_freq', None)
if freq == 'B':
weekdays = np.unique(data.index.dayofweek)
if (5 in weekdays) or (6 in weekdays):
freq = None
return freq
def _maybe_convert_index(ax, data):
# tsplot converts automatically, but don't want to convert index
# over and over for DataFrames
if isinstance(data.index, DatetimeIndex):
freq = getattr(data.index, 'freq', None)
if freq is None:
freq = getattr(data.index, 'inferred_freq', None)
if isinstance(freq, DateOffset):
freq = freq.rule_code
if freq is None:
freq = getattr(ax, 'freq', None)
if freq is None:
raise ValueError('Could not get frequency alias for plotting')
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
data = data.to_period(freq=freq)
return data
# Patch methods for subplot. Only format_dateaxis is currently used.
# Do we need the rest for convenience?
def format_dateaxis(subplot, freq):
"""
Pretty-formats the date axis (x-axis).
Major and minor ticks are automatically set for the frequency of the
current underlying series. As the dynamic mode is activated by
default, changing the limits of the x axis will intelligently change
the positions of the ticks.
"""
majlocator = TimeSeries_DateLocator(freq, dynamic_mode=True,
minor_locator=False,
plot_obj=subplot)
minlocator = TimeSeries_DateLocator(freq, dynamic_mode=True,
minor_locator=True,
plot_obj=subplot)
subplot.xaxis.set_major_locator(majlocator)
subplot.xaxis.set_minor_locator(minlocator)
majformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True,
minor_locator=False,
plot_obj=subplot)
minformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True,
minor_locator=True,
plot_obj=subplot)
subplot.xaxis.set_major_formatter(majformatter)
subplot.xaxis.set_minor_formatter(minformatter)
# x and y coord info
subplot.format_coord = lambda t, y: ("t = {0} "
"y = {1:8f}".format(Period(ordinal=int(t), freq=freq), y))
pylab.draw_if_interactive()
| gpl-2.0 |
Rossonero/bmlswp | ch06/02_tuning.py | 22 | 5484 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
#
# This script trains tries to tweak hyperparameters to improve P/R AUC
#
import time
start_time = time.time()
import numpy as np
from sklearn.metrics import precision_recall_curve, roc_curve, auc
from sklearn.cross_validation import ShuffleSplit
from utils import plot_pr
from utils import load_sanders_data
from utils import tweak_labels
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import f1_score
from sklearn.naive_bayes import MultinomialNB
phase = "02"
def create_ngram_model(params=None):
tfidf_ngrams = TfidfVectorizer(ngram_range=(1, 3),
analyzer="word", binary=False)
clf = MultinomialNB()
pipeline = Pipeline([('vect', tfidf_ngrams), ('clf', clf)])
if params:
pipeline.set_params(**params)
return pipeline
def grid_search_model(clf_factory, X, Y):
cv = ShuffleSplit(
n=len(X), n_iter=10, test_size=0.3, random_state=0)
param_grid = dict(vect__ngram_range=[(1, 1), (1, 2), (1, 3)],
vect__min_df=[1, 2],
vect__stop_words=[None, "english"],
vect__smooth_idf=[False, True],
vect__use_idf=[False, True],
vect__sublinear_tf=[False, True],
vect__binary=[False, True],
clf__alpha=[0, 0.01, 0.05, 0.1, 0.5, 1],
)
grid_search = GridSearchCV(clf_factory(),
param_grid=param_grid,
cv=cv,
score_func=f1_score,
verbose=10)
grid_search.fit(X, Y)
clf = grid_search.best_estimator_
print(clf)
return clf
def train_model(clf, X, Y, name="NB ngram", plot=False):
# create it again for plotting
cv = ShuffleSplit(
n=len(X), n_iter=10, test_size=0.3, indices=True, random_state=0)
train_errors = []
test_errors = []
scores = []
pr_scores = []
precisions, recalls, thresholds = [], [], []
for train, test in cv:
X_train, y_train = X[train], Y[train]
X_test, y_test = X[test], Y[test]
clf.fit(X_train, y_train)
train_score = clf.score(X_train, y_train)
test_score = clf.score(X_test, y_test)
train_errors.append(1 - train_score)
test_errors.append(1 - test_score)
scores.append(test_score)
proba = clf.predict_proba(X_test)
fpr, tpr, roc_thresholds = roc_curve(y_test, proba[:, 1])
precision, recall, pr_thresholds = precision_recall_curve(
y_test, proba[:, 1])
pr_scores.append(auc(recall, precision))
precisions.append(precision)
recalls.append(recall)
thresholds.append(pr_thresholds)
if plot:
scores_to_sort = pr_scores
        median = np.argsort(scores_to_sort)[len(scores_to_sort) // 2]
plot_pr(pr_scores[median], name, phase, precisions[median],
recalls[median], label=name)
summary = (np.mean(scores), np.std(scores),
np.mean(pr_scores), np.std(pr_scores))
print("%.3f\t%.3f\t%.3f\t%.3f\t" % summary)
return np.mean(train_errors), np.mean(test_errors)
def print_incorrect(clf, X, Y):
Y_hat = clf.predict(X)
wrong_idx = Y_hat != Y
X_wrong = X[wrong_idx]
Y_wrong = Y[wrong_idx]
Y_hat_wrong = Y_hat[wrong_idx]
for idx in range(len(X_wrong)):
print("clf.predict('%s')=%i instead of %i" %
(X_wrong[idx], Y_hat_wrong[idx], Y_wrong[idx]))
def get_best_model():
best_params = dict(vect__ngram_range=(1, 2),
vect__min_df=1,
vect__stop_words=None,
vect__smooth_idf=False,
vect__use_idf=False,
vect__sublinear_tf=True,
vect__binary=False,
clf__alpha=0.01,
)
best_clf = create_ngram_model(best_params)
return best_clf
if __name__ == "__main__":
X_orig, Y_orig = load_sanders_data()
classes = np.unique(Y_orig)
for c in classes:
print("#%s: %i" % (c, sum(Y_orig == c)))
print("== Pos vs. neg ==")
pos_neg = np.logical_or(Y_orig == "positive", Y_orig == "negative")
X = X_orig[pos_neg]
Y = Y_orig[pos_neg]
Y = tweak_labels(Y, ["positive"])
train_model(get_best_model(), X, Y, name="pos vs neg", plot=True)
print("== Pos/neg vs. irrelevant/neutral ==")
X = X_orig
Y = tweak_labels(Y_orig, ["positive", "negative"])
# best_clf = grid_search_model(create_ngram_model, X, Y, name="sent vs
# rest", plot=True)
train_model(get_best_model(), X, Y, name="pos vs neg", plot=True)
print("== Pos vs. rest ==")
X = X_orig
Y = tweak_labels(Y_orig, ["positive"])
train_model(get_best_model(), X, Y, name="pos vs rest",
plot=True)
print("== Neg vs. rest ==")
X = X_orig
Y = tweak_labels(Y_orig, ["negative"])
train_model(get_best_model(), X, Y, name="neg vs rest",
plot=True)
print("time spent:", time.time() - start_time)
| mit |
xubenben/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 | """
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| bsd-3-clause |
samuel1208/scikit-learn | benchmarks/bench_tree.py | 297 | 3617 | """
To run this, you'll need to have installed.
* scikit-learn
Does two benchmarks
First, we fix a training set, increase the number of
samples to classify and plot number of classified samples as a
function of time.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import pylab as pl
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
"""Benchmark with scikit-learn decision tree classifier"""
from sklearn.tree import DecisionTreeClassifier
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeClassifier()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_classifier_results.append(
delta.seconds + delta.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
"""Benchmark with scikit-learn decision tree regressor"""
from sklearn.tree import DecisionTreeRegressor
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeRegressor()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_regressor_results.append(
delta.seconds + delta.microseconds / mu_second)
if __name__ == '__main__':
print('============================================')
print('Warning: this is going to take a looong time')
print('============================================')
n = 10
step = 10000
n_samples = 10000
dim = 10
n_classes = 10
for i in range(n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
n_samples += step
X = np.random.randn(n_samples, dim)
Y = np.random.randint(0, n_classes, (n_samples,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(n_samples)
bench_scikit_tree_regressor(X, Y)
xx = range(0, n * step, step)
pl.figure('scikit-learn tree benchmark results')
pl.subplot(211)
pl.title('Learning with varying number of samples')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
scikit_classifier_results = []
scikit_regressor_results = []
n = 10
step = 500
start_dim = 500
n_classes = 10
dim = start_dim
for i in range(0, n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
dim += step
X = np.random.randn(100, dim)
Y = np.random.randint(0, n_classes, (100,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(100)
bench_scikit_tree_regressor(X, Y)
xx = np.arange(start_dim, start_dim + n * step, step)
pl.subplot(212)
pl.title('Learning in high dimensional spaces')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of dimensions')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
blackecho/Deep-Belief-Network | yadlt/core/unsupervised_model.py | 2 | 4251 | """Unsupervised Model scheleton."""
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from yadlt.core.model import Model
from yadlt.utils import tf_utils
class UnsupervisedModel(Model):
"""Unsupervised Model scheleton class.
The interface of the class is sklearn-like.
Methods
-------
* fit(): model training procedure.
* transform(): model inference procedure.
* reconstruct(): model reconstruction procedure (autoencoders).
* score(): model scoring procedure (mean error).
"""
def __init__(self, name):
"""Constructor."""
Model.__init__(self, name)
def fit(self, train_X, train_Y=None, val_X=None, val_Y=None, graph=None):
"""Fit the model to the data.
Parameters
----------
train_X : array_like, shape (n_samples, n_features)
Training data.
train_Y : array_like, shape (n_samples, n_features)
Training reference data.
val_X : array_like, shape (N, n_features) optional, (default = None).
Validation data.
val_Y : array_like, shape (N, n_features) optional, (default = None).
Validation reference data.
graph : tf.Graph, optional (default = None)
Tensorflow Graph object.
        """
g = graph if graph is not None else self.tf_graph
with g.as_default():
# Build model
self.build_model(train_X.shape[1])
with tf.Session() as self.tf_session:
# Initialize tf stuff
summary_objs = tf_utils.init_tf_ops(self.tf_session)
self.tf_merged_summaries = summary_objs[0]
self.tf_summary_writer = summary_objs[1]
self.tf_saver = summary_objs[2]
# Train model
self._train_model(train_X, train_Y, val_X, val_Y)
# Save model
self.tf_saver.save(self.tf_session, self.model_path)
def transform(self, data, graph=None):
"""Transform data according to the model.
Parameters
----------
data : array_like, shape (n_samples, n_features)
Data to transform.
graph : tf.Graph, optional (default = None)
Tensorflow Graph object
Returns
-------
array_like, transformed data
"""
g = graph if graph is not None else self.tf_graph
with g.as_default():
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
feed = {self.input_data: data, self.keep_prob: 1}
return self.encode.eval(feed)
def reconstruct(self, data, graph=None):
"""Reconstruct data according to the model.
Parameters
----------
data : array_like, shape (n_samples, n_features)
Data to transform.
graph : tf.Graph, optional (default = None)
Tensorflow Graph object
Returns
-------
array_like, transformed data
"""
g = graph if graph is not None else self.tf_graph
with g.as_default():
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
feed = {self.input_data: data, self.keep_prob: 1}
return self.reconstruction.eval(feed)
def score(self, data, data_ref, graph=None):
"""Compute the reconstruction loss over the test set.
Parameters
----------
data : array_like
Data to reconstruct.
data_ref : array_like
Reference data.
Returns
-------
float: Mean error.
"""
g = graph if graph is not None else self.tf_graph
with g.as_default():
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
feed = {
self.input_data: data,
self.input_labels: data_ref,
self.keep_prob: 1
}
return self.cost.eval(feed)
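# Illustrative subclass sketch (not part of the original file): a concrete
# model is expected to implement build_model() and _train_model() and to
# define the tensors referenced above (input_data, input_labels, keep_prob,
# encode, reconstruction, cost), e.g.:
#
#   class MyAutoencoder(UnsupervisedModel):
#       def build_model(self, n_features):
#           ...  # create input_data, keep_prob, encode, reconstruction, cost
#       def _train_model(self, train_X, train_Y, val_X, val_Y):
#           ...  # run the training loop inside self.tf_session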
| apache-2.0 |
braghiere/JULESv4.6_clump | examples/us-me2/output/plot_gpp_anomaly_boreas.py | 1 | 11326 | import os
import matplotlib.pyplot as plt
import numpy as np
import sys
from matplotlib.font_manager import FontProperties
from matplotlib.ticker import MultipleLocator
import matplotlib.patches as mpatches # for mask legend
from matplotlib.font_manager import FontProperties
from matplotlib import cm
import pandas as pd
import matplotlib
from matplotlib import dates as d
import datetime as dt
import scipy.stats as st
import matplotlib.pyplot as plt; plt.rcdefaults()
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
SIZE = 32
plt.rc('font', size=SIZE) # controls default text sizes
plt.rc('axes', titlesize=SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=SIZE) # fontsize of the x any y labels
plt.rc('xtick', labelsize=SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SIZE) # legend fontsize
plt.rc('figure', titlesize=SIZE) # # size of the figure title
inputdir = "/glusterfs/phd/users/mn811042/jules_struct/JULES_trunk_lim_gpp/examples/boreas/output/files/"
#filename = 'output_layer_250_%i_12_a_042.csv'
filename1 = 'output_layer_500_%i_11_CRM_4.txt'
#filename2 = 'output_layer_500_%i_11_struc.txt'
filename2 = 'output_layer_500_%i_11_CRM_5.txt'
PPFDs = np.arange(50)
#temps = np.arange(0,52,2)
#cas = np.arange(200,850,50)
layers = np.arange(1,11,1)
szas = np.arange(30,91,1)
data = pd.read_csv("file_lai_500_CRM_4_5.csv", index_col=0, parse_dates=True).tz_localize("UTC").tz_convert("Etc/GMT+7")
data.head()
#data['Time'] = data.index.map(lambda x: x.strftime("%H:%M"))
#print data['Time']
#data.head()
#data = data.groupby('Time').describe().unstack()
data = data.groupby(pd.cut(data["cosz"], np.arange(0.0,1.0,0.075))).describe().unstack()
#print data
#data.index = pd.to_datetime(data.index.astype(str))
data.index = np.degrees(np.arccos(data["cosz"]['mean']))
szas = np.int16(data.index)
print szas
#for opt T
#filename = 'output_%i_%i_pft%i_opt_t'
#PPFDs = np.arange(0)
#temps = np.arange(0,90,10)
#cas = np.arange(400)
# create a matrix dim0: PPFDs,
# dim1: Tl
# dim2: cas
# dim3: variables # An, Wc, Wl, We, mask (1 = Wcarb limiting; 2= Wexp limiting)
ft = 1
#W = np.zeros((len(PPFDs),len(temps),len(cas),12))
W_1 = np.zeros((len(szas),len(layers),11))
#for i,p in enumerate(PPFDs):
for j,sza in enumerate(szas):
ofile = filename1 %(sza)
print ofile
with open(os.path.join(inputdir,ofile),'r') as f:
#header = f.readline()
lines = f.readlines()
for k,line in enumerate(lines): # could do another loop
# over all the variables
W_1[j,k,0] = line.split(' ')[1] # GPP
W_1[j,k,1] = line.split(' ')[3] # Wcarb
W_1[j,k,2] = line.split(' ')[2] # Wlite
W_1[j,k,3] = line.split(' ')[4] # Wexp
#W[j,k,4] = line.split(',')[14] # Rd
#W[j,k,5] = line.split(',')[11] # ci
#W[j,k,6] = line.split(',')[12] # gl
#W[j,k,7] = line.split(',')[18] # dqc
W_1[j,k,9] = W_1[j,k,0] # GPP
#print j,k,W[j,k,0]
if (W_1[j,k,1]==0) & (W_1[j,k,2]==0) & (W_1[j,k,3]==0):
Wmin = 0
ma = 0 # all are zero
elif (W_1[j,k,1] <= W_1[j,k,2]): # compare Wcarb and Wlite
Wmin = W_1[j,k,1]
ma= 1 # Wcarb limiting
else:
Wmin = W_1[j,k,2]
ma= 2 # Wlite limiting
# compare the minimum of Wcarb and Wlite with Wexp, but only if they are not all 0
if (ma!=0) & (W_1[j,k,3] <= Wmin):
Wmin = W_1[j,k,3]
ma= 3 # Wexp limiting
W_1[j,k,8] = ma
#print W[j,k,8]
# create masks # I'll try without, just using variable l=8
#if (f==0): W[i,j,9] = 1
#if (f==1): W[i,j,10] = 1
#if (f==2): W[i,j,11] = 1
###########################################################################################
#sys.exit()
# create a matrix dim0: PPFDs,
# dim1: Tl
# dim2: cas
# dim3: variables # An, Wc, Wl, We, mask (1 = Wcarb limiting; 2= Wexp limiting)
ft = 1
#W = np.zeros((len(PPFDs),len(temps),len(cas),12))
W_2 = np.zeros((len(szas),len(layers),11))
#for i,p in enumerate(PPFDs):
for j,sza in enumerate(szas):
ofile = filename2 %(sza)
print ofile
with open(os.path.join(inputdir,ofile),'r') as f:
#header = f.readline()
lines = f.readlines()
for k,line in enumerate(lines): # could do another loop
# over all the variables
W_2[j,k,0] = line.split(' ')[1] # GPP
W_2[j,k,1] = line.split(' ')[3] # Wcarb
W_2[j,k,2] = line.split(' ')[2] # Wlite
W_2[j,k,3] = line.split(' ')[4] # Wexp
W_2[j,k,9] = W_2[j,k,0] # GPP
#print j,k,W[j,k,0]
if (W_2[j,k,1]==0) & (W_2[j,k,2]==0) & (W_2[j,k,3]==0):
Wmin = 0
ma = 0 # all are zero
elif (W_2[j,k,1] <= W_2[j,k,2]): # compare Wcarb and Wlite
Wmin = W_2[j,k,1]
ma= 1 # Wcarb limiting
else:
Wmin = W_2[j,k,2]
ma= 2 # Wlite limiting
# compare the minimum of Wcarb and Wlite with Wexp, but only if they are not all 0
if (ma!=0) & (W_2[j,k,3] <= Wmin):
Wmin = W_2[j,k,3]
ma= 3 # Wexp limiting
W_2[j,k,8] = ma
#print W[j,k,8]
# create masks # I'll try without, just using variable l=8
#if (f==0): W[i,j,9] = 1
#if (f==1): W[i,j,10] = 1
#if (f==2): W[i,j,11] = 1
###########################################################################################
#sys.exit()
szas_repeat= np.repeat(layers,len(szas))
layers_tile = np.tile(szas,len(layers))
W_1_carb = np.ma.masked_where(W_1[:,:,8].T !=1, W_1[:,:,8].T)
W_1_lite = np.ma.masked_where(W_1[:,:,8].T !=2, W_1[:,:,8].T)
W_1_exp = np.ma.masked_where(W_1[:,:,8].T !=3, W_1[:,:,8].T)
W_2_carb = np.ma.masked_where(W_2[:,:,8].T !=1, W_2[:,:,8].T)
W_2_lite = np.ma.masked_where(W_2[:,:,8].T !=2, W_2[:,:,8].T)
W_2_exp = np.ma.masked_where(W_2[:,:,8].T !=3, W_2[:,:,8].T)
print "GPP_TS[:,:,0,0]*1e6=",W_1[:,:,0]*1e6,"GPP_SF[:,:,0,0]*1e6=",W_2[:,:,0]*1e6
GPP_anomaly = np.zeros((len(szas),len(layers),10))
GPP_anomaly_perc = np.zeros((len(szas),len(layers),10))
for j,sza in enumerate(szas):
for k,line in enumerate(lines):
GPP_anomaly[j,k,9] = (W_2[j,k,9]-W_1[j,k,9])
GPP_anomaly_perc[j,k,9] = 100*(W_2[j,k,9] - W_1[j,k,9])/((W_1[j,k,9] + W_2[j,k,9])/2)
#print GPP_anomaly[j,k,0]
#print "GPP_anomaly[",j,",",k,",0]=(",W_1[j,k,9],"-",W_2[j,k,9],")*1e6"
szas_repeat= np.repeat(layers,len(szas))
layers_tile = np.tile(szas,len(layers))
#W=W5
print "Integral(GPPanomaly)=",np.sum(GPP_anomaly)
print "Integral(GPPTS)=",np.sum(W_1[:,:,0]*1e6)
print "Integral(GPPSF)=",np.sum(W_2[:,:,0]*1e6)
fig,ax = plt.subplots(figsize=(10,10))
levels = np.arange(0,11,1)
fig.subplots_adjust(right=0.75)
W_carb = np.ma.masked_where(W_2[:,:,8].T == W_1[:,:,8].T,W_2_carb)
W_lite = np.ma.masked_where(W_2[:,:,8].T == W_1[:,:,8].T,W_2_lite)
W_exp = np.ma.masked_where(W_2[:,:,8].T == W_1[:,:,8].T,W_2_exp)
plt.gca().invert_yaxis()
plt.ylim(ymin=10.5,ymax=0.5)
plt.xlim(xmin=29.2,xmax=90.5)
plt.yticks(np.arange(1.0, 10.1, 1.0))
c1=plt.contourf(szas,layers,GPP_anomaly_perc[:,:,9].T,np.arange(-100, 100.1, 10),extend='both',cmap=cm.bwr,alpha=1.0,interpolation='bicubic')
#c1=plt.contourf(szas,layers,GPP_anomaly[:,:,9].T,np.arange(-2.0, 2.1, 0.2),extend='both',cmap=cm.bwr,alpha=1.0,interpolation='bicubic')
plt.scatter(layers_tile,szas_repeat,c=W_carb,marker='^',color='k',s=120,label='carbon limited')
plt.scatter(layers_tile,szas_repeat,c=W_lite,marker='.',color='k',s=120,label='light limited')
plt.scatter(layers_tile,szas_repeat,c=W_exp,marker='+',color='k',s=120,label='export limited')
#WITH LEGEND
# loc = 4 > 'lower right'
#plt.legend(loc=4,fontsize = 20)
#NO LEGEND
plt.legend_ = None
#c1=plt.contourf(szas,layers,GPP_anomaly[:,:,9].T,np.arange(-0.0000009, 0.0000000015, .00000015),extend='both',cmap=cm.Blues_r,alpha=0.8)
# in white
# To avoid rainbow scale could use , cmap=plt.cm.RdYlBu, but i think is less clear
# Gives error if the mask is all true (that limit is not present)
# If the mask is all true, scatter gives a value error, in that case
# scatter with dummy argument, so the label still appears in the legend
#try: plt.scatter(szas_repeat,layers_tile,c=W_carb,marker='^',color='k',s=30,label='carbon limited')
#try: plt.scatter(layers_tile,szas_repeat,c=W_carb,marker='^',color='k',s=30,label='carbon limited')
#except ValueError: plt.scatter([],[],marker='^',color='k',s=30,label='carbon limited')
##try: plt.scatter(szas_repeat,layers_tile,c=W_lite,marker='.',color='k',s=30,label='light limited')
#try: plt.scatter(layers_tile,szas_repeat,c=W_lite,marker='.',color='k',s=30,label='light limited')
#except ValueError: plt.scatter([],[],marker='.',color='k',s=30,label='light limited')
##try: plt.scatter(szas_repeat,layers_tile,c=W_exp,marker='+',color='k',s=30,label='export limited')
#try: plt.scatter(layers_tile,szas_repeat,c=W_exp,marker='+',color='k',s=30,label='export limited')
#except ValueError: plt.scatter([],[],marker='+',color='k',s=30,label='export limited')
plt.ylabel('Layer',fontsize = 20)
#plt.text(0.5,1500,'ca = 200ppm',bbox=dict(facecolor='white'))
#plt.legend(ncol=3,prop={'size':10},bbox_to_anchor = (0.755, 1.2),scatterpoints=1)
plt.xlabel('Solar Zenith Angle',fontsize = 20)
plt.tick_params(labelsize=20)
plt.title(r'Diurnal Profile - DOY 193-206',fontsize = 20)
cbar_ax = fig.add_axes([0.8, 0.15, 0.03, 0.7])
cbar=fig.colorbar(c1, cax=cbar_ax)
cbar.ax.set_ylabel(r'GPP anomaly (%)',fontsize = 20)
#cbar.ax.set_ylabel(r'GPP anomaly ($\mu$mol.m$^{-2}$.s$^{-1}$)',fontsize = 20)
cbar.ax.tick_params(labelsize=20)
plt.savefig('/home/mn811042/Thesis/chapter6/sensitivity_jules_boreas/figures_boreas_ssa_oa_can_rad_mod/gpp_anomaly_lai_500_CRM_4_5_perc_cosz.png')
#plt.savefig('/home/mn811042/Desktop_1/report/MC5/mc5_angus/gpp_anomaly_ppdf_t_25_c4_lai_450_a_042-250_a_1_alpha_wcarb_exp_divided_by_layers.pdf')
#plt.savefig('/home/mn811042/Thesis/chapter6/GPP_layer_sza_ppdf_342_sza_t_22_ca_400_pft_c3_lai_250_albedo_12_SF-TS.pdf')
plt.show()
| gpl-2.0 |
braghiere/JULESv4.6_clump | examples/boreas_4.6/output/plotfapar_cosz.py | 1 | 7233 | import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import dates as d
import datetime as dt
import numpy as np
# CODE FROM http://nipunbatra.github.io/2015/06/timeseries/
plt.style.use('ggplot')
SIZE = 32
plt.rc('font', size=SIZE) # controls default text sizes
plt.rc('axes', titlesize=SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SIZE) # legend fontsize
plt.rc('figure', titlesize=SIZE) # size of the figure title
# CODE FROM https://www.davidhagan.me/articles?id=7
#data = pd.read_excel(filename, sheetname, skiprows = skiprows, index_col = index_col)
#data = pd.read_csv("weather_2.csv", index_col=0, parse_dates=True).tz_localize("UTC").tz_convert("US/Cebtral")
#data = pd.read_csv("file_193_206_bt_flag_no.csv", index_col=0, parse_dates=True).tz_localize("UTC")
#data = pd.read_csv("file_193_206_bt_flag_no.csv", index_col=0, parse_dates=True).tz_localize("UTC").tz_convert("US/Pacific")
data = pd.read_csv("file_193_206_bt_500_can_mod_5_diff_struc.csv", index_col=0, parse_dates=True).tz_localize("UTC").tz_convert("Etc/GMT+7")
#print data
data.head()
#data['Time'] = data.index.map(lambda x: x.strftime("%H:%M"))
#print data['Time']
#data.head()
#data = data.groupby('Time').describe().unstack()
data = data.groupby(pd.cut(data["cosz"], np.arange(0.0,1.0,0.075))).describe().unstack()
#print data
#data.index = pd.to_datetime(data.index.astype(str))
data.index = np.degrees(np.arccos(data["cosz"]['mean']))
#print data.index, data["PARi"]['mean']/4.6
print data.index, data["temp"]['mean']
fig, ax = plt.subplots(1, figsize=(24,12))
ax.set_title('Diurnal Profile - DOY 193-206', fontsize=32)
ax.set_ylabel('fAPAR', fontsize=32, weight='bold')
ax.set_xlabel('Solar Zenith Angle', fontsize=32)
ax.plot(data.index, data["fapar_jules"]['mean'], 'g', linewidth=2.0, label = 'Option 5')
ax.plot(data.index, data["fapar_jules_struc"]['mean'], 'r', linewidth=2.0, label = 'Option 5 (diff + struc)')
ticks = ax.get_xticks()
#ax.set_xticks(np.linspace(ticks[0], d.date2num(d.num2date(ticks[-1]) + dt.timedelta(hours=3)), 5))
#ax.set_xticks(np.linspace(ticks[0], d.date2num(d.num2date(ticks[-1]) + dt.timedelta(hours=3)), 25), minor=True)
#ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%I:%M %p'))
plt.tight_layout()
#plt.show()
plt.legend(loc="best")
ax.plot(data.index, data["fapar_jules"]['75%'], color='g')
ax.plot(data.index, data["fapar_jules"]['25%'], color='g')
ax.plot(data.index, data["fapar_jules_struc"]['75%'], color='r')
ax.plot(data.index, data["fapar_jules_struc"]['25%'], color='r')
ax.fill_between(data.index, data["fapar_jules"]['mean'], data["fapar_jules"]['75%'], alpha=.5, facecolor='g')
ax.fill_between(data.index, data["fapar_jules"]['mean'], data["fapar_jules"]['25%'], alpha=.5, facecolor='g')
ax.fill_between(data.index, data["fapar_jules_struc"]['mean'], data["fapar_jules_struc"]['75%'], alpha=.5, facecolor='r')
ax.fill_between(data.index, data["fapar_jules_struc"]['mean'], data["fapar_jules_struc"]['25%'], alpha=.5, facecolor='r')
plt.savefig('/home/mn811042/Thesis/chapter6/sensitivity_jules_boreas/figures_boreas_ssa_oa_can_rad_mod/fapar_193_206_bt_lai_500_can_rad_5_diff_struc_cosz.png')
plt.show()
fig, ax = plt.subplots(1, figsize=(24,12))
ax.set_title('Diurnal Profile - DOY 193-206', fontsize=32)
ax.set_ylabel('Fraction of Diffuse Radiation', fontsize=32, weight='bold')
ax.set_xlabel('Solar Zenith Angle', fontsize=32)
#ax.plot(data.index, data["diff_jules"]['mean'], 'g', linewidth=2.0, label = 'JULES')
#ax.plot(data.index, data["diff_jules_struc"]['mean'], 'r', linewidth=2.0, label = 'JULES Struc')
ticks = ax.get_xticks()
#ax.set_xticks(np.linspace(ticks[0], d.date2num(d.num2date(ticks[-1]) + dt.timedelta(hours=3)), 5))
#ax.set_xticks(np.linspace(ticks[0], d.date2num(d.num2date(ticks[-1]) + dt.timedelta(hours=3)), 25), minor=True)
#ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%I:%M %p'))
plt.tight_layout()
#plt.show()
#plt.legend(loc="best")
#ax.plot(data.index, data["diff_jules"]['75%'], color='g')
#ax.plot(data.index, data["diff_jules"]['25%'], color='g')
#ax.plot(data.index, data["diff_jules_struc"]['75%'], color='r')
#ax.plot(data.index, data["diff_jules_struc"]['25%'], color='r')
#ax.fill_between(data.index, data["diff_jules"]['mean'], data["diff_jules"]['75%'], alpha=.5, facecolor='g')
#ax.fill_between(data.index, data["diff_jules"]['mean'], data["diff_jules"]['25%'], alpha=.5, facecolor='g')
#ax.fill_between(data.index, data["diff_jules_struc"]['mean'], data["diff_jules_struc"]['75%'], alpha=.5, facecolor='r')
#ax.fill_between(data.index, data["diff_jules_struc"]['mean'], data["diff_jules_struc"]['25%'], alpha=.5, facecolor='r')
#plt.savefig('/home/mn811042/Thesis/chapter6/sensitivity_jules_boreas/figures_boreas_ssa_oa_can_rad_mod/diff_193_206_bt_lai_500_can_rad_5_diff_cosz.png')
plt.show()
fig, ax = plt.subplots(1, figsize=(24,12))
ax.set_title('Diurnal Profile - DOY 193-206', fontsize=32)
ax.set_ylabel(r'GPP ($\mu$mol.m$^{-2}$.s$^{-1}$)', fontsize=32, weight='bold')
ax.set_xlabel('Solar Zenith Angle', fontsize=32)
data["gpp_jules"] = data["gpp_jules"]/data["fsmc_jules"]
data["gpp_jules_struc"] = data["gpp_jules_struc"]/data["fsmc_jules_struc"]
ax.plot(data.index, data["gpp"]['mean'], 'ko', linewidth=2.0, label = 'Obs.')
ax.plot(data.index, data["gpp_jules"]['mean'], 'g', linewidth=2.0, label = 'Option 5')
ax.plot(data.index, data["gpp_jules_struc"]['mean'], 'r', linewidth=2.0, label = 'Option 5 (diff + struc)')
ticks = ax.get_xticks()
#ax.set_xticks(np.linspace(ticks[0], d.date2num(d.num2date(ticks[-1]) + dt.timedelta(hours=3)), 5))
#ax.set_xticks(np.linspace(ticks[0], d.date2num(d.num2date(ticks[-1]) + dt.timedelta(hours=3)), 25), minor=True)
#ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%I:%M %p'))
plt.tight_layout()
#plt.show()
plt.legend(loc="best")
ax.plot(data.index, data["gpp"]['75%'], color='k')
ax.plot(data.index, data["gpp"]['25%'], color='k')
ax.plot(data.index, data["gpp_jules"]['75%'], color='g')
ax.plot(data.index, data["gpp_jules"]['25%'], color='g')
ax.plot(data.index, data["gpp_jules_struc"]['75%'], color='r')
ax.plot(data.index, data["gpp_jules_struc"]['25%'], color='r')
ax.fill_between(data.index, data["gpp"]['mean'], data["gpp"]['75%'], alpha=.5, facecolor='k')
ax.fill_between(data.index, data["gpp"]['mean'], data["gpp"]['25%'], alpha=.5, facecolor='k')
ax.fill_between(data.index, data["gpp_jules"]['mean'], data["gpp_jules"]['75%'], alpha=.5, facecolor='g')
ax.fill_between(data.index, data["gpp_jules"]['mean'], data["gpp_jules"]['25%'], alpha=.5, facecolor='g')
ax.fill_between(data.index, data["gpp_jules_struc"]['mean'], data["gpp_jules_struc"]['75%'], alpha=.5, facecolor='r')
ax.fill_between(data.index, data["gpp_jules_struc"]['mean'], data["gpp_jules_struc"]['25%'], alpha=.5, facecolor='r')
plt.savefig('/home/mn811042/Thesis/chapter6/sensitivity_jules_boreas/figures_boreas_ssa_oa_can_rad_mod/gpp_193_206_bt_lai_500_can_rad_5_diff_struc_cosz.png')
plt.show()
| gpl-2.0 |
toobaz/pandas | scripts/download_wheels.py | 1 | 1173 | #!/usr/bin/env python
"""Fetch wheels from wheels.scipy.org for a pandas version."""
import argparse
import pathlib
import sys
import urllib.parse
import urllib.request
from lxml import html
def parse_args(args=None):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("version", type=str, help="Pandas version (0.23.0)")
return parser.parse_args(args)
def fetch(version):
base = "http://wheels.scipy.org"
tree = html.parse(base)
root = tree.getroot()
dest = pathlib.Path("dist")
dest.mkdir(exist_ok=True)
files = [
x
for x in root.xpath("//a/text()")
if x.startswith("pandas-{}".format(version)) and not dest.joinpath(x).exists()
]
N = len(files)
for i, filename in enumerate(files, 1):
out = str(dest.joinpath(filename))
link = urllib.request.urljoin(base, filename)
urllib.request.urlretrieve(link, out)
print(
"Downloaded {link} to {out} [{i}/{N}]".format(link=link, out=out, i=i, N=N)
)
def main(args=None):
args = parse_args(args)
fetch(args.version)
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause |
spinellic/Mission-Planner | Lib/site-packages/numpy/doc/creation.py | 94 | 5411 | """
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or record arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to Numpy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most
obvious examples are lists and tuples. See the documentation for array() for
details for its use. Some objects may support the array-protocol and allow
conversion to arrays this way. A simple way to find out if the object can be
converted to a numpy array using array() is simply to try it interactively and
see if it works! (The Python Way).
Examples: ::
>>> x = np.array([2,3,1,0])
>>> x = np.array([2, 3, 1, 0])
>>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists,
and types
>>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
Intrinsic Numpy Array Creation
==============================
Numpy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64.
``>>> np.zeros((2, 3))
array([[ 0., 0., 0.], [ 0., 0., 0.]])``
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
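For example (a short illustration; the exact output formatting may differ
slightly between NumPy versions): ::
 >>> np.ones((2, 3))
 array([[ 1., 1., 1.], [ 1., 1., 1.]])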
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=np.float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage, described in the
arange docstring, that the user should be aware of.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays so
check the last section as well)
::
HDF5: PyTables
FITS: PyFITS
Examples of formats that cannot be read directly, but which are not hard to
convert, are the image formats supported by libraries like PIL (able to read
and write many image formats such as jpg, png, etc).
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
More generic ascii files can be read using the io package in scipy.
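As a minimal sketch (assuming a purely numeric, comma-separated file named
'data.csv'; the file name is illustrative), numpy itself can also read such
files directly: ::
 >>> np.genfromtxt('data.csv', delimiter=',')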
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!) If a good C or C++ library exists that
reads the data, one can wrap that library with a variety of techniques though
that certainly is much more work and requires significantly more advanced
knowledge to interface with C or C++.
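A minimal sketch of the simple-format case (illustrative file name and dtype;
mind the byteorder caveat above): ::
 >>> a = np.arange(4, dtype=np.float64)
 >>> a.tofile('data.bin')
 >>> np.fromfile('data.bin', dtype=np.float64)
 array([ 0., 1., 2., 3.])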
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common uses are use
of the many array generation functions in random that can generate arrays of
random values, and some utility functions to generate special matrices (e.g.
diagonal).
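For instance (np.diag is deterministic, while np.random.random returns values
that will differ from run to run): ::
 >>> np.diag([1, 2, 3])
 array([[1, 0, 0], [0, 2, 0], [0, 0, 3]])
 >>> np.random.random((2, 2)) # values will vary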
"""
| gpl-3.0 |
yslib/SGMMCluster | SGMM/TrainBlockGMM.py | 1 | 7987 | import numpy as np
from multiprocessing import Process
from sklearn import mixture
import time
import os
import sys
import logging
# counts = [0]*256
# path1 = "d:/count.txt"
# path2 = "d:/gaussian.txt"
# def save_count(path,count):
# file = open(path,'w')
# for i in range(len(count)):
# file.write(str(count[i])+'\n')
# file.close()
# single gauss component
class Gauss:
def __init__(self, weight, mean, covariance):
self.weight_ = weight
self.mean_ = mean
self.covariance_ = covariance
# single block
class Block:
def __init__(self):
self.gauss_num_ = 0
self.gausses_ = []
def add_gauss(self, gauss):
self.gausses_.append(gauss)
def save_block(path, block):
file = open(path,'w')
for i in range(len(block.gausses_)):
file.write(str(block.gausses_[i].weight_)+' '+str(block.gausses_[i].mean_)+' '+str(block.gausses_[i].covariance_)+'\n')
file.close()
# read all data into a array
def read_all_data(file_path, data):
f = open(file_path, 'rb')
filedata = f.read()
filesize = f.tell()
print(filesize)
filedata2 = bytearray(filedata)
for index in range(0, filesize):
data[index] = filedata2[index]
# data = bytearray(f.read())
print("file size:"+str(filesize)+" byte(s)")
f.close()
# train xth GMM
def train_single_block(index,
width,
depth,
width_num,
depth_num,
side,
ubg,
data):
height_index = int(index / (width_num * depth_num))
depth_index = int((index - height_index * (width_num * depth_num)) / width_num)
width_index = int(index - height_index * (width_num * depth_num) - depth_index * width_num)
start_width = width_index * side
start_depth = depth_index * side
start_height = height_index * side
# print("--------IN BLOCK:"+str(index))
# print("block num:"+str(width_num)+" "+str(depth_num))
# print("block coordinates:"+str(width_index)+" "+str(depth_index)+" "+str(height_index))
obs = [[]] * side * side * side
zero = True
zero_count = 0;
for x in range(0, side):
for y in range(0, side):
for z in range(0, side):
final_index = x * side * side + y * side + z
data_index = (start_height + x) * width * depth + (start_depth + y) * width + (start_width + z)
temp = data[data_index]
# if index == 456:
# counts.append(temp)
# if temp != 0:
# zero_count+=1
# zero = False
obs[final_index] = [temp]
# if zero == True:
# print("block:"+str(index)+" is zero")
# return Block()
# print(str(index)+" is non-zero:"+str(zero_count))
# if index == 456:
# save_count(path1,counts)
final_component_num = 4
g = mixture.GaussianMixture(n_components=final_component_num)
g.fit(obs)
final_g = g
# max_bic = g.bic(np.array(obs))
# max_num = min(ubg, len(obs))
# for component_num in range(2, max_num+1):
# g = mixture.GaussianMixture(n_components=component_num)
# g.fit(obs)
# bic_temp = g.bic(np.array(obs))
# if index == 456:
# print component_num,bic_temp
# if bic_temp < max_bic:
# final_g = g
# final_component_num = component_num
# max_bic = bic_temp
block = Block()
block.gauss_num_ = final_component_num
for component_index in range(0, final_component_num):
gauss = Gauss(final_g.weights_[component_index], final_g.means_[component_index][0], final_g.covariances_[component_index][0][0])
block.add_gauss(gauss)
# if index == 456:
# save_block(path2,block)
return block
# train a part of original data
def train_blocks(result_disk_address, data_source, block_num, index, stride, data, width, depth, depth_num, width_num, side, ubg):
block_gmm = [Block()] * stride
end_block = (index+1)*stride
end_index = stride
for i in range(0, stride):
if index * stride + i >= block_num:
end_block = index*stride+i;
end_index = i
break;
block_gmm[i] = train_single_block(index * stride + i, width, depth, width_num, depth_num, side, ubg, data)
gmm_output = result_disk_address + data_source + '_GMM_Result_'+str(index)+'.txt'
# restore block_sgmm into txt file
with open(gmm_output, "w") as f_out:
for i in range(0, end_index):
f_out.write(str(block_gmm[i].gauss_num_)+'\n')
for j in range(0, block_gmm[i].gauss_num_):
f_out.write(str(block_gmm[i].gausses_[j].weight_)+'\n')
f_out.write(str(block_gmm[i].gausses_[j].mean_)+'\n')
f_out.write(str(block_gmm[i].gausses_[j].covariance_)+'\n')
print("-----------IN FILE:"+str(index)+" training and saving blocks from "+str(index*stride)+" to "+str(end_block)+" done")
if __name__ == '__main__':
result_disk_address = ""
data_source = ""
width = 0
depth = 0
height = 0
process_num = 0
side = 0
if len(sys.argv) == 1:
result_disk_address = raw_input("input disk address:")
data_source = raw_input('input the data name:')
width = int(raw_input('width:'))
depth = int(raw_input('depth:'))
height = int(raw_input('height:'))
side = int(raw_input('side:'))
process_num = int(raw_input('input the process num (must be a divisor of the block number):'))
else:
result_disk_address = sys.argv[1]
data_source = sys.argv[2]
width = int(sys.argv[3])
depth = int(sys.argv[4])
height = int(sys.argv[5])
side = int(sys.argv[6])
process_num = int(sys.argv[7])
if not os.path.exists(result_disk_address + data_source + ".raw"):
print('file doesn\'t exist')
exit(0)
print("disk address:" + result_disk_address)
print("data name:" + data_source)
print("width:" + str(width) + " depth:" + str(depth) + " height:" + str(height) + " side:" + str(side))
print("process num (file num):" + str(process_num))
ubg = 4
np.random.seed(1)
width_num = width / side
depth_num = depth / side
height_num = height / side
total_num = width_num * depth_num * height_num
data = [0] * width * depth * height
stride = (total_num+process_num-1) / process_num
print("stride:"+str(stride))
read_all_data(result_disk_address + data_source + '.raw', data)
begin_time = time.localtime(time.time())
cpu_time_begin = time.clock()
print("total_num = " + str(total_num))
proc_record = []
for i in range(0, process_num):
p = Process(target=train_blocks, args=(result_disk_address,
data_source,
total_num,
i,
stride,
data,
width,
depth,
width_num,
depth_num,
side,
ubg))
p.start()
proc_record.append(p)
for p in proc_record:
p.join()
print("training GMM done.")
cpu_time_end = time.clock()
print time.strftime('Training began at %Y-%m-%d %H:%M:%S',begin_time)
print time.strftime('Training finished at %Y-%m-%d %H:%M:%S', time.localtime(time.time()))
print("cpu time cost in python:" + str(cpu_time_end - cpu_time_begin)+"s.")
# train_single_block(73800)
| mit |
khkaminska/scikit-learn | examples/cluster/plot_ward_structured_vs_unstructured.py | 320 | 3369 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
Example builds a swiss roll dataset and runs
hierarchical clustering on the position of its points.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifolds. On the opposite, when opposing connectivity constraints,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(np.float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
| bsd-3-clause |
siutanwong/scikit-learn | sklearn/feature_selection/tests/test_from_model.py | 244 | 1593 | import numpy as np
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC
iris = load_iris()
def test_transform_linear_model():
for clf in (LogisticRegression(C=0.1),
LinearSVC(C=0.01, dual=False),
SGDClassifier(alpha=0.001, n_iter=50, shuffle=True,
random_state=0)):
for thresh in (None, ".09*mean", "1e-5 * median"):
for func in (np.array, sp.csr_matrix):
X = func(iris.data)
clf.set_params(penalty="l1")
clf.fit(X, iris.target)
X_new = clf.transform(X, thresh)
if isinstance(clf, SGDClassifier):
assert_true(X_new.shape[1] <= X.shape[1])
else:
assert_less(X_new.shape[1], X.shape[1])
clf.set_params(penalty="l2")
clf.fit(X_new, iris.target)
pred = clf.predict(X_new)
assert_greater(np.mean(pred == iris.target), 0.7)
def test_invalid_input():
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=None)
clf.fit(iris.data, iris.target)
assert_raises(ValueError, clf.transform, iris.data, "gobbledigook")
assert_raises(ValueError, clf.transform, iris.data, ".5 * gobbledigook")
| bsd-3-clause |
Srisai85/scikit-learn | sklearn/utils/tests/test_utils.py | 215 | 8100 | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from itertools import chain
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest, assert_raises_regex)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils import gen_even_slices
from sklearn.utils.extmath import pinvh
from sklearn.utils.mocking import MockDataFrame
def test_make_rng():
# Check the check_random_state utility function behavior
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_resample_noarg():
# Border case not worth mentioning in doctests
assert_true(resample() is None)
def test_deprecated():
# Test whether the deprecated decorator issues appropriate warnings
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample_value_errors():
# Check that invalid arguments yield ValueError
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1], n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
# fun with read-only data in dataframes
# this happens in joblib memmapping
X.setflags(write=False)
X_df_readonly = pd.DataFrame(X)
with warnings.catch_warnings(record=True):
X_df_ro_indexed = safe_indexing(X_df_readonly, inds)
assert_array_equal(np.array(X_df_ro_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
def test_shuffle_dont_convert_to_array():
# Check that shuffle does not try to convert to numpy arrays with float
# dtypes can let any indexable datastructure pass-through.
a = ['a', 'b', 'c']
b = np.array(['a', 'b', 'c'], dtype=object)
c = [1, 2, 3]
d = MockDataFrame(np.array([['a', 0],
['b', 1],
['c', 2]],
dtype=object))
e = sp.csc_matrix(np.arange(6).reshape(3, 2))
a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)
assert_equal(a_s, ['c', 'b', 'a'])
assert_equal(type(a_s), list)
assert_array_equal(b_s, ['c', 'b', 'a'])
assert_equal(b_s.dtype, object)
assert_equal(c_s, [3, 2, 1])
assert_equal(type(c_s), list)
assert_array_equal(d_s, np.array([['c', 2],
['b', 1],
['a', 0]],
dtype=object))
assert_equal(type(d_s), MockDataFrame)
assert_array_equal(e_s.toarray(), np.array([[4, 5],
[2, 3],
[0, 1]]))
def test_gen_even_slices():
# check that gen_even_slices contains all samples
some_range = range(10)
joined_range = list(chain(*[some_range[slice] for slice in gen_even_slices(10, 3)]))
assert_array_equal(some_range, joined_range)
# check that passing negative n_chunks raises an error
slices = gen_even_slices(10, -1)
assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be"
" >=1", next, slices)
| bsd-3-clause |
neuropower/neuropower | neuropower/apps/neuropowertoolbox/neuropowercore/cluster.py | 2 | 1826 | import numpy as np
import pandas as pd
""" Extract local maxima from a spm, return a csv file with variables:
- x coordinate
- y coordinate
- z coordinate
- peak height """
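# Illustrative call (hypothetical threshold value; `spm` and `mask` are 3-D
# numpy arrays of identical shape and `exc` is the peak-height exclusion
# threshold):
# peaks = PeakTable(spm, exc=2.3, mask=mask)
# peaks.to_csv("peaks.csv", index=False)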
def PeakTable(spm,exc,mask):
# make a new array with an extra row/column/plane around the original array
spm_newdim = tuple(map(lambda x: x+2,spm.shape))
spm_ext = np.zeros((spm_newdim))
msk_ext = np.zeros((spm_newdim))
spm_ext.fill(-100)
spm_ext[1:(spm.shape[0]+1),1:(spm.shape[1]+1),1:(spm.shape[2]+1)] = spm
msk_ext[1:(spm.shape[0]+1),1:(spm.shape[1]+1),1:(spm.shape[2]+1)] = mask
spm_ext = spm_ext * msk_ext
shape = spm.shape
spm = None
# open peak csv
labels = ['x','y','z','peak']
peaks = pd.DataFrame(columns=labels)
# check for each voxel whether it's a peak, if it is, add to table
for m in xrange(1,shape[0]+1):
for n in xrange(1,shape[1]+1):
for o in xrange(1,shape[2]+1):
surroundings = None
res = None
if spm_ext[m,n,o]>exc:
surroundings=[spm_ext[m-1,n-1,o-1],
spm_ext[m-1,n-1,o],
spm_ext[m-1,n-1,o+1],
spm_ext[m-1,n,o-1],
spm_ext[m-1,n,o],
spm_ext[m-1,n,o+1],
spm_ext[m-1,n+1,o-1],
spm_ext[m-1,n+1,o],
spm_ext[m-1,n+1,o+1],
spm_ext[m,n-1,o-1],
spm_ext[m,n-1,o],
spm_ext[m,n-1,o+1],
spm_ext[m,n,o-1],
spm_ext[m,n,o+1],
spm_ext[m,n+1,o-1],
spm_ext[m,n+1,o],
spm_ext[m,n+1,o+1],
spm_ext[m+1,n-1,o-1],
spm_ext[m+1,n-1,o],
spm_ext[m+1,n-1,o+1],
spm_ext[m+1,n,o-1],
spm_ext[m+1,n,o],
spm_ext[m+1,n,o+1],
spm_ext[m+1,n+1,o-1],
spm_ext[m+1,n+1,o],
spm_ext[m+1,n+1,o+1]]
if spm_ext[m,n,o] > np.max(surroundings):
res =pd.DataFrame(data=[[m-1,n-1,o-1,spm_ext[m,n,o]]],columns=labels)
peaks=peaks.append(res)
peaks = peaks.set_index([range(len(peaks))])
return peaks
| mit |
BhallaLab/moose-full | moose-core/python/moose/recording.py | 2 | 4617 | from . import moose as _moose
_tick = 8
_base = '/_utils'
_path = _base + '/y{0}'
_counter = 0
_plots = []
_moose.Neutral( _base )
_defaultFields = {
_moose.Compartment : 'Vm',
_moose.ZombieCompartment : 'Vm',
_moose.HHChannel: 'Gk',
_moose.ZombieHHChannel: 'Gk',
_moose.HHChannel2D: 'Gk',
_moose.SynChan: 'Gk',
_moose.CaConc: 'Ca',
_moose.ZombieCaConc: 'Ca',
_moose.Pool: 'conc',
_moose.ZombiePool: 'conc',
_moose.ZPool: 'conc',
_moose.BufPool: 'conc',
_moose.ZombieBufPool: 'conc',
_moose.ZBufPool: 'conc',
_moose.FuncPool: 'conc',
_moose.ZombieFuncPool: 'conc',
_moose.ZFuncPool: 'conc',
}
def _defaultField( obj ):
return _defaultFields[ type( obj ) ]
def setDt( dt ):
'''-----------
Description
-----------
Sets time-step for recording values.
---------
Arguments
---------
dt: Time-step for recording values.
-------
Returns
-------
Nothing.'''
_moose.setClock( _tick, dt )
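# Illustrative use (hypothetical value): a 0.25 ms recording resolution would be
# requested with setDt(0.25e-3).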
class SetupError( Exception ):
pass
def _time( npoints = None ):
import numpy
if npoints is None:
try:
npoints = len( _plots[ 0 ].vec )
except IndexError:
raise SetupError(
'List of time-points cannot be constructed because '
'no plots have been set up yet.'
)
begin = 0.0
end = _moose.Clock( '/clock' ).currentTime
return numpy.linspace( begin, end, npoints )
class _Plot( _moose.Table ):
def __init__( self, path, obj, field, label ):
_moose.Table.__init__( self, path )
self._table = _moose.Table( path )
self.obj = obj
self.field = field
self.label = label
@property
def values( self ):
return self._table.vec
@property
def size( self ):
return len( self.values )
@property
def time( self ):
return _time( self.size )
def __iter__( self ):
return iter( self.values )
def record( obj, field = None, label = None ):
'''
'''
global _counter
# Checking if object is an iterable like list or a tuple, but not a string.
if hasattr( obj, '__iter__' ):
return [ record( o, field, label ) for o in obj ]
if isinstance( obj, str ):
obj = _moose.element( obj )
if field is None:
field = _defaultField( obj )
path = _path.format( _counter )
_counter += 1
p = _Plot( path, obj, field, label )
_plots.append( p )
_moose.connect( p, "requestData", obj, 'get_' + field )
_moose.useClock( _tick, path, "process" )
return p
def _label( plot, labelFormat = '{path}.{field}' ):
# Over-ride label format if label has been given explicitly.
if plot.label:
labelFormat = plot.label
return labelFormat.format(
path = plot.obj.path,
name = plot.obj.name,
field = plot.field
)
def _selectedPlots( selected ):
if selected is None:
# Returning a copy of this list, instead of reference. The returned
# list will be manipulated later.
return _plots[ : ]
elif isinstance( selected, _Plot ):
return [ selected ]
else:
return selected
def saveCSV(
fileName,
selected = None,
delimiter = '\t',
header = True,
headerCommentCharacter = '#',
labelFormat = '{path}.{field}',
timeCol = True,
timeHeader = 'Time',
fileMode = 'w' ):
'''
'''
import csv
plots = _selectedPlots( selected )
if header:
header = []
if timeCol:
header.append( timeHeader )
for plot in plots:
header.append( _label( plot, labelFormat ) )
header[ 0 ] = headerCommentCharacter + header[ 0 ]
if timeCol:
plots.insert( 0, _time() )
with open( fileName, fileMode ) as fout:
writer = csv.writer( fout, delimiter = delimiter )
if header:
writer.writerow( header )
writer.writerows( list(zip( *plots )) )
def saveXPLOT(
fileName,
selected = None,
labelFormat = '{path}.{field}',
fileMode = 'w' ):
'''
'''
plots = _selectedPlots( selected )
with open( fileName, fileMode ) as fout:
write = lambda line: fout.write( line + '\n' )
for ( i, plot ) in enumerate( plots ):
label = '/plotname ' + _label( plot, labelFormat )
if i > 0:
write( '' )
write( '/newplot' )
write( label )
for value in plot:
write( str( value ) )
def show(
selected = None,
combine = True,
labelFormat = '{path}.{field}',
xLabel = 'Time (s)',
yLabel = '{field}' ):
'''
'''
try:
from matplotlib import pyplot as plt
except ImportError:
print("Warning: recording.show(): Cannot find 'matplotlib'. Not showing plots.")
return
plots = _selectedPlots( selected )
if combine:
plt.figure()
for plot in plots:
if not combine:
plt.figure()
print(_label( plot ))
plt.plot( plot.time, plot.values, label = _label( plot ) )
plt.legend()
plt.show()
def HDF5():
pass
| gpl-2.0 |
Achuth17/scikit-learn | benchmarks/bench_plot_lasso_path.py | 301 | 4003 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but is a fat infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
'n_informative': n_features / 10,
'effective_rank': min(n_samples, n_features) / 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(np.int)
features_range = np.linspace(10, 2000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
#ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
#ax.legend()
i += 1
plt.show()
| bsd-3-clause |
EntilZha/qb | guesser/classify/learn_classifiers.py | 1 | 2269 | from numpy import *
import nltk.classify.util
from util.math_util import *
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.linear_model import LogisticRegression
from scipy import stats
from collections import Counter
import cPickle, csv, random, argparse
# trains a classifier, saves it to disk, and evaluates on heldout data
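# Illustrative call (the value of d is hypothetical): `params` is the tuple of
# trained network parameters (W, b, W2, b2, W3, b3, L) and `d` the embedding
# dimensionality, e.g.
# evaluate(train_qs, test_qs, params, d=300)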
def evaluate(train_qs, test_qs, params, d):
data = [train_qs, test_qs]
(W, b, W2, b2, W3, b3, L) = params
train_feats = []
test_feats = []
for tt, split in enumerate(data):
for qs, ans in split:
prev_qs = zeros((d, 1))
prev_sum = zeros((d, 1))
count = 0.
history = []
for dist in qs:
sent = qs[dist]
# input is average of all nouns in sentence
# av = average(L[:, sent], axis=1).reshape((d, 1))
history += sent
prev_sum += sum(L[:, sent], axis=1).reshape((d, 1))
if len(history) == 0:
av = zeros((d, 1))
else:
av = prev_sum / len(history)
# apply non-linearity
p = relu(W.dot(av) + b)
p2 = relu(W2.dot(p) + b2)
p3 = relu(W3.dot(p2) + b3)
curr_feats = {}
for dim, val in ndenumerate(p3):
curr_feats['__' + str(dim)] = val
if tt == 0:
train_feats.append( (curr_feats, ans[0]) )
else:
test_feats.append( (curr_feats, ans[0]) )
print 'total training instances:', len(train_feats)
print 'total testing instances:', len(test_feats)
random.shuffle(train_feats)
# can modify this classifier / do grid search on regularization parameter using sklearn
classifier = SklearnClassifier(LogisticRegression(C=10))
classifier.train(train_feats)
print 'accuracy train:', nltk.classify.util.accuracy(classifier, train_feats)
print 'accuracy test:', nltk.classify.util.accuracy(classifier, test_feats)
print ''
print 'dumping classifier'
cPickle.dump(classifier, open('data/deep/classifier', 'wb'),
protocol=cPickle.HIGHEST_PROTOCOL)
| mit |
bowenliu16/deepchem | deepchem/utils/save.py | 1 | 4968 | """
Simple utils to save and load from disk.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
# TODO(rbharath): Use standard joblib once old-data has been regenerated.
import joblib
from sklearn.externals import joblib as old_joblib
import gzip
import pickle
import pandas as pd
import numpy as np
import os
from rdkit import Chem
def log(string, verbose=True):
"""Print string if verbose."""
if verbose:
print(string)
def save_to_disk(dataset, filename, compress=3):
"""Save a dataset to file."""
joblib.dump(dataset, filename, compress=compress)
def get_input_type(input_file):
"""Get type of input file. Must be csv/pkl.gz/sdf file."""
filename, file_extension = os.path.splitext(input_file)
# If gzipped, need to compute extension again
if file_extension == ".gz":
filename, file_extension = os.path.splitext(filename)
if file_extension == ".csv":
return "csv"
elif file_extension == ".pkl":
return "pandas-pickle"
elif file_extension == ".joblib":
return "pandas-joblib"
elif file_extension == ".sdf":
return "sdf"
else:
raise ValueError("Unrecognized extension %s" % file_extension)
def load_data(input_files, shard_size=None, verbose=True):
"""Loads data from disk.
For CSV files, supports sharded loading for large files.
"""
if not len(input_files):
return
input_type = get_input_type(input_files[0])
if input_type == "sdf":
if shard_size is not None:
log("Ignoring shard_size for sdf input.", verbose)
for value in load_sdf_files(input_files):
yield value
elif input_type == "csv":
for value in load_csv_files(input_files, shard_size, verbose=verbose):
yield value
elif input_type == "pandas-pickle":
for input_file in input_files:
yield load_pickle_from_disk(input_file)
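# Illustrative use of load_data (the file name and shard size are hypothetical):
# for shard in load_data(["assay_data.csv.gz"], shard_size=8192):
#   process(shard) # each CSV shard arrives as a pandas DataFrame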
def load_sdf_files(input_files):
"""Load SDF file into dataframe."""
dataframes = []
for input_file in input_files:
# Tasks are stored in .sdf.csv file
raw_df = next(load_csv_files([input_file+".csv"], shard_size=None))
# Structures are stored in .sdf file
print("Reading structures from %s." % input_file)
suppl = Chem.SDMolSupplier(str(input_file), False, False, False)
df_rows = []
for ind, mol in enumerate(suppl):
if mol is not None:
smiles = Chem.MolToSmiles(mol)
df_rows.append([ind,smiles,mol])
mol_df = pd.DataFrame(df_rows, columns=('mol_id', 'smiles', 'mol'))
dataframes.append(pd.concat([mol_df, raw_df], axis=1, join='inner'))
return dataframes
def load_csv_files(filenames, shard_size=None, verbose=True):
"""Load data as pandas dataframe."""
# First line of user-specified CSV *must* be header.
shard_num = 1
for filename in filenames:
if shard_size is None:
yield pd.read_csv(filename)
else:
log("About to start loading CSV from %s" % filename, verbose)
for df in pd.read_csv(filename, chunksize=shard_size):
log("Loading shard %d of size %s." % (shard_num, str(shard_size)),
verbose)
df = df.replace(np.nan, str(""), regex=True)
shard_num += 1
yield df
def load_from_disk(filename):
"""Load a dataset from file."""
name = filename
if os.path.splitext(name)[1] == ".gz":
name = os.path.splitext(name)[0]
if os.path.splitext(name)[1] == ".pkl":
return load_pickle_from_disk(filename)
elif os.path.splitext(name)[1] == ".joblib":
try:
return joblib.load(filename)
except KeyError:
# Try older joblib version for legacy files.
return old_joblib.load(filename)
elif os.path.splitext(name)[1] == ".csv":
# First line of user-specified CSV *must* be header.
df = pd.read_csv(filename, header=0)
df = df.replace(np.nan, str(""), regex=True)
return df
else:
raise ValueError("Unrecognized filetype for %s" % filename)
def load_sharded_csv(filenames):
"""Load a dataset from multiple files. Each file MUST have same column headers"""
dataframes = []
for name in filenames:
placeholder_name = name
if os.path.splitext(name)[1] == ".gz":
name = os.path.splitext(name)[0]
if os.path.splitext(name)[1] == ".csv":
# First line of user-specified CSV *must* be header.
df = pd.read_csv(placeholder_name, header=0)
df = df.replace(np.nan, str(""), regex=True)
dataframes.append(df)
else:
raise ValueError("Unrecognized filetype for %s" % filename)
#combine dataframes
combined_df = dataframes[0]
for i in range(0, len(dataframes) - 1):
combined_df = combined_df.append(dataframes[i+1])
combined_df = combined_df.reset_index(drop=True)
return combined_df
def load_pickle_from_disk(filename):
"""Load dataset from pickle file."""
if ".gz" in filename:
with gzip.open(filename, "rb") as f:
df = pickle.load(f)
else:
with open(filename, "rb") as f:
df = pickle.load(f)
return df
| gpl-3.0 |
cwhanse/pvlib-python | pvlib/tests/iotools/test_psm3.py | 2 | 6112 | """
test iotools for PSM3
"""
import os
from pvlib.iotools import psm3
from ..conftest import DATA_DIR, RERUNS, RERUNS_DELAY
import numpy as np
import pandas as pd
import pytest
from requests import HTTPError
from io import StringIO
import warnings
TMY_TEST_DATA = DATA_DIR / 'test_psm3_tmy-2017.csv'
YEAR_TEST_DATA = DATA_DIR / 'test_psm3_2017.csv'
YEAR_TEST_DATA_5MIN = DATA_DIR / 'test_psm3_2019_5min.csv'
MANUAL_TEST_DATA = DATA_DIR / 'test_read_psm3.csv'
LATITUDE, LONGITUDE = 40.5137, -108.5449
HEADER_FIELDS = [
'Source', 'Location ID', 'City', 'State', 'Country', 'Latitude',
'Longitude', 'Time Zone', 'Elevation', 'Local Time Zone',
'Dew Point Units', 'DHI Units', 'DNI Units', 'GHI Units',
'Temperature Units', 'Pressure Units', 'Wind Direction Units',
'Wind Speed', 'Surface Albedo Units', 'Version']
PVLIB_EMAIL = '[email protected]'
@pytest.fixture(scope="module")
def nrel_api_key():
"""Supplies pvlib-python's NREL Developer Network API key.
Azure Pipelines CI utilizes a secret variable set to NREL_API_KEY
to mitigate failures associated with using the default key of
"DEMO_KEY". A user is capable of using their own key this way if
desired however the default key should suffice for testing purposes.
"""
try:
demo_key = os.environ["NREL_API_KEY"]
except KeyError:
warnings.warn(
"WARNING: NREL API KEY environment variable not set! "
"Using DEMO_KEY instead. Unexpected failures may occur."
)
demo_key = 'DEMO_KEY'
return demo_key
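# Hedged example of supplying a personal key via the environment (the key value
# below is a placeholder):
#
#   export NREL_API_KEY="your-own-key"              # shell, before running pytest
#   os.environ["NREL_API_KEY"] = "your-own-key"     # or from Python, before the fixture runs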
def assert_psm3_equal(header, data, expected):
"""check consistency of PSM3 data"""
# check datevec columns
assert np.allclose(data.Year, expected.Year)
assert np.allclose(data.Month, expected.Month)
assert np.allclose(data.Day, expected.Day)
assert np.allclose(data.Hour, expected.Hour)
assert np.allclose(data.Minute, expected.Minute)
# check data columns
assert np.allclose(data.GHI, expected.GHI)
assert np.allclose(data.DNI, expected.DNI)
assert np.allclose(data.DHI, expected.DHI)
assert np.allclose(data.Temperature, expected.Temperature)
assert np.allclose(data.Pressure, expected.Pressure)
assert np.allclose(data['Dew Point'], expected['Dew Point'])
assert np.allclose(data['Surface Albedo'], expected['Surface Albedo'])
assert np.allclose(data['Wind Speed'], expected['Wind Speed'])
assert np.allclose(data['Wind Direction'], expected['Wind Direction'])
# check header
for hf in HEADER_FIELDS:
assert hf in header
# check timezone
assert (data.index.tzinfo.zone == 'Etc/GMT%+d' % -header['Time Zone'])
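# Hedged worked example of the timezone check above: a PSM3 header 'Time Zone'
# of -5 maps to 'Etc/GMT+5', since 'Etc/GMT' zone names carry the opposite sign
# of the UTC offset ('Etc/GMT+5' means UTC-05:00).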
@pytest.mark.remote_data
@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
def test_get_psm3_tmy(nrel_api_key):
"""test get_psm3 with a TMY"""
header, data = psm3.get_psm3(LATITUDE, LONGITUDE, nrel_api_key,
PVLIB_EMAIL, names='tmy-2017')
expected = pd.read_csv(TMY_TEST_DATA)
assert_psm3_equal(header, data, expected)
@pytest.mark.remote_data
@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
def test_get_psm3_singleyear(nrel_api_key):
"""test get_psm3 with a single year"""
header, data = psm3.get_psm3(LATITUDE, LONGITUDE, nrel_api_key,
PVLIB_EMAIL, names='2017', interval=30)
expected = pd.read_csv(YEAR_TEST_DATA)
assert_psm3_equal(header, data, expected)
@pytest.mark.remote_data
@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
def test_get_psm3_5min(nrel_api_key):
"""test get_psm3 for 5-minute data"""
header, data = psm3.get_psm3(LATITUDE, LONGITUDE, nrel_api_key,
PVLIB_EMAIL, names='2019', interval=5)
assert len(data) == 525600/5
first_day = data.loc['2019-01-01']
expected = pd.read_csv(YEAR_TEST_DATA_5MIN)
assert_psm3_equal(header, first_day, expected)
@pytest.mark.remote_data
@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
def test_get_psm3_check_leap_day(nrel_api_key):
_, data_2012 = psm3.get_psm3(LATITUDE, LONGITUDE, nrel_api_key,
PVLIB_EMAIL, names="2012", interval=60,
leap_day=True)
assert len(data_2012) == (8760 + 24)
@pytest.mark.parametrize('latitude, longitude, api_key, names, interval',
[(LATITUDE, LONGITUDE, 'BAD', 'tmy-2017', 60),
(51, -5, nrel_api_key, 'tmy-2017', 60),
(LATITUDE, LONGITUDE, nrel_api_key, 'bad', 60),
(LATITUDE, LONGITUDE, nrel_api_key, '2017', 15),
])
@pytest.mark.remote_data
@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
def test_get_psm3_tmy_errors(
latitude, longitude, api_key, names, interval
):
"""Test get_psm3() for multiple erroneous input scenarios.
These scenarios include:
* Bad API key -> HTTP 403 forbidden because api_key is rejected
* Bad latitude/longitude -> Coordinates were not found in the NSRDB.
* Bad name -> Name is not one of the available options.
* Bad interval, single year -> Intervals can only be 30 or 60 minutes.
"""
with pytest.raises(HTTPError) as excinfo:
psm3.get_psm3(latitude, longitude, api_key, PVLIB_EMAIL,
names=names, interval=interval)
# ensure the HTTPError caught isn't due to overuse of the API key
assert "OVER_RATE_LIMIT" not in str(excinfo.value)
@pytest.fixture
def io_input(request):
"""file-like object for parse_psm3"""
with MANUAL_TEST_DATA.open() as f:
data = f.read()
obj = StringIO(data)
return obj
def test_parse_psm3(io_input):
"""test parse_psm3"""
header, data = psm3.parse_psm3(io_input)
expected = pd.read_csv(YEAR_TEST_DATA)
assert_psm3_equal(header, data, expected)
def test_read_psm3():
"""test read_psm3"""
header, data = psm3.read_psm3(MANUAL_TEST_DATA)
expected = pd.read_csv(YEAR_TEST_DATA)
assert_psm3_equal(header, data, expected)
| bsd-3-clause |
georgetown-analytics/machine-learning | archive/text_analytics/solutions/exercise_02_sentiment.py | 254 | 2795 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
    # NOTE: we put the following in an 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
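    # Hedged note on the filtering above: min_df=3 discards tokens that appear
    # in fewer than 3 documents, and max_df=0.95 discards tokens that appear in
    # more than 95% of documents, i.e. the "too rare or too frequent" tokens.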
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
# TASK: print the cross-validated scores for the each parameters set
# explored by the grid search
print(grid_search.grid_scores_)
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| mit |
CompPhysics/MachineLearning | doc/src/Regression/fit.py | 1 | 2660 | # Common imports
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.model_selection import train_test_split
from sklearn.utils import resample
from sklearn.metrics import mean_squared_error
# Where to save the figures and data files
PROJECT_ROOT_DIR = "Results"
FIGURE_ID = "Results/FigureFiles"
DATA_ID = "DataFiles/"
if not os.path.exists(PROJECT_ROOT_DIR):
os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(FIGURE_ID):
os.makedirs(FIGURE_ID)
if not os.path.exists(DATA_ID):
os.makedirs(DATA_ID)
def image_path(fig_id):
return os.path.join(FIGURE_ID, fig_id)
def data_path(dat_id):
return os.path.join(DATA_ID, dat_id)
def save_fig(fig_id):
plt.savefig(image_path(fig_id) + ".png", format='png')
infile = open(data_path("EoS.csv"),'r')
# Read the EoS data as csv file and organize the data into two arrays with density and energies
EoS = pd.read_csv(infile, names=('Density', 'Energy'))
EoS['Energy'] = pd.to_numeric(EoS['Energy'], errors='coerce')
EoS = EoS.dropna()
Energies = EoS['Energy']
Density = EoS['Density']
# The design matrix, now as a function of various polytropes
Maxpolydegree = 30
X = np.zeros((len(Density),Maxpolydegree))
X[:,0] = 1.0
testerror = np.zeros(Maxpolydegree)
trainingerror = np.zeros(Maxpolydegree)
polynomial = np.zeros(Maxpolydegree)
trials = 100
for polydegree in range(1, Maxpolydegree):
polynomial[polydegree] = polydegree
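    # Descriptive note: column d of the design matrix holds Density**(d/3),
    # i.e. powers of the cube root of the density (a polytrope-like expansion).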
for degree in range(polydegree):
X[:,degree] = Density**(degree/3.0)
# loop over trials in order to estimate the expectation value of the MSE
testerror[polydegree] = 0.0
trainingerror[polydegree] = 0.0
for samples in range(trials):
x_train, x_test, y_train, y_test = train_test_split(X, Energies, test_size=0.2)
model = LinearRegression(fit_intercept=True).fit(x_train, y_train)
ypred = model.predict(x_train)
ytilde = model.predict(x_test)
testerror[polydegree] += mean_squared_error(y_test, ytilde)
trainingerror[polydegree] += mean_squared_error(y_train, ypred)
testerror[polydegree] /= trials
trainingerror[polydegree] /= trials
print("Degree of polynomial: %3d"% polynomial[polydegree])
print("Mean squared error on training data: %.8f" % trainingerror[polydegree])
print("Mean squared error on test data: %.8f" % testerror[polydegree])
plt.plot(polynomial, np.log10(trainingerror), label='Training Error')
plt.plot(polynomial, np.log10(testerror), label='Test Error')
plt.xlabel('Polynomial degree')
plt.ylabel('log10[MSE]')
plt.legend()
plt.show()
| cc0-1.0 |
lmallin/coverage_test | python_venv/lib/python2.7/site-packages/pandas/tests/test_algos.py | 3 | 57570 | # -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from pandas import (Series, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex,
Index, IntervalIndex)
import pandas as pd
from pandas import compat
from pandas._libs import (groupby as libgroupby, algos as libalgos,
hashtable)
from pandas._libs.hashtable import unique_label_indices
from pandas.compat import lrange, range
import pandas.core.algorithms as algos
import pandas.util.testing as tm
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_series_equal(result, expected)
s = Series(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(s, [2, 4], np.nan))
expected = Series(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_series_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_series_equal(result, expected)
class TestSafeSort(object):
def test_basic_sort(self):
values = [3, 1, 2, 0, 4]
result = algos.safe_sort(values)
expected = np.array([0, 1, 2, 3, 4])
tm.assert_numpy_array_equal(result, expected)
values = list("baaacb")
result = algos.safe_sort(values)
expected = np.array(list("aaabbc"))
tm.assert_numpy_array_equal(result, expected)
values = []
result = algos.safe_sort(values)
expected = np.array([])
tm.assert_numpy_array_equal(result, expected)
def test_labels(self):
values = [3, 1, 2, 0, 4]
expected = np.array([0, 1, 2, 3, 4])
labels = [0, 1, 1, 2, 3, 0, -1, 4]
result, result_labels = algos.safe_sort(values, labels)
expected_labels = np.array([3, 1, 1, 2, 0, 3, -1, 4], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
# na_sentinel
labels = [0, 1, 1, 2, 3, 0, 99, 4]
result, result_labels = algos.safe_sort(values, labels,
na_sentinel=99)
expected_labels = np.array([3, 1, 1, 2, 0, 3, 99, 4], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
# out of bound indices
labels = [0, 101, 102, 2, 3, 0, 99, 4]
result, result_labels = algos.safe_sort(values, labels)
expected_labels = np.array([3, -1, -1, 2, 0, 3, -1, 4], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
labels = []
result, result_labels = algos.safe_sort(values, labels)
expected_labels = np.array([], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
def test_mixed_integer(self):
values = np.array(['b', 1, 0, 'a', 0, 'b'], dtype=object)
result = algos.safe_sort(values)
expected = np.array([0, 0, 1, 'a', 'b', 'b'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
values = np.array(['b', 1, 0, 'a'], dtype=object)
labels = [0, 1, 2, 3, 0, -1, 1]
result, result_labels = algos.safe_sort(values, labels)
expected = np.array([0, 1, 'a', 'b'], dtype=object)
expected_labels = np.array([3, 1, 0, 2, 3, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
def test_unsortable(self):
# GH 13714
arr = np.array([1, 2, datetime.now(), 0, 3], dtype=object)
if compat.PY2 and not pd._np_version_under1p10:
# RuntimeWarning: tp_compare didn't return -1 or -2 for exception
with tm.assert_produces_warning(RuntimeWarning):
pytest.raises(TypeError, algos.safe_sort, arr)
else:
pytest.raises(TypeError, algos.safe_sort, arr)
def test_exceptions(self):
with tm.assert_raises_regex(TypeError,
"Only list-like objects are allowed"):
algos.safe_sort(values=1)
with tm.assert_raises_regex(TypeError,
"Only list-like objects or None"):
algos.safe_sort(values=[0, 1, 2], labels=1)
with tm.assert_raises_regex(ValueError,
"values should be unique"):
algos.safe_sort(values=[0, 1, 2, 1], labels=[0, 1])
class TestFactorize(object):
def test_basic(self):
labels, uniques = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
uniques, np.array(['a', 'b', 'c'], dtype=object))
labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Series(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = pd.Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = pd.Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(uniques, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Series([v1, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(uniques, exp)
# period
v1 = pd.Period('201302', freq='M')
v2 = pd.Period('201303', freq='M')
x = Series([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
# GH 5986
v1 = pd.to_timedelta('1 day 1 min')
v2 = pd.to_timedelta('1 day')
x = Series([v1, v2, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = hashtable.Factorizer(len(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isnull(key),
expected == na_sentinel)
# nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isnull(key), expected == na_sentinel)
def test_complex_sorting(self):
# gh 12666 - check no segfault
        # Test not valid for numpy versions older than 1.11
if pd._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, 1], dtype=np.uint64)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, -1], dtype=object)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).astype('O')
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
len(algos.unique(lst))
def test_on_index_object(self):
mindex = pd.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = mindex.values
expected.sort()
mindex = mindex.repeat(2)
result = pd.unique(mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = pd.to_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.unique(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(dt_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = pd.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.unique(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(td_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.unique(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = pd.unique(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = pd.Categorical(list('bac'),
categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = pd.Categorical(list('bac'),
categories=list('abc'),
ordered=True)
# GH 15939
c = pd.Categorical(list('baabc'))
result = c.unique()
tm.assert_categorical_equal(result, expected)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected)
c = pd.Categorical(list('baabc'), ordered=True)
result = c.unique()
tm.assert_categorical_equal(result, expected_o)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected_o)
# Series of categorical dtype
s = Series(pd.Categorical(list('baabc')), name='foo')
result = s.unique()
tm.assert_categorical_equal(result, expected)
result = pd.unique(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = pd.CategoricalIndex(pd.Categorical(list('baabc'),
categories=list('bac')))
expected = pd.CategoricalIndex(expected)
result = ci.unique()
tm.assert_index_equal(result, expected)
result = pd.unique(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Series(
pd.Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).unique()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).unique()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(
Series(pd.Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(pd.Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = pd.unique(Series([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = pd.unique(Series([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = pd.unique(Series([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(pd.Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Series(pd.Categorical(list('aabc'))))
expected = pd.Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.isin(1, 1))
pytest.raises(TypeError, lambda: algos.isin(1, [1]))
pytest.raises(TypeError, lambda: algos.isin([1], 1))
def test_basic(self):
result = algos.isin([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), Series([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), Series(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = pd.date_range('20130101', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = pd.timedelta_range('1 day', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = pd.date_range('20000101', periods=2000000, freq='s').values
result = algos.isin(s, s[0:2])
expected = np.zeros(len(s), dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
class TestValueCounts(object):
def test_value_counts(self):
np.random.seed(1234)
from pandas.core.reshape.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
# assert isinstance(factor, n)
result = algos.value_counts(factor)
breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
expected_index = pd.IntervalIndex.from_breaks(
breaks).astype('category')
expected = Series([1, 1, 1, 1],
index=expected_index)
tm.assert_series_equal(result.sort_index(), expected.sort_index())
def test_value_counts_bins(self):
s = [1, 2, 3, 4]
result = algos.value_counts(s, bins=1)
expected = Series([4],
index=IntervalIndex.from_tuples([(0.996, 4.0)]))
tm.assert_series_equal(result, expected)
result = algos.value_counts(s, bins=2, sort=False)
expected = Series([2, 2],
index=IntervalIndex.from_tuples([(0.996, 2.5),
(2.5, 4.0)]))
tm.assert_series_equal(result, expected)
def test_value_counts_dtypes(self):
result = algos.value_counts([1, 1.])
assert len(result) == 1
result = algos.value_counts([1, 1.], bins=1)
assert len(result) == 1
result = algos.value_counts(Series([1, 1., '1'])) # object
assert len(result) == 2
pytest.raises(TypeError, lambda s: algos.value_counts(s, bins=1),
['1', 1])
def test_value_counts_nat(self):
td = Series([np.timedelta64(10000), pd.NaT], dtype='timedelta64[ns]')
dt = pd.to_datetime(['NaT', '2014-01-01'])
for s in [td, dt]:
vc = algos.value_counts(s)
vc_with_na = algos.value_counts(s, dropna=False)
assert len(vc) == 1
assert len(vc_with_na) == 2
exp_dt = Series({Timestamp('2014-01-01 00:00:00'): 1})
tm.assert_series_equal(algos.value_counts(dt), exp_dt)
# TODO same for (timedelta)
def test_value_counts_datetime_outofbounds(self):
# GH 13663
s = Series([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1),
datetime(3000, 1, 1), datetime(3000, 1, 1)])
res = s.value_counts()
exp_index = pd.Index([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(6000, 1, 1)], dtype=object)
exp = Series([3, 2, 1], index=exp_index)
tm.assert_series_equal(res, exp)
# GH 12424
res = pd.to_datetime(Series(['2362-01-01', np.nan]),
errors='ignore')
exp = Series(['2362-01-01', np.nan], dtype=object)
tm.assert_series_equal(res, exp)
def test_categorical(self):
s = Series(pd.Categorical(list('aaabbc')))
result = s.value_counts()
expected = Series([3, 2, 1],
index=pd.CategoricalIndex(['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
# preserve order?
s = s.cat.as_ordered()
result = s.value_counts()
expected.index = expected.index.as_ordered()
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_nans(self):
s = Series(pd.Categorical(list('aaaaabbbcc'))) # 4,3,2,1 (nan)
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series([4, 3, 2], index=pd.CategoricalIndex(
['a', 'b', 'c'], categories=['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([
4, 3, 2, 1
], index=CategoricalIndex(['a', 'b', 'c', np.nan]))
tm.assert_series_equal(result, expected, check_index_type=True)
# out of order
s = Series(pd.Categorical(
list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c']))
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series([4, 3, 2], index=pd.CategoricalIndex(
['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([4, 3, 2, 1], index=pd.CategoricalIndex(
['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_zeroes(self):
# keep the `d` category with 0
s = Series(pd.Categorical(
list('bbbaac'), categories=list('abcd'), ordered=True))
result = s.value_counts()
expected = Series([3, 2, 1, 0], index=pd.Categorical(
['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_dropna(self):
# https://github.com/pandas-dev/pandas/issues/9443#issuecomment-73719328
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=True),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=False),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False, None]).value_counts(dropna=True),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False, None]).value_counts(dropna=False),
Series([2, 1, 1], index=[True, False, np.nan]))
tm.assert_series_equal(
Series([10.3, 5., 5.]).value_counts(dropna=True),
Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
Series([10.3, 5., 5.]).value_counts(dropna=False),
Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
Series([10.3, 5., 5., None]).value_counts(dropna=True),
Series([2, 1], index=[5., 10.3]))
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
result = Series([10.3, 5., 5., None]).value_counts(dropna=False)
expected = Series([2, 1, 1], index=[5., 10.3, np.nan])
tm.assert_series_equal(result, expected)
def test_value_counts_normalized(self):
# GH12558
s = Series([1, 2, np.nan, np.nan, np.nan])
dtypes = (np.float64, np.object, 'M8[ns]')
for t in dtypes:
s_typed = s.astype(t)
result = s_typed.value_counts(normalize=True, dropna=False)
expected = Series([0.6, 0.2, 0.2],
index=Series([np.nan, 2.0, 1.0], dtype=t))
tm.assert_series_equal(result, expected)
result = s_typed.value_counts(normalize=True, dropna=True)
expected = Series([0.5, 0.5],
index=Series([2.0, 1.0], dtype=t))
tm.assert_series_equal(result, expected)
def test_value_counts_uint64(self):
arr = np.array([2**63], dtype=np.uint64)
expected = Series([1], index=[2**63])
result = algos.value_counts(arr)
tm.assert_series_equal(result, expected)
arr = np.array([-1, 2**63], dtype=object)
expected = Series([1, 1], index=[-1, 2**63])
result = algos.value_counts(arr)
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
tm.assert_series_equal(result, expected)
class TestDuplicated(object):
def test_duplicated_with_nas(self):
keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object)
result = algos.duplicated(keys)
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='first')
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='last')
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep=False)
expected = np.array([True, False, True, True, False, True])
tm.assert_numpy_array_equal(result, expected)
keys = np.empty(8, dtype=object)
for i, t in enumerate(zip([0, 0, np.nan, np.nan] * 2,
[0, np.nan, 0, np.nan] * 2)):
keys[i] = t
result = algos.duplicated(keys)
falses = [False] * 4
trues = [True] * 4
expected = np.array(falses + trues)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='last')
expected = np.array(trues + falses)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep=False)
expected = np.array(trues + trues)
tm.assert_numpy_array_equal(result, expected)
def test_numeric_object_likes(self):
cases = [np.array([1, 2, 1, 5, 3,
2, 4, 1, 5, 6]),
np.array([1.1, 2.2, 1.1, np.nan, 3.3,
2.2, 4.4, 1.1, np.nan, 6.6]),
np.array([1 + 1j, 2 + 2j, 1 + 1j, 5 + 5j, 3 + 3j,
2 + 2j, 4 + 4j, 1 + 1j, 5 + 5j, 6 + 6j]),
np.array(['a', 'b', 'a', 'e', 'c',
'b', 'd', 'a', 'e', 'f'], dtype=object),
np.array([1, 2**63, 1, 3**5, 10,
2**63, 39, 1, 3**5, 7], dtype=np.uint64)]
exp_first = np.array([False, False, True, False, False,
True, False, True, True, False])
exp_last = np.array([True, True, True, True, False,
False, False, False, False, False])
exp_false = exp_first | exp_last
for case in cases:
res_first = algos.duplicated(case, keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = algos.duplicated(case, keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = algos.duplicated(case, keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# index
for idx in [pd.Index(case), pd.Index(case, dtype='category')]:
res_first = idx.duplicated(keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# series
for s in [Series(case), Series(case, dtype='category')]:
res_first = s.duplicated(keep='first')
tm.assert_series_equal(res_first, Series(exp_first))
res_last = s.duplicated(keep='last')
tm.assert_series_equal(res_last, Series(exp_last))
res_false = s.duplicated(keep=False)
tm.assert_series_equal(res_false, Series(exp_false))
def test_datetime_likes(self):
dt = ['2011-01-01', '2011-01-02', '2011-01-01', 'NaT', '2011-01-03',
'2011-01-02', '2011-01-04', '2011-01-01', 'NaT', '2011-01-06']
td = ['1 days', '2 days', '1 days', 'NaT', '3 days',
'2 days', '4 days', '1 days', 'NaT', '6 days']
cases = [np.array([Timestamp(d) for d in dt]),
np.array([Timestamp(d, tz='US/Eastern') for d in dt]),
np.array([pd.Period(d, freq='D') for d in dt]),
np.array([np.datetime64(d) for d in dt]),
np.array([pd.Timedelta(d) for d in td])]
exp_first = np.array([False, False, True, False, False,
True, False, True, True, False])
exp_last = np.array([True, True, True, True, False,
False, False, False, False, False])
exp_false = exp_first | exp_last
for case in cases:
res_first = algos.duplicated(case, keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = algos.duplicated(case, keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = algos.duplicated(case, keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# index
for idx in [pd.Index(case), pd.Index(case, dtype='category'),
pd.Index(case, dtype=object)]:
res_first = idx.duplicated(keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# series
for s in [Series(case), Series(case, dtype='category'),
Series(case, dtype=object)]:
res_first = s.duplicated(keep='first')
tm.assert_series_equal(res_first, Series(exp_first))
res_last = s.duplicated(keep='last')
tm.assert_series_equal(res_last, Series(exp_last))
res_false = s.duplicated(keep=False)
tm.assert_series_equal(res_false, Series(exp_false))
def test_unique_index(self):
cases = [pd.Index([1, 2, 3]), pd.RangeIndex(0, 3)]
for case in cases:
assert case.is_unique
tm.assert_numpy_array_equal(case.duplicated(),
np.array([False, False, False]))
class GroupVarTestMixin(object):
def test_group_var_generic_1d(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 1))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(15, 1).astype(self.dtype)
labels = np.tile(np.arange(5), (3, )).astype('int64')
expected_out = (np.squeeze(values)
.reshape((5, 3), order='F')
.std(axis=1, ddof=1) ** 2)[:, np.newaxis]
expected_counts = counts + 3
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_1d_flat_labels(self):
prng = RandomState(1234)
out = (np.nan * np.ones((1, 1))).astype(self.dtype)
counts = np.zeros(1, dtype='int64')
values = 10 * prng.rand(5, 1).astype(self.dtype)
labels = np.zeros(5, dtype='int64')
expected_out = np.array([[values.std(ddof=1) ** 2]])
expected_counts = counts + 5
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_all_finite(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 2))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(10, 2).astype(self.dtype)
labels = np.tile(np.arange(5), (2, )).astype('int64')
expected_out = np.std(values.reshape(2, 5, 2), ddof=1, axis=0) ** 2
expected_counts = counts + 2
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_some_nan(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 2))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(10, 2).astype(self.dtype)
values[:, 1] = np.nan
labels = np.tile(np.arange(5), (2, )).astype('int64')
expected_out = np.vstack([values[:, 0]
.reshape(5, 2, order='F')
.std(ddof=1, axis=1) ** 2,
np.nan * np.ones(5)]).T.astype(self.dtype)
expected_counts = counts + 2
self.algo(out, counts, values, labels)
tm.assert_almost_equal(out, expected_out, check_less_precise=6)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_constant(self):
# Regression test from GH 10448.
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype='int64')
values = 0.832845131556193 * np.ones((3, 1), dtype=self.dtype)
labels = np.zeros(3, dtype='int64')
self.algo(out, counts, values, labels)
assert counts[0] == 3
assert out[0, 0] >= 0
tm.assert_almost_equal(out[0, 0], 0.0)
class TestGroupVarFloat64(GroupVarTestMixin):
__test__ = True
algo = libgroupby.group_var_float64
dtype = np.float64
rtol = 1e-5
def test_group_var_large_inputs(self):
prng = RandomState(1234)
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype='int64')
values = (prng.rand(10 ** 6) + 10 ** 12).astype(self.dtype)
values.shape = (10 ** 6, 1)
labels = np.zeros(10 ** 6, dtype='int64')
self.algo(out, counts, values, labels)
assert counts[0] == 10 ** 6
tm.assert_almost_equal(out[0, 0], 1.0 / 12, check_less_precise=True)
class TestGroupVarFloat32(GroupVarTestMixin):
__test__ = True
algo = libgroupby.group_var_float32
dtype = np.float32
rtol = 1e-2
class TestHashTable(object):
def test_lookup_nan(self):
xs = np.array([2.718, 3.14, np.nan, -7, 5, 2, 3])
m = hashtable.Float64HashTable()
m.map_locations(xs)
tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs),
dtype=np.int64))
def test_lookup_overflow(self):
xs = np.array([1, 2, 2**63], dtype=np.uint64)
m = hashtable.UInt64HashTable()
m.map_locations(xs)
tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs),
dtype=np.int64))
def test_get_unique(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(s.unique(), exp)
def test_vector_resize(self):
# Test for memory errors after internal vector
# reallocations (pull request #7157)
def _test_vector_resize(htable, uniques, dtype, nvals):
vals = np.array(np.random.randn(1000), dtype=dtype)
# get_labels appends to the vector
htable.get_labels(vals[:nvals], uniques, 0, -1)
# to_array resizes the vector
uniques.to_array()
htable.get_labels(vals, uniques, 0, -1)
test_cases = [
(hashtable.PyObjectHashTable, hashtable.ObjectVector, 'object'),
(hashtable.StringHashTable, hashtable.ObjectVector, 'object'),
(hashtable.Float64HashTable, hashtable.Float64Vector, 'float64'),
(hashtable.Int64HashTable, hashtable.Int64Vector, 'int64'),
(hashtable.UInt64HashTable, hashtable.UInt64Vector, 'uint64')]
for (tbl, vect, dtype) in test_cases:
# resizing to empty is a special case
_test_vector_resize(tbl(), vect(), dtype, 0)
_test_vector_resize(tbl(), vect(), dtype, 10)
def test_quantile():
s = Series(np.random.randn(100))
result = algos.quantile(s, [0, .25, .5, .75, 1.])
expected = algos.quantile(s.values, [0, .25, .5, .75, 1.])
tm.assert_almost_equal(result, expected)
def test_unique_label_indices():
a = np.random.randint(1, 1 << 10, 1 << 15).astype('i8')
left = unique_label_indices(a)
right = np.unique(a, return_index=True)[1]
tm.assert_numpy_array_equal(left, right,
check_dtype=False)
a[np.random.choice(len(a), 10)] = -1
left = unique_label_indices(a)
right = np.unique(a, return_index=True)[1][1:]
tm.assert_numpy_array_equal(left, right,
check_dtype=False)
class TestRank(object):
def test_scipy_compat(self):
tm._skip_if_no_scipy()
from scipy.stats import rankdata
def _check(arr):
mask = ~np.isfinite(arr)
arr = arr.copy()
result = libalgos.rank_1d_float64(arr)
arr[mask] = np.inf
exp = rankdata(arr)
exp[mask] = nan
assert_almost_equal(result, exp)
_check(np.array([nan, nan, 5., 5., 5., nan, 1, 2, 3, nan]))
_check(np.array([4., nan, 5., 5., 5., nan, 1, 2, 4., nan]))
def test_basic(self):
exp = np.array([1, 2], dtype=np.float64)
for dtype in np.typecodes['AllInteger']:
s = Series([1, 100], dtype=dtype)
tm.assert_numpy_array_equal(algos.rank(s), exp)
def test_uint64_overflow(self):
exp = np.array([1, 2], dtype=np.float64)
for dtype in [np.float64, np.uint64]:
s = Series([1, 2**63], dtype=dtype)
tm.assert_numpy_array_equal(algos.rank(s), exp)
def test_too_many_ndims(self):
arr = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]])
msg = "Array with ndim > 2 are not supported"
with tm.assert_raises_regex(TypeError, msg):
algos.rank(arr)
def test_pad_backfill_object_segfault():
old = np.array([], dtype='O')
new = np.array([datetime(2010, 12, 31)], dtype='O')
result = libalgos.pad_object(old, new)
expected = np.array([-1], dtype=np.int64)
assert (np.array_equal(result, expected))
result = libalgos.pad_object(new, old)
expected = np.array([], dtype=np.int64)
assert (np.array_equal(result, expected))
result = libalgos.backfill_object(old, new)
expected = np.array([-1], dtype=np.int64)
assert (np.array_equal(result, expected))
result = libalgos.backfill_object(new, old)
expected = np.array([], dtype=np.int64)
assert (np.array_equal(result, expected))
def test_arrmap():
values = np.array(['foo', 'foo', 'bar', 'bar', 'baz', 'qux'], dtype='O')
result = libalgos.arrmap_object(values, lambda x: x in ['foo', 'bar'])
assert (result.dtype == np.bool_)
class TestTseriesUtil(object):
def test_combineFunc(self):
pass
def test_reindex(self):
pass
def test_isnull(self):
pass
def test_groupby(self):
pass
def test_groupby_withnull(self):
pass
def test_backfill(self):
old = Index([1, 5, 10])
new = Index(lrange(12))
filler = libalgos.backfill_int64(old.values, new.values)
expect_filler = np.array([0, 0, 1, 1, 1, 1,
2, 2, 2, 2, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
# corner case
old = Index([1, 4])
new = Index(lrange(5, 10))
filler = libalgos.backfill_int64(old.values, new.values)
expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
def test_pad(self):
old = Index([1, 5, 10])
new = Index(lrange(12))
filler = libalgos.pad_int64(old.values, new.values)
expect_filler = np.array([-1, 0, 0, 0, 0, 1,
1, 1, 1, 1, 2, 2], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
# corner case
old = Index([5, 10])
new = Index(lrange(5))
filler = libalgos.pad_int64(old.values, new.values)
expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
def test_is_lexsorted():
failure = [
np.array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3,
3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0]),
np.array([30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16,
15, 14,
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28,
27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13,
12, 11,
10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25,
24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10,
9, 8,
7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25, 24, 23, 22,
21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7,
6, 5,
4, 3, 2, 1, 0])]
assert (not libalgos.is_lexsorted(failure))
# def test_get_group_index():
# a = np.array([0, 1, 2, 0, 2, 1, 0, 0], dtype=np.int64)
# b = np.array([1, 0, 3, 2, 0, 2, 3, 0], dtype=np.int64)
# expected = np.array([1, 4, 11, 2, 8, 6, 3, 0], dtype=np.int64)
# result = lib.get_group_index([a, b], (3, 4))
# assert(np.array_equal(result, expected))
def test_groupsort_indexer():
a = np.random.randint(0, 1000, 100).astype(np.int64)
b = np.random.randint(0, 1000, 100).astype(np.int64)
result = libalgos.groupsort_indexer(a, 1000)[0]
# need to use a stable sort
expected = np.argsort(a, kind='mergesort')
assert (np.array_equal(result, expected))
# compare with lexsort
key = a * 1000 + b
result = libalgos.groupsort_indexer(key, 1000000)[0]
expected = np.lexsort((b, a))
assert (np.array_equal(result, expected))
def test_infinity_sort():
# GH 13445
# numpy's argsort can be unhappy if something is less than
# itself. Instead, let's give our infinities a self-consistent
# ordering, but outside the float extended real line.
Inf = libalgos.Infinity()
NegInf = libalgos.NegInfinity()
ref_nums = [NegInf, float("-inf"), -1e100, 0, 1e100, float("inf"), Inf]
assert all(Inf >= x for x in ref_nums)
assert all(Inf > x or x is Inf for x in ref_nums)
assert Inf >= Inf and Inf == Inf
assert not Inf < Inf and not Inf > Inf
assert all(NegInf <= x for x in ref_nums)
assert all(NegInf < x or x is NegInf for x in ref_nums)
assert NegInf <= NegInf and NegInf == NegInf
assert not NegInf < NegInf and not NegInf > NegInf
for perm in permutations(ref_nums):
assert sorted(perm) == ref_nums
# smoke tests
np.array([libalgos.Infinity()] * 32).argsort()
np.array([libalgos.NegInfinity()] * 32).argsort()
def test_ensure_platform_int():
arr = np.arange(100, dtype=np.intp)
result = libalgos.ensure_platform_int(arr)
assert (result is arr)
def test_int64_add_overflow():
# see gh-14068
msg = "Overflow in int64 addition"
m = np.iinfo(np.int64).max
n = np.iinfo(np.int64).min
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), m)
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([n, n]), n)
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([n, n]), np.array([n, n]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, n]), np.array([n, n]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([False, True]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
b_mask=np.array([False, True]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([False, True]),
b_mask=np.array([False, True]))
with tm.assert_raises_regex(OverflowError, msg):
with tm.assert_produces_warning(RuntimeWarning):
algos.checked_add_with_arr(np.array([m, m]),
np.array([np.nan, m]))
# Check that the nan boolean arrays override whether or not
# the addition overflows. We don't check the result but just
# the fact that an OverflowError is not raised.
with pytest.raises(AssertionError):
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([True, True]))
with pytest.raises(AssertionError):
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
b_mask=np.array([True, True]))
with pytest.raises(AssertionError):
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([True, False]),
b_mask=np.array([False, True]))
class TestMode(object):
def test_no_mode(self):
exp = Series([], dtype=np.float64)
tm.assert_series_equal(algos.mode([]), exp)
def test_mode_single(self):
# GH 15714
exp_single = [1]
data_single = [1]
exp_multi = [1]
data_multi = [1, 1]
for dt in np.typecodes['AllInteger'] + np.typecodes['Float']:
s = Series(data_single, dtype=dt)
exp = Series(exp_single, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
s = Series(data_multi, dtype=dt)
exp = Series(exp_multi, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
exp = Series([1], dtype=np.int)
tm.assert_series_equal(algos.mode([1]), exp)
exp = Series(['a', 'b', 'c'], dtype=np.object)
tm.assert_series_equal(algos.mode(['a', 'b', 'c']), exp)
def test_number_mode(self):
exp_single = [1]
data_single = [1] * 5 + [2] * 3
exp_multi = [1, 3]
data_multi = [1] * 5 + [2] * 3 + [3] * 5
for dt in np.typecodes['AllInteger'] + np.typecodes['Float']:
s = Series(data_single, dtype=dt)
exp = Series(exp_single, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
s = Series(data_multi, dtype=dt)
exp = Series(exp_multi, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
def test_strobj_mode(self):
exp = ['b']
data = ['a'] * 2 + ['b'] * 3
s = Series(data, dtype='c')
exp = Series(exp, dtype='c')
tm.assert_series_equal(algos.mode(s), exp)
exp = ['bar']
data = ['foo'] * 2 + ['bar'] * 3
for dt in [str, object]:
s = Series(data, dtype=dt)
exp = Series(exp, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
def test_datelike_mode(self):
exp = Series(['1900-05-03', '2011-01-03',
'2013-01-02'], dtype="M8[ns]")
s = Series(['2011-01-03', '2013-01-02',
'1900-05-03'], dtype='M8[ns]')
tm.assert_series_equal(algos.mode(s), exp)
exp = Series(['2011-01-03', '2013-01-02'], dtype='M8[ns]')
s = Series(['2011-01-03', '2013-01-02', '1900-05-03',
'2011-01-03', '2013-01-02'], dtype='M8[ns]')
tm.assert_series_equal(algos.mode(s), exp)
def test_timedelta_mode(self):
exp = Series(['-1 days', '0 days', '1 days'],
dtype='timedelta64[ns]')
s = Series(['1 days', '-1 days', '0 days'],
dtype='timedelta64[ns]')
tm.assert_series_equal(algos.mode(s), exp)
exp = Series(['2 min', '1 day'], dtype='timedelta64[ns]')
s = Series(['1 day', '1 day', '-1 day', '-1 day 2 min',
'2 min', '2 min'], dtype='timedelta64[ns]')
tm.assert_series_equal(algos.mode(s), exp)
def test_mixed_dtype(self):
exp = Series(['foo'])
s = Series([1, 'foo', 'foo'])
tm.assert_series_equal(algos.mode(s), exp)
def test_uint64_overflow(self):
exp = Series([2**63], dtype=np.uint64)
s = Series([1, 2**63, 2**63], dtype=np.uint64)
tm.assert_series_equal(algos.mode(s), exp)
exp = Series([1, 2**63], dtype=np.uint64)
s = Series([1, 2**63], dtype=np.uint64)
tm.assert_series_equal(algos.mode(s), exp)
def test_categorical(self):
c = Categorical([1, 2])
exp = c
tm.assert_categorical_equal(algos.mode(c), exp)
tm.assert_categorical_equal(c.mode(), exp)
c = Categorical([1, 'a', 'a'])
exp = Categorical(['a'], categories=[1, 'a'])
tm.assert_categorical_equal(algos.mode(c), exp)
tm.assert_categorical_equal(c.mode(), exp)
c = Categorical([1, 1, 2, 3, 3])
exp = Categorical([1, 3], categories=[1, 2, 3])
tm.assert_categorical_equal(algos.mode(c), exp)
tm.assert_categorical_equal(c.mode(), exp)
def test_index(self):
idx = Index([1, 2, 3])
exp = Series([1, 2, 3], dtype=np.int64)
tm.assert_series_equal(algos.mode(idx), exp)
idx = Index([1, 'a', 'a'])
exp = Series(['a'], dtype=object)
tm.assert_series_equal(algos.mode(idx), exp)
idx = Index([1, 1, 2, 3, 3])
exp = Series([1, 3], dtype=np.int64)
tm.assert_series_equal(algos.mode(idx), exp)
exp = Series(['2 min', '1 day'], dtype='timedelta64[ns]')
idx = Index(['1 day', '1 day', '-1 day', '-1 day 2 min',
'2 min', '2 min'], dtype='timedelta64[ns]')
tm.assert_series_equal(algos.mode(idx), exp)
| mit |
quantumlib/ReCirq | recirq/otoc/parallel_xeb.py | 1 | 31044 | # Copyright 2020 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for performing parallel cross entropy benchmarking."""
from dataclasses import dataclass
from typing import Sequence, List, Set, Tuple, Dict, Union, Optional
import cirq
import numpy as np
import pybobyqa
from cirq.experiments.cross_entropy_benchmarking import _default_interaction_sequence
from matplotlib import pyplot as plt
from recirq.otoc.utils import (
bits_to_probabilities,
angles_to_fsim,
pauli_error_fit,
generic_fsim_gate,
cz_to_sqrt_iswap,
)
_rot_ops = [
cirq.X ** 0.5,
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5),
cirq.Y ** 0.5,
cirq.PhasedXPowGate(phase_exponent=0.75, exponent=0.5),
cirq.X ** -0.5,
cirq.PhasedXPowGate(phase_exponent=-0.75, exponent=0.5),
cirq.Y ** -0.5,
cirq.PhasedXPowGate(phase_exponent=-0.25, exponent=0.5),
]
_rot_mats = [cirq.unitary(r) for r in _rot_ops]
_fsim_angle_labels = [
"theta",
"delta_plus",
"delta_minus_off_diag",
"delta_minus_diag",
"phi",
]
@dataclass
class XEBData:
"""Class for storing the cycle-dependent fidelities and purities of an XEB experiment.
Each field contains two 1D arrays representing the circuit length (i.e. number of cycles) and
the corresponding fidelity or purity values. Fields containing 'fit' in their names are
fitting results to an exponential decay.
"""
fidelity_optimized: Tuple[np.ndarray, np.ndarray]
fidelity_optimized_fit: Tuple[np.ndarray, np.ndarray]
fidelity_unoptimized: Tuple[np.ndarray, np.ndarray]
fidelity_unoptimized_fit: Tuple[np.ndarray, np.ndarray]
purity: Tuple[np.ndarray, np.ndarray]
purity_fit: Tuple[np.ndarray, np.ndarray]
@dataclass
class ParallelXEBResults:
"""Class for storing results of a parallel-XEB experiment."""
fitted_gates: Dict[Tuple[Tuple[int, int], Tuple[int, int]], cirq.Circuit]
correction_gates: Dict[Tuple[Tuple[int, int], Tuple[int, int]], cirq.Circuit]
fitted_angles: Dict[Tuple[Tuple[int, int], Tuple[int, int]], Dict[str, float]]
final_errors_optimized: Dict[Tuple[Tuple[int, int], Tuple[int, int]], float]
final_errors_unoptimized: Dict[Tuple[Tuple[int, int], Tuple[int, int]], float]
purity_errors: Dict[Tuple[Tuple[int, int], Tuple[int, int]], float]
raw_data: Dict[Tuple[Tuple[int, int], Tuple[int, int]], XEBData]
def plot_xeb_results(xeb_results: ParallelXEBResults) -> None:
"""Plots the results of a parallel XEB experiment."""
for (q0, q1), xeb_data in xeb_results.raw_data.items():
# Plot the fidelities (both with unoptimized and optimized two-qubit unitaries) and speckle
# purities as functions of XEB cycles, for each qubit pair.
err_0 = xeb_results.final_errors_unoptimized[(q0, q1)]
err_1 = xeb_results.final_errors_optimized[(q0, q1)]
err_p = xeb_results.purity_errors[(q0, q1)]
fig = plt.figure()
plt.plot(
xeb_data.fidelity_unoptimized[0],
xeb_data.fidelity_unoptimized[1],
"ro",
figure=fig,
label=r"{} and {}, unoptimized [$r_p$ = {}]".format(q0, q1, err_0.__round__(5)),
)
plt.plot(
xeb_data.fidelity_optimized[0],
xeb_data.fidelity_optimized[1],
"bo",
figure=fig,
label=r"{} and {}, optimized [$r_p$ = {}]".format(q0, q1, err_1.__round__(5)),
)
plt.plot(
xeb_data.purity[0],
xeb_data.purity[1],
"go",
figure=fig,
label=r"{} and {}, purity error = {}".format(q0, q1, err_p.__round__(5)),
)
plt.plot(xeb_data.fidelity_unoptimized_fit[0], xeb_data.fidelity_unoptimized_fit[1], "r--")
plt.plot(xeb_data.fidelity_optimized_fit[0], xeb_data.fidelity_optimized_fit[1], "b--")
plt.plot(xeb_data.purity_fit[0], xeb_data.purity_fit[1], "g--")
plt.legend()
plt.xlabel("Number of Cycles")
plt.ylabel(r"XEB Fidelity")
num_pairs = len(list(xeb_results.final_errors_optimized.keys()))
pair_pos = np.linspace(0, 1, num_pairs)
# Plot the integrated histogram of Pauli errors for all pairs.
fig_0 = plt.figure()
plt.plot(
sorted(xeb_results.final_errors_unoptimized.values()),
pair_pos,
figure=fig_0,
label="Unoptimized Unitaries",
)
plt.plot(
sorted(xeb_results.final_errors_optimized.values()),
pair_pos,
figure=fig_0,
label="Optimized Unitaries",
)
plt.plot(
sorted(xeb_results.purity_errors.values()),
pair_pos,
figure=fig_0,
label="Purity Errors",
)
plt.xlabel(r"Pauli Error Rate, $r_p$")
plt.ylabel(r"Integrated Histogram")
plt.legend()
# Plot the shifts in the FSIM angles derived from fitting the XEB data.
fig_1 = plt.figure()
for label in _fsim_angle_labels:
shifts = [a[label] for a in xeb_results.fitted_angles.values()]
plt.plot(sorted(shifts), pair_pos, figure=fig_1, label=label)
plt.xlabel(r"FSIM Angle Error (Radian)")
plt.ylabel(r"Integrated Histogram")
plt.legend()
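# Hedged usage sketch (added for illustration, not part of the original module): given a
# ParallelXEBResults object -- assumed here to come from parallel_xeb_fidelities defined
# below -- this shows how the per-pair Pauli errors can be inspected and how the summary
# plots above are produced.
def _example_inspect_and_plot(xeb_results: ParallelXEBResults) -> None:
    for pair in sorted(xeb_results.final_errors_optimized):
        err_before = xeb_results.final_errors_unoptimized[pair]
        err_after = xeb_results.final_errors_optimized[pair]
        print("pair {}: Pauli error {:.5f} -> {:.5f} after fitting".format(
            pair, err_before, err_after))
    plot_xeb_results(xeb_results)
    plt.show()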
def build_xeb_circuits(
qubits: Sequence[cirq.GridQubit],
cycles: Sequence[int],
benchmark_ops: Sequence[Union[cirq.Moment, Sequence[cirq.Moment]]] = None,
random_seed: int = None,
sq_rand_nums: Optional[np.ndarray] = None,
reverse: bool = False,
z_only: bool = False,
ancilla: Optional[cirq.GridQubit] = None,
cycles_per_echo: Optional[int] = None,
light_cones: Optional[List[List[Set[cirq.GridQubit]]]] = None,
echo_indices: Optional[np.ndarray] = None,
) -> Tuple[List[cirq.Circuit], np.ndarray]:
r"""Builds random circuits for cross entropy benchmarking (XEB).
    A list of cirq.Circuit objects with varying lengths is generated; each circuit is made of
    random single-qubit gates and optional two-qubit gates.
Args:
qubits: The qubits to be involved in XEB.
cycles: The different numbers of cycles the random circuits will have.
benchmark_ops: The operations to be inserted between random single-qubit gates. They can
be one or more cirq.Moment objects, or None (in which case no operation will be
inserted between the random single-qubit gates).
random_seed: The random seed for the single-qubit gates. If unspecified, no random seed
will be used.
sq_rand_nums: The random numbers representing the single-qubit gates. They must be
            integers from 0 to 7 if z_only is False, and floats between -1 and 1 if z_only is
True. The dimension of sq_rand_nums should be len(qubits) by max(cycles). If
unspecified, the gates will be generated in-situ at random.
reverse: If True, benchmark_ops will be applied before the random single-qubit gates in
each cycle. Otherwise, it will be applied after the random single-qubit gates.
z_only: Whether the single-qubit gates are to be random \pi/2 rotations around axes on
the equatorial plane of the Bloch sphere (z_only = False), or random rotations around
the z-axis (z_only = True). In the former case, the axes of rotations will be chosen
randomly from 8 evenly spaced axes ($\pi/4$, $\pi/2$ ... $7\pi/4$ radians from the
x-axis). In the latter case, the angles of rotation will be any random value between
$-\pi$ and $\pi$.
ancilla: If specified, an additional qubit will be included in the circuit which does not
interact with the other qubits and only has spin-echo pulses applied to itself.
cycles_per_echo: How often a spin-echo (Y gate) gate is to be applied to the ancilla
qubit. For example, if the value is 2, a Y gate will be applied every other cycle.
        light_cones: A list of length 1 or 2, each specifying a lightcone corresponding to a list
of sets of qubits with the same length as max(cycles). For each cycle, single-qubit
gates outside the first lightcone are either removed or replaced with a spin-echo
pulse. Single-qubit gates outside the second lightcone, if specified, are always
removed.
echo_indices: An array with the same dimension as sq_rand_nums and random integer values
of 1, 2, 3 or 4. They specify the spin-echo pulses applied to qubits outside the
first lightcone, which can be +/-X or +/-Y gates.
Returns:
all_circuits: A list of random circuits, each containing a specified number of cycles.
        sq_gate_indices: An NxM array, where N is the number of qubits and M is the maximum
number of cycles. The array elements are the indices for the random single-qubit gates.
"""
if light_cones is not None:
if len(light_cones) > 2:
raise ValueError("light_cones may only have length 1 or 2")
if benchmark_ops is not None:
num_d = len(benchmark_ops)
else:
num_d = 0
max_cycles = max(cycles)
single_rots, sq_gate_indices = _random_rotations(
qubits,
max_cycles,
random_seed,
sq_rand_nums,
light_cones,
echo_indices,
z_rotations_only=z_only,
)
all_circuits = [] # type: List[cirq.Circuit]
for num_cycles in cycles:
circuit_exp = cirq.Circuit()
for i in range(num_cycles):
c = i + 1 if not reverse else num_cycles - i
if ancilla is not None and cycles_per_echo is not None:
if c % cycles_per_echo == 0:
op_list = [cirq.Y(ancilla)]
op_list.extend(single_rots[i])
rots = cirq.Moment(op_list)
else:
rots = cirq.Moment(single_rots[i])
else:
rots = cirq.Moment(single_rots[i])
if reverse:
if benchmark_ops is not None:
circuit_exp.append(benchmark_ops[i % num_d])
circuit_exp.append(rots, strategy=cirq.InsertStrategy.NEW)
else:
circuit_exp.append(rots, strategy=cirq.InsertStrategy.NEW)
if benchmark_ops is not None:
circuit_exp.append(benchmark_ops[i % num_d])
all_circuits.append(circuit_exp)
return all_circuits, sq_gate_indices
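# Hedged usage sketch (added for illustration, not part of the original module): builds
# XEB circuits for a single pair of qubits with a sqrt-iSWAP layer between the random
# single-qubit layers. The qubit coordinates, cycle numbers and benchmark gate below are
# assumptions chosen only for this example.
def _example_build_xeb_circuits() -> Tuple[List[cirq.Circuit], np.ndarray]:
    qubits = [cirq.GridQubit(0, 0), cirq.GridQubit(0, 1)]
    cycles = [1, 2, 4, 8]
    benchmark_ops = [cirq.Moment([(cirq.ISWAP ** 0.5).on(*qubits)])]
    circuits, sq_indices = build_xeb_circuits(
        qubits, cycles, benchmark_ops=benchmark_ops, random_seed=0
    )
    # One circuit per entry in `cycles`; sq_indices has shape (len(qubits), max(cycles))
    # and holds the indices into _rot_ops of the random single-qubit gates used.
    return circuits, sq_indices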
def parallel_xeb_fidelities(
all_qubits: List[Tuple[int, int]],
num_cycle_range: Sequence[int],
measured_bits: List[List[List[np.ndarray]]],
scrambling_gates: List[List[np.ndarray]],
fsim_angles: Dict[str, float],
interaction_sequence: Optional[
List[Set[Tuple[Tuple[float, float], Tuple[float, float]]]]
] = None,
gate_to_fit: str = "iswap",
num_restarts: int = 3,
num_points: int = 8,
print_fitting_progress: bool = True,
) -> ParallelXEBResults:
"""Computes and optimizes cycle fidelities from parallel XEB data.
Args:
all_qubits: List of qubits involved in a parallel XEB experiment, specified using their
(row, col) locations.
num_cycle_range: The different numbers of cycles in the random circuits.
measured_bits: The experimental bit-strings stored in a nested list. The first dimension
of the nested list represents different configurations (e.g. how the two-qubit gates
are applied) used in parallel XEB. The second dimension represents different trials
(i.e. random circuit instances) used in XEB. The third dimension represents the
different numbers of cycles and must be the same as len(num_cycle_range). Each
np.ndarray has dimension M x N, where M is the number of repetitions (stats) for each
circuit and N is the number of qubits involved.
scrambling_gates: The random circuit indices specified as integers between 0 and 7. See
the documentation of build_xeb_circuits for details. The first dimension of the
nested list represents the different configurations and must be the same as the first
dimension of measured_bits. The second dimension represents the different trials and
must be the same as the second dimension of measured_bits.
fsim_angles: An initial guess for the five FSIM angles for each qubit pair.
        interaction_sequence: The pairs of qubits with FSIM applied for each configuration. Its
            length must be the same as len(measured_bits).
gate_to_fit: Can be either 'iswap', 'sqrt-iswap', 'cz' or any other string. Determines
the FSIM angles that will be changed from their initial guess values to optimize the
XEB fidelity of each qubit pair. For 'iswap', only 'delta_plus' and
'delta_minus_off_diag' are changed. For 'sqrt-iswap', 'delta_plus',
'delta_minus_off_diag' and 'delta_minus_diag' are changed. For 'cz',
only 'delta_plus' and 'delta_minus_diag' are changed. For any other string, all five
angles are changed.
num_restarts: Number of restarts with different random initial guesses.
num_points: The total number of XEB fidelities to be used in the cost function for
optimization. Default is 8, such that the cost function is the sum of the XEB
fidelities for the first 8 numbers of cycles in num_cycle_range.
print_fitting_progress: Whether to print progress during the fitting process.
Returns:
A ParallelXEBResults object that contains the following fields:
fitted_gates: A dictionary with qubit pairs as keys and optimized FSIM unitaries,
represented by cirq.Circuit objects, as values.
correction_gates: Same as fitted_gates, but with all Z rotations reversed in signs.
fitted_angles: A dictionary with qubit pairs as keys and optimized FSIM unitaries as
            values. Here the FSIM unitaries are represented as dictionaries with the names of
the FSIM phases as keys and their fitted values as values.
final_errors_optimized: A dictionary with qubit pairs as keys and their cycle errors
after fitting as values.
final_errors_unoptimized: A dictionary with qubit pairs as keys and their cycle errors
before fitting as values.
purity_errors: A dictionary with qubit pairs as keys and their speckle purity errors per
cycle as values.
raw_data: A dictionary with qubit pairs as keys and XEBData as values. Each XEBData
contains the cycle-dependent XEB fidelities and purities, as well as their fits.
"""
num_trials = len(measured_bits[0])
p_data_all, sq_gates = _pairwise_xeb_probabilities(
all_qubits,
num_cycle_range,
measured_bits,
scrambling_gates,
interaction_sequence,
)
final_errors_unoptimized = {}
final_errors_optimized = {}
delta_angles = {}
purity_errors = {}
fitted_gates = {}
fitted_angles = {}
correction_gates = {}
raw_data = {}
for (q0, q1), p_data in p_data_all.items():
if print_fitting_progress:
print("Fitting qubits {} and {}".format(q0, q1))
def xeb_fidelity(
angle_shifts: np.ndarray, num_p: int
) -> Tuple[float, List[float], np.ndarray, np.ndarray, float]:
new_angles = fsim_angles.copy()
for i, angle_name in enumerate(_fsim_angle_labels):
new_angles[angle_name] += angle_shifts[i]
fsim_mat = angles_to_fsim(**new_angles)
max_cycles = num_cycle_range[num_p - 1]
p_sim = [np.zeros((num_trials, 4)) for _ in range(max_cycles)]
for i in range(num_trials):
unitary = np.identity(4, dtype=complex)
for j in range(max_cycles):
mat_0 = _rot_mats[sq_gates[(q0, q1)][i][0, j]]
mat_1 = _rot_mats[sq_gates[(q0, q1)][i][1, j]]
unitary = np.kron(mat_0, mat_1).dot(unitary)
unitary = fsim_mat.dot(unitary)
if j + 1 in num_cycle_range:
idx = num_cycle_range.index(j + 1)
p_sim[idx][i, :] = np.abs(unitary[:, 0]) ** 2
fidelities = [_alpha_least_square(p_sim[i], p_data[i]) for i in range(num_p)]
cost = -np.sum(fidelities)
err, x_vals, y_vals = pauli_error_fit(
np.asarray(num_cycle_range)[0:num_p],
np.asarray(fidelities),
add_offset=False,
)
return err, fidelities, x_vals, y_vals, cost
def cost_function(angle_shifts: np.ndarray) -> float:
            # Accepts shifts in a variable number of FSIM angles and returns a cost equal to the
            # negative of the summed XEB fidelities. If sqrt-iSWAP is the gate, shifts in delta_plus,
# delta_minus_off_diag and delta_minus_diag are specified. If iSWAP is the gate,
# shifts in delta_plus and delta_minus_off_diag are specified. If CZ is the gate,
# shifts in delta_plus and delta_minus_diag are specified. In other cases, shifts in
# all 5 angles are specified. The unspecified angles are set to have zero shifts from
# their initial values.
if gate_to_fit == "sqrt-iswap":
full_shifts = np.zeros(5, dtype=float)
full_shifts[1:4] = angle_shifts
elif gate_to_fit == "iswap":
full_shifts = np.zeros(5, dtype=float)
full_shifts[1:3] = angle_shifts
elif gate_to_fit == "cz" or gate_to_fit == "composite-cz":
full_shifts = np.zeros(5, dtype=float)
full_shifts[1] = angle_shifts[0]
full_shifts[3] = angle_shifts[1]
else:
full_shifts = angle_shifts
_, _, _, _, cost = xeb_fidelity(full_shifts, num_p=num_points)
return cost
sp_purities = [_speckle_purity(p_data[i]) ** 0.5 for i in range(len(num_cycle_range))]
err_p, x_vals_p, y_vals_p = pauli_error_fit(
np.asarray(num_cycle_range), np.asarray(sp_purities), add_offset=True
)
purity_errors[(q0, q1)] = err_p
err_0, f_vals_0, x_fitted_0, y_fitted_0, _ = xeb_fidelity(
np.zeros(5), num_p=len(num_cycle_range)
)
final_errors_unoptimized[(q0, q1)] = err_0
# Set up initial guesses on the relevant FSIM angles according to the ideal gate. See
# comments in cost_function. All angles are allowed to shift up to +/- 1 rad from their
# ideal (initial guess) values.
err_min = 1.0
soln_vec = np.zeros(5)
if gate_to_fit == "sqrt-iswap":
init_guess = np.zeros(3)
bounds = (np.ones(3) * -1.0, np.ones(3) * 1.0)
elif gate_to_fit == "iswap" or gate_to_fit == "cz" or gate_to_fit == "composite-cz":
init_guess = np.zeros(2)
bounds = (np.ones(2) * -1.0, np.ones(2) * 1.0)
else:
init_guess = np.array([0.0, 0.0, 0.0, 0.0, 0.1])
bounds = (np.ones(5) * -1.0, np.ones(5) * 1.0)
for _ in range(num_restarts):
res = pybobyqa.solve(
cost_function, init_guess, maxfun=3000, bounds=bounds, rhoend=1e-11
)
# Randomize the initial values for the relevant FSIM angles.
if gate_to_fit == "sqrt-iswap":
init_guess = np.random.uniform(-0.3, 0.3, 3)
elif gate_to_fit == "iswap" or gate_to_fit == "cz" or gate_to_fit == "composite-cz":
init_guess = np.random.uniform(-0.3, 0.3, 2)
else:
init_guess = np.random.uniform(-0.2, 0.2, 5)
init_guess[0] = 0.0
init_guess[4] = 0.1
if res.f < err_min:
err_min = res.f
if gate_to_fit == "sqrt-iswap":
soln_vec = np.zeros(5)
soln_vec[1:4] = np.asarray(res.x)
elif gate_to_fit == "iswap":
soln_vec = np.zeros(5)
soln_vec[1:3] = np.asarray(res.x)
elif gate_to_fit == "cz" or gate_to_fit == "composite-cz":
soln_vec = np.zeros(5)
soln_vec[1] = np.asarray(res.x)[0]
soln_vec[3] = np.asarray(res.x)[1]
else:
soln_vec = np.asarray(res.x)
err_1, f_vals_1, x_fitted_1, y_fitted_1, _ = xeb_fidelity(
soln_vec, num_p=len(num_cycle_range)
)
final_errors_optimized[(q0, q1)] = err_1
delta_angles[(q0, q1)] = {a: soln_vec[i] for i, a in enumerate(_fsim_angle_labels)}
new_angles = fsim_angles.copy()
for k, v in new_angles.items():
new_angles[k] += delta_angles[(q0, q1)][k]
fitted_angles[(q0, q1)] = new_angles
q_0 = cirq.GridQubit(*q0)
q_1 = cirq.GridQubit(*q1)
gate_list = generic_fsim_gate(new_angles, (q_0, q_1))
circuit_fitted = cirq.Circuit(gate_list)
fitted_gates[(q0, q1)] = circuit_fitted
# Use the fitted FSIM to set up the virtual-Z gates that are needed to cancel out the
# shifts in the SQ phases (i.e. delta angles).
corrected_angles = new_angles.copy()
corrected_angles["delta_plus"] *= -1.0
corrected_angles["delta_minus_off_diag"] *= -1.0
corrected_angles["delta_minus_diag"] *= -1.0
corrected_angles["theta"] = fsim_angles["theta"]
corrected_angles["phi"] = fsim_angles["phi"]
gate_list_corrected = generic_fsim_gate(corrected_angles, (q_0, q_1))
if gate_to_fit == "composite-cz":
circuit_corrected = cirq.Circuit(gate_list_corrected[0:2])
circuit_corrected.append(cz_to_sqrt_iswap(q_0, q_1))
circuit_corrected.append(cirq.Moment(gate_list_corrected[-2:]))
else:
circuit_corrected = cirq.Circuit(gate_list_corrected)
correction_gates[(q0, q1)] = circuit_corrected
raw_data[(q0, q1)] = XEBData(
fidelity_optimized=(np.asarray(num_cycle_range), np.asarray(f_vals_1)),
fidelity_optimized_fit=(x_fitted_1, y_fitted_1),
fidelity_unoptimized=(np.asarray(num_cycle_range), np.asarray(f_vals_0)),
fidelity_unoptimized_fit=(x_fitted_0, y_fitted_0),
purity=(np.asarray(num_cycle_range), np.asarray(sp_purities)),
purity_fit=(x_vals_p, y_vals_p),
)
return ParallelXEBResults(
fitted_gates=fitted_gates,
correction_gates=correction_gates,
fitted_angles=fitted_angles,
final_errors_optimized=final_errors_optimized,
final_errors_unoptimized=final_errors_unoptimized,
purity_errors=purity_errors,
raw_data=raw_data,
)
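# Hedged end-to-end sketch (added for illustration, not part of the original module): shows
# how measured bit-strings and the scrambling-gate indices returned by build_xeb_circuits
# would typically be fed into the fitting routine above. The qubit layout, cycle numbers
# and ideal sqrt-iSWAP angles are assumptions; in particular the sign convention for
# "theta" may differ between setups.
def _example_fit_parallel_xeb(
    measured_bits: List[List[List[np.ndarray]]],
    scrambling_gates: List[List[np.ndarray]],
) -> ParallelXEBResults:
    all_qubits = [(0, 0), (0, 1)]
    num_cycle_range = [1, 2, 4, 8, 12, 16, 20, 24]
    # A single configuration containing one interacting pair; measured_bits and
    # scrambling_gates must therefore have length 1 along their first dimension.
    interaction_sequence = [{((0, 0), (0, 1))}]
    fsim_angles = {
        "theta": np.pi / 4,  # ideal sqrt-iSWAP swap angle (assumed sign convention)
        "delta_plus": 0.0,
        "delta_minus_off_diag": 0.0,
        "delta_minus_diag": 0.0,
        "phi": 0.0,
    }
    results = parallel_xeb_fidelities(
        all_qubits,
        num_cycle_range,
        measured_bits,
        scrambling_gates,
        fsim_angles,
        interaction_sequence=interaction_sequence,
        gate_to_fit="sqrt-iswap",
        num_restarts=3,
        num_points=8,
    )
    plot_xeb_results(results)
    return results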
def _random_rotations(
qubits: Sequence[cirq.GridQubit],
num_layers: int,
rand_seed: Optional[int] = None,
rand_nums: Optional[np.ndarray] = None,
light_cones: Optional[List[List[Set[cirq.GridQubit]]]] = None,
echo_indices: Optional[np.ndarray] = None,
z_rotations_only: bool = False,
) -> Tuple[List[List[cirq.OP_TREE]], np.ndarray]:
"""Generate random single-qubit rotations and group them into different circuit layers."""
num_qubits = len(qubits)
random_state = cirq.value.parse_random_state(rand_seed)
if rand_nums is None:
if z_rotations_only:
rand_nums = random_state.uniform(-1, 1, (num_qubits, num_layers))
else:
rand_nums = random_state.choice(8, (num_qubits, num_layers))
single_q_layers = [] # type: List[List[cirq.OP_TREE]]
for i in range(num_layers):
op_seq = []
for j in range(num_qubits):
gate_choice = 0
if light_cones is not None:
if len(light_cones) == 1:
if qubits[j] not in light_cones[0][i]:
gate_choice = 1
elif len(light_cones) == 2:
if qubits[j] not in light_cones[1][i]:
gate_choice = 2
elif qubits[j] not in light_cones[0][i]:
gate_choice = 1
if gate_choice == 0:
if z_rotations_only:
op_seq.append(cirq.Z(qubits[j]) ** rand_nums[j, i])
else:
op_seq.append(_rot_ops[rand_nums[j, i]](qubits[j]))
elif gate_choice == 1:
if echo_indices is None:
op_seq.append(cirq.Y(qubits[j]))
else:
if echo_indices[j, i] > 0:
op_seq.append(_spin_echo_gates(echo_indices[j, i])(qubits[j]))
else:
continue
single_q_layers.append(op_seq)
return single_q_layers, rand_nums
def _alpha_least_square(probs_exp: np.ndarray, probs_data: np.ndarray) -> float:
"""Compare an ideal and an experimental probability distribution and compute their
cross-entropy fidelity.
"""
if probs_exp.shape != probs_data.shape:
raise ValueError("probs_exp and probs_data must have the same shape")
num_trials, num_states = probs_exp.shape
p_exp = np.maximum(probs_exp, np.zeros_like(probs_exp) + 1e-22)
p_data = np.maximum(probs_data, np.zeros_like(probs_data))
    numerator = 0.0
    denominator = 0.0
    p_uni = 1.0 / num_states
    for i in range(num_trials):
        s_incoherent = -np.sum(p_uni * np.log(p_exp[i, :]))
        s_expected = -np.sum(p_exp[i, :] * np.log(p_exp[i, :]))
        s_meas = -np.sum(p_data[i, :] * np.log(p_exp[i, :]))
        delta_h_meas = float(s_incoherent - s_meas)
        delta_h = float(s_incoherent - s_expected)
        numerator += delta_h_meas * delta_h
        denominator += delta_h ** 2
    return numerator / denominator
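# Minimal sanity check for the estimator above (added for illustration, not part of the
# original module): the least-squares fidelity is
#     alpha = sum_i(delta_h_meas_i * delta_h_i) / sum_i(delta_h_i ** 2),
# so feeding the ideal distribution back in gives 1, while a uniform distribution gives 0.
def _example_alpha_sanity_check() -> Tuple[float, float]:
    p_ideal = np.array([[0.7, 0.1, 0.1, 0.1], [0.4, 0.3, 0.2, 0.1]])
    p_uniform = np.full_like(p_ideal, 0.25)
    perfect = _alpha_least_square(p_ideal, p_ideal)  # 1.0
    depolarized = _alpha_least_square(p_ideal, p_uniform)  # 0.0
    return perfect, depolarized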
def _speckle_purity(probs_data: np.ndarray) -> float:
"""Compute the speckle purity of a probability distribution"""
d = 4
return np.var(probs_data) * d ** 2 * (d + 1) / (d - 1)
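# Hedged numerical check (added for illustration, not part of the original module): for an
# ensemble of Haar-random two-qubit pure states the output probabilities follow the
# Porter-Thomas distribution, whose variance makes the estimator above return a purity
# close to 1, while a perfectly uniform distribution returns 0.
def _example_speckle_purity_check() -> Tuple[float, float]:
    rand = np.random.RandomState(0)
    amps = rand.randn(1000, 4) + 1j * rand.randn(1000, 4)
    probs = np.abs(amps) ** 2
    probs /= probs.sum(axis=1, keepdims=True)  # 1000 Porter-Thomas samples
    pure = _speckle_purity(probs)  # close to 1.0
    mixed = _speckle_purity(np.full((1000, 4), 0.25))  # exactly 0.0
    return pure, mixed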
def _pairwise_xeb_probabilities(
all_qubits: List[Tuple[int, int]],
num_cycle_range: Sequence[int],
measured_bits: List[List[List[np.ndarray]]],
scrambling_gates: List[List[np.ndarray]],
interaction_sequence: Optional[
List[Set[Tuple[Tuple[float, float], Tuple[float, float]]]]
] = None,
) -> Tuple[
Dict[Tuple[Tuple[int, int], Tuple[int, int]], List[np.ndarray]],
Dict[Tuple[Tuple[int, int], Tuple[int, int]], List[np.ndarray]],
]:
"""Computes the probability distributions of each qubit pair in a parallel XEB experiment.
Args:
        all_qubits: List of qubits involved in parallel XEB, specified as (row, col) tuples.
num_cycle_range: Different numbers of circuit cycles used in parallel XEB.
measured_bits: The experimental bit-strings stored in a nested list. The first dimension
of the nested list represents different configurations (e.g. how the two-qubit gates
are applied) used in parallel XEB. The second dimension represents different trials
(i.e. random circuit instances) used in XEB. The third dimension represents the
different numbers of cycles and must be the same as len(num_cycle_range). Each
np.ndarray has dimension M x N, where M is the number of repetitions (stats) for each
circuit and N is the number of qubits involved.
scrambling_gates: The random circuit indices specified as integers between 0 and 7. See
the documentation of build_xeb_circuits for details. The first dimension of the
nested list represents the different configurations and must be the same as the first
dimension of measured_bits. The second dimension represents the different trials and
must be the same as the second dimension of measured_bits.
        interaction_sequence: The pairs of qubits with FSIM applied for each configuration. Its
            length must be the same as len(measured_bits).
Returns:
p_data_all: Keys are qubit pairs. Each value is a list (with length =
len(num_cycle_range)) of np.array. The rows of the array are of length 4 and
represent the measured probabilities of two-qubit basis states. Each row represents
the result from a different trial (circuit instance).
sq_gates: Keys are qubit pairs. Each value is a list (with length = number of circuit
instances) of np.array. Each array contains the indices (integers between 0 and 7)
for the random SQ gates relevant to the given qubit pair.
"""
num_trials = len(measured_bits[0])
qubits = [cirq.GridQubit(*idx) for idx in all_qubits]
if interaction_sequence is None:
int_layers = _default_interaction_sequence(qubits)
else:
int_layers = [
{(cirq.GridQubit(i, j), cirq.GridQubit(k, l)) for ((i, j), (k, l)) in layer}
for layer in interaction_sequence
]
p_data_all = {}
sq_gates = {}
for l, qubit_set in enumerate(int_layers):
qubit_pairs = [
((q_s[0].row, q_s[0].col), (q_s[1].row, q_s[1].col)) for q_s in int_layers[l]
]
p_data = {
q_s: [np.zeros((num_trials, 4)) for _ in range(len(num_cycle_range))]
for q_s in qubit_pairs
}
for (q0, q1) in qubit_pairs:
idx_0, idx_1 = all_qubits.index(q0), all_qubits.index(q1)
sq_gates[(q0, q1)] = [
scrambling_gates[l][k][[idx_0, idx_1], :] for k in range(num_trials)
]
for i in range(num_trials):
for j in range(len(num_cycle_range)):
bits = measured_bits[l][i][j]
for q_s in qubit_pairs:
p_data[q_s][j][i, :] = bits_to_probabilities(all_qubits, q_s, bits)
p_data_all = {**p_data_all, **p_data}
return p_data_all, sq_gates
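# Hedged shape demonstration (added for illustration, not part of the original module): the
# bit-strings below are random placeholders with the documented dimensions, used only to
# show how the per-pair probabilities and single-qubit gate indices come out of the helper
# above for one configuration containing a single qubit pair.
def _example_pairwise_probabilities():
    all_qubits = [(0, 0), (0, 1)]
    num_cycle_range = [1, 2, 4]
    num_trials, num_reps = 3, 500
    rand = np.random.RandomState(0)
    measured_bits = [
        [[rand.randint(0, 2, (num_reps, len(all_qubits))) for _ in num_cycle_range]
         for _ in range(num_trials)]
    ]
    scrambling_gates = [
        [rand.randint(0, 8, (len(all_qubits), max(num_cycle_range)))
         for _ in range(num_trials)]
    ]
    interaction_sequence = [{((0, 0), (0, 1))}]
    p_data_all, sq_gates = _pairwise_xeb_probabilities(
        all_qubits, num_cycle_range, measured_bits, scrambling_gates, interaction_sequence
    )
    # p_data_all[((0, 0), (0, 1))] is a list of len(num_cycle_range) arrays of shape
    # (num_trials, 4); sq_gates[((0, 0), (0, 1))] is a list of num_trials arrays of
    # shape (len(all_qubits), max(num_cycle_range)).
    return p_data_all, sq_gates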
def _spin_echo_gates(idx: int) -> cirq.Gate:
    """Returns one of four single-qubit pi rotations used for spin echoes."""
pi_pulses = [
cirq.PhasedXPowGate(phase_exponent=0.0, exponent=1.0),
cirq.PhasedXPowGate(phase_exponent=0.5, exponent=1.0),
cirq.PhasedXPowGate(phase_exponent=1.0, exponent=1.0),
cirq.PhasedXPowGate(phase_exponent=-0.5, exponent=1.0),
]
return pi_pulses[idx - 1]
| apache-2.0 |